author     Cherry Zhang <cherryyz@google.com>  2020-01-31 14:45:52 -0500
committer  Cherry Zhang <cherryyz@google.com>  2020-01-31 14:45:52 -0500
commit     ee04d45b8f380c2e3b3b26bc4771cd97fbd9d260 (patch)
tree       5db2e0ca22bb7d54f7c6d8a8d4acc96224b2e85c /src
parent     23c96e9bbd29e3a815e6faabbc40af0d4c4d4353 (diff)
parent     96002cd25c343edfb6c06d2bf1f31ae1e345b81f (diff)
[dev.link] all: merge branch 'master' into dev.link
It has been a while since we last did this.

Merge conflict resolution:
- deleted/rewritten code modified on master
  - CL 214286, ported in CL 217317 (cmd/internal/obj/objfile.go)
  - CL 210678, it already includes a fix to new code (cmd/link/internal/ld/deadcode.go)
  - CL 209317, applied in this CL (cmd/link/internal/loadelf/ldelf.go)

Change-Id: Ie927ea6a1d69ce49e8d03e56148cb2725e377876
Diffstat (limited to 'src')
-rw-r--r--  src/archive/tar/reader.go | 2
-rw-r--r--  src/cmd/asm/internal/asm/testdata/riscvenc.s | 57
-rw-r--r--  src/cmd/compile/internal/gc/fmt.go | 301
-rw-r--r--  src/cmd/compile/internal/gc/inl_test.go | 2
-rw-r--r--  src/cmd/compile/internal/gc/main.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/plive.go | 6
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go | 34
-rw-r--r--  src/cmd/compile/internal/gc/walk.go | 14
-rw-r--r--  src/cmd/compile/internal/logopt/log_opts.go | 14
-rw-r--r--  src/cmd/compile/internal/riscv64/galign.go | 25
-rw-r--r--  src/cmd/compile/internal/riscv64/ggen.go | 48
-rw-r--r--  src/cmd/compile/internal/riscv64/gsubr.go | 20
-rw-r--r--  src/cmd/compile/internal/riscv64/ssa.go | 496
-rw-r--r--  src/cmd/compile/internal/ssa/config.go | 10
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/fuse.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/fuse_test.go | 38
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64.rules | 29
-rw-r--r--  src/cmd/compile/internal/ssa/gen/RISCV64.rules | 478
-rw-r--r--  src/cmd/compile/internal/ssa/gen/RISCV64Ops.go | 338
-rw-r--r--  src/cmd/compile/internal/ssa/gen/genericOps.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/main.go | 1
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go | 1541
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64.go | 56
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteRISCV64.go | 5561
-rw-r--r--  src/cmd/compile/internal/ssa/schedule.go | 4
-rw-r--r--  src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts | 8
-rw-r--r--  src/cmd/compile/internal/types/utils.go | 12
-rw-r--r--  src/cmd/compile/main.go | 2
-rw-r--r--  src/cmd/dist/build.go | 20
-rw-r--r--  src/cmd/dist/buildtool.go | 3
-rw-r--r--  src/cmd/dist/main.go | 2
-rw-r--r--  src/cmd/doc/main.go | 7
-rw-r--r--  src/cmd/go.mod | 4
-rw-r--r--  src/cmd/go.sum | 6
-rw-r--r--  src/cmd/go/alldocs.go | 39
-rw-r--r--  src/cmd/go/go_test.go | 412
-rw-r--r--  src/cmd/go/internal/clean/clean.go | 4
-rw-r--r--  src/cmd/go/internal/get/vcs.go | 7
-rw-r--r--  src/cmd/go/internal/help/helpdoc.go | 6
-rw-r--r--  src/cmd/go/internal/list/list.go | 18
-rw-r--r--  src/cmd/go/internal/load/pkg.go | 53
-rw-r--r--  src/cmd/go/internal/modcmd/download.go | 4
-rw-r--r--  src/cmd/go/internal/modcmd/edit.go | 25
-rw-r--r--  src/cmd/go/internal/modfetch/cache.go | 18
-rw-r--r--  src/cmd/go/internal/modfetch/codehost/git.go | 7
-rw-r--r--  src/cmd/go/internal/modfetch/coderepo.go | 80
-rw-r--r--  src/cmd/go/internal/modfetch/repo.go | 23
-rw-r--r--  src/cmd/go/internal/modload/build.go | 2
-rw-r--r--  src/cmd/go/internal/modload/help.go | 7
-rw-r--r--  src/cmd/go/internal/modload/import.go | 39
-rw-r--r--  src/cmd/go/internal/modload/load.go | 15
-rw-r--r--  src/cmd/go/internal/modload/query.go | 2
-rw-r--r--  src/cmd/go/internal/modload/query_test.go | 13
-rw-r--r--  src/cmd/go/internal/test/testflag.go | 6
-rw-r--r--  src/cmd/go/internal/vet/vet.go | 1
-rw-r--r--  src/cmd/go/internal/vet/vetflag.go | 6
-rw-r--r--  src/cmd/go/internal/web/file_test.go | 2
-rw-r--r--  src/cmd/go/internal/work/action.go | 2
-rw-r--r--  src/cmd/go/internal/work/exec.go | 7
-rw-r--r--  src/cmd/go/internal/work/gc.go | 4
-rw-r--r--  src/cmd/go/internal/work/security.go | 2
-rw-r--r--  src/cmd/go/internal/work/security_test.go | 2
-rw-r--r--  src/cmd/go/testdata/badmod/go.mod | 1
-rw-r--r--  src/cmd/go/testdata/badmod/x.go | 4
-rw-r--r--  src/cmd/go/testdata/importcom/bad.go | 3
-rw-r--r--  src/cmd/go/testdata/importcom/conflict.go | 3
-rw-r--r--  src/cmd/go/testdata/importcom/src/bad/bad.go | 1
-rw-r--r--  src/cmd/go/testdata/importcom/src/conflict/a.go | 1
-rw-r--r--  src/cmd/go/testdata/importcom/src/conflict/b.go | 1
-rw-r--r--  src/cmd/go/testdata/importcom/src/works/x/x.go | 1
-rw-r--r--  src/cmd/go/testdata/importcom/src/works/x/x1.go | 1
-rw-r--r--  src/cmd/go/testdata/importcom/src/wrongplace/x.go | 1
-rw-r--r--  src/cmd/go/testdata/importcom/works.go | 3
-rw-r--r--  src/cmd/go/testdata/importcom/wrongplace.go | 3
-rw-r--r--  src/cmd/go/testdata/importcycle/src/selfimport/selfimport.go | 3
-rw-r--r--  src/cmd/go/testdata/script/README | 2
-rw-r--r--  src/cmd/go/testdata/script/build_exe.txt | 21
-rw-r--r--  src/cmd/go/testdata/script/build_import_comment.txt | 47
-rw-r--r--  src/cmd/go/testdata/script/build_import_cycle.txt | 10
-rw-r--r--  src/cmd/go/testdata/script/build_plugin_non_main.txt | 17
-rw-r--r--  src/cmd/go/testdata/script/cgo_asm_error.txt | 21
-rw-r--r--  src/cmd/go/testdata/script/clean_testcache.txt | 9
-rw-r--r--  src/cmd/go/testdata/script/cover_cgo.txt | 37
-rw-r--r--  src/cmd/go/testdata/script/cover_cgo_extra_file.txt | 43
-rw-r--r--  src/cmd/go/testdata/script/cover_cgo_extra_test.txt | 44
-rw-r--r--  src/cmd/go/testdata/script/cover_cgo_xtest.txt | 40
-rw-r--r--  src/cmd/go/testdata/script/cover_import_main_loop.txt | 22
-rw-r--r--  src/cmd/go/testdata/script/cover_pattern.txt | 37
-rw-r--r--  src/cmd/go/testdata/script/doc.txt | 75
-rw-r--r--  src/cmd/go/testdata/script/generate_bad_imports.txt | 11
-rw-r--r--  src/cmd/go/testdata/script/goflags.txt | 8
-rw-r--r--  src/cmd/go/testdata/script/gopath_vendor_dup_err.txt | 24
-rw-r--r--  src/cmd/go/testdata/script/link_syso_issue33139.txt | 4
-rw-r--r--  src/cmd/go/testdata/script/list_constraints.txt | 86
-rw-r--r--  src/cmd/go/testdata/script/list_parse_err.txt | 17
-rw-r--r--  src/cmd/go/testdata/script/list_tags.txt | 10
-rw-r--r--  src/cmd/go/testdata/script/list_wildcard_skip_nonmatching.txt | 13
-rw-r--r--  src/cmd/go/testdata/script/mod_bad_domain.txt | 8
-rw-r--r--  src/cmd/go/testdata/script/mod_build_info_err.txt | 4
-rw-r--r--  src/cmd/go/testdata/script/mod_edit.txt | 14
-rw-r--r--  src/cmd/go/testdata/script/mod_get_test.txt | 2
-rw-r--r--  src/cmd/go/testdata/script/mod_goroot_errors.txt | 53
-rw-r--r--  src/cmd/go/testdata/script/mod_list_e_readonly.txt | 15
-rw-r--r--  src/cmd/go/testdata/script/mod_load_badchain.txt | 2
-rw-r--r--  src/cmd/go/testdata/script/mod_readonly.txt | 8
-rw-r--r--  src/cmd/go/testdata/script/mod_replace_gopkgin.txt | 57
-rw-r--r--  src/cmd/go/testdata/script/mod_replace_import.txt | 19
-rw-r--r--  src/cmd/go/testdata/script/mod_tidy_error.txt | 4
-rw-r--r--  src/cmd/go/testdata/script/mod_vendor.txt | 6
-rw-r--r--  src/cmd/go/testdata/script/mod_vendor_trimpath.txt | 45
-rw-r--r--  src/cmd/go/testdata/script/modfile_flag.txt | 9
-rw-r--r--  src/cmd/go/testdata/script/run_hello_pkg.txt | 14
-rw-r--r--  src/cmd/go/testdata/script/run_internal.txt (renamed from src/cmd/go/testdata/script/mod_run_internal.txt) | 34
-rw-r--r--  src/cmd/go/testdata/script/test_bad_example.txt | 13
-rw-r--r--  src/cmd/go/testdata/script/test_badtest.txt | 19
-rw-r--r--  src/cmd/go/testdata/script/test_benchmark_fatal.txt | 15
-rw-r--r--  src/cmd/go/testdata/script/test_benchmark_labels.txt | 19
-rw-r--r--  src/cmd/go/testdata/script/test_flag.txt (renamed from src/cmd/go/testdata/flag_test.go) | 6
-rw-r--r--  src/cmd/go/testdata/script/test_import_error_stack.txt | 17
-rw-r--r--  src/cmd/go/testdata/script/test_main_twice.txt (renamed from src/cmd/go/testdata/src/multimain/multimain_test.go) | 7
-rw-r--r--  src/cmd/go/testdata/script/test_match_no_tests_build_failure.txt | 15
-rw-r--r--  src/cmd/go/testdata/script/test_minus_n.txt | 14
-rw-r--r--  src/cmd/go/testdata/script/test_no_tests.txt | 11
-rw-r--r--  src/cmd/go/testdata/script/test_race.txt (renamed from src/cmd/go/testdata/src/testrace/race_test.go) | 16
-rw-r--r--  src/cmd/go/testdata/script/test_race_cover_mode_issue20435.txt | 44
-rw-r--r--  src/cmd/go/testdata/script/test_regexps.txt | 75
-rw-r--r--  src/cmd/go/testdata/script/test_syntax_error_says_fail.txt | 14
-rw-r--r--  src/cmd/go/testdata/script/vet_flags.txt | 38
-rw-r--r--  src/cmd/go/testdata/src/badc/x.c | 1
-rw-r--r--  src/cmd/go/testdata/src/badc/x.go | 1
-rw-r--r--  src/cmd/go/testdata/src/badpkg/x.go | 1
-rw-r--r--  src/cmd/go/testdata/src/bench/x_test.go | 6
-rw-r--r--  src/cmd/go/testdata/src/benchfatal/x_test.go | 7
-rw-r--r--  src/cmd/go/testdata/src/cgoasm/p.go | 8
-rw-r--r--  src/cmd/go/testdata/src/cgoasm/p.s | 2
-rw-r--r--  src/cmd/go/testdata/src/cgocover/p.go | 19
-rw-r--r--  src/cmd/go/testdata/src/cgocover/p_test.go | 7
-rw-r--r--  src/cmd/go/testdata/src/cgocover2/p.go | 19
-rw-r--r--  src/cmd/go/testdata/src/cgocover2/x_test.go | 10
-rw-r--r--  src/cmd/go/testdata/src/cgocover3/p.go | 19
-rw-r--r--  src/cmd/go/testdata/src/cgocover3/p_test.go | 1
-rw-r--r--  src/cmd/go/testdata/src/cgocover3/x_test.go | 10
-rw-r--r--  src/cmd/go/testdata/src/cgocover4/notcgo.go | 1
-rw-r--r--  src/cmd/go/testdata/src/cgocover4/p.go | 19
-rw-r--r--  src/cmd/go/testdata/src/cgocover4/x_test.go | 10
-rw-r--r--  src/cmd/go/testdata/src/dupload/dupload.go | 8
-rw-r--r--  src/cmd/go/testdata/src/dupload/p/p.go | 1
-rw-r--r--  src/cmd/go/testdata/src/dupload/p2/p2.go | 3
-rw-r--r--  src/cmd/go/testdata/src/dupload/vendor/p/p.go | 1
-rw-r--r--  src/cmd/go/testdata/src/gencycle/gencycle.go | 5
-rw-r--r--  src/cmd/go/testdata/src/importmain/ismain/main.go | 5
-rw-r--r--  src/cmd/go/testdata/src/importmain/test/test.go | 1
-rw-r--r--  src/cmd/go/testdata/src/importmain/test/test_test.go | 6
-rw-r--r--  src/cmd/go/testdata/src/not_main/not_main.go | 3
-rw-r--r--  src/cmd/go/testdata/src/notest/hello.go | 6
-rw-r--r--  src/cmd/go/testdata/src/run/bad.go | 5
-rw-r--r--  src/cmd/go/testdata/src/run/good.go | 5
-rw-r--r--  src/cmd/go/testdata/src/run/internal/internal.go | 1
-rw-r--r--  src/cmd/go/testdata/src/run/subdir/internal/private/private.go | 1
-rw-r--r--  src/cmd/go/testdata/src/sleepy1/p_test.go | 10
-rw-r--r--  src/cmd/go/testdata/src/sleepy2/p_test.go | 10
-rw-r--r--  src/cmd/go/testdata/src/sleepybad/p.go | 5
-rw-r--r--  src/cmd/go/testdata/src/syntaxerror/x.go | 1
-rw-r--r--  src/cmd/go/testdata/src/syntaxerror/x_test.go | 4
-rw-r--r--  src/cmd/go/testdata/src/testcycle/p1/p1.go | 7
-rw-r--r--  src/cmd/go/testdata/src/testcycle/p1/p1_test.go | 6
-rw-r--r--  src/cmd/go/testdata/src/testcycle/p2/p2.go | 7
-rw-r--r--  src/cmd/go/testdata/src/testcycle/p3/p3.go | 5
-rw-r--r--  src/cmd/go/testdata/src/testcycle/p3/p3_test.go | 10
-rw-r--r--  src/cmd/go/testdata/src/testcycle/q1/q1.go | 1
-rw-r--r--  src/cmd/go/testdata/src/testcycle/q1/q1_test.go | 6
-rw-r--r--  src/cmd/go/testdata/src/testdep/p1/p1.go | 1
-rw-r--r--  src/cmd/go/testdata/src/testdep/p1/p1_test.go | 3
-rw-r--r--  src/cmd/go/testdata/src/testdep/p2/p2.go | 3
-rw-r--r--  src/cmd/go/testdata/src/testdep/p3/p3.go | 3
-rw-r--r--  src/cmd/go/testdata/src/testnorun/p.go | 5
-rw-r--r--  src/cmd/go/testdata/src/testregexp/x_test.go | 17
-rw-r--r--  src/cmd/go/testdata/src/testregexp/z_test.go | 19
-rw-r--r--  src/cmd/internal/obj/riscv/asm_test.go | 133
-rw-r--r--  src/cmd/internal/obj/riscv/cpu.go | 72
-rw-r--r--  src/cmd/internal/obj/riscv/list.go | 4
-rw-r--r--  src/cmd/internal/obj/riscv/obj.go | 701
-rw-r--r--  src/cmd/internal/src/pos.go | 2
-rw-r--r--  src/cmd/link/dwarf_test.go | 1
-rw-r--r--  src/cmd/link/elf_test.go | 217
-rw-r--r--  src/cmd/link/internal/ld/data.go | 46
-rw-r--r--  src/cmd/link/internal/ld/deadcode2.go | 2
-rw-r--r--  src/cmd/link/internal/ld/pcln.go | 7
-rw-r--r--  src/cmd/link/internal/loadelf/ldelf.go | 65
-rw-r--r--  src/cmd/link/link_test.go | 1
-rw-r--r--  src/cmd/objdump/objdump_test.go | 12
-rw-r--r--  src/cmd/objdump/testdata/fmthello.go | 6
-rw-r--r--  src/cmd/pprof/readlineui.go | 2
-rw-r--r--  src/cmd/trace/trace_test.go | 33
-rw-r--r--  src/cmd/vendor/golang.org/x/crypto/ssh/terminal/terminal.go | 4
-rw-r--r--  src/cmd/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go | 4
-rw-r--r--  src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go | 3
-rw-r--r--  src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s | 54
-rw-r--r--  src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go | 2
-rw-r--r--  src/cmd/vendor/modules.txt | 4
-rw-r--r--  src/crypto/elliptic/p256.go | 2
-rw-r--r--  src/crypto/tls/conn.go | 2
-rw-r--r--  src/crypto/tls/tls.go | 3
-rw-r--r--  src/crypto/x509/root_cgo_darwin.go | 2
-rw-r--r--  src/crypto/x509/root_windows.go | 20
-rw-r--r--  src/database/sql/sql_test.go | 6
-rw-r--r--  src/encoding/gob/gobencdec_test.go | 2
-rw-r--r--  src/go.mod | 4
-rw-r--r--  src/go.sum | 8
-rw-r--r--  src/go/ast/ast.go | 6
-rw-r--r--  src/go/build/build.go | 78
-rw-r--r--  src/go/build/build_test.go | 80
-rw-r--r--  src/go/build/deps_test.go | 4
-rw-r--r--  src/go/build/testdata/cgo_disabled/cgo_disabled.go | 5
-rw-r--r--  src/go/build/testdata/cgo_disabled/empty.go | 1
-rw-r--r--  src/go/doc/example.go | 3
-rw-r--r--  src/go/types/builtins.go | 2
-rw-r--r--  src/go/types/call.go | 2
-rw-r--r--  src/go/types/check.go | 2
-rw-r--r--  src/go/types/errors.go | 9
-rw-r--r--  src/go/types/lookup.go | 20
-rw-r--r--  src/go/types/resolver.go | 1
-rw-r--r--  src/go/types/testdata/issues.src | 22
-rw-r--r--  src/html/escape.go | 2
-rw-r--r--  src/internal/poll/fcntl_js.go | 14
-rw-r--r--  src/internal/poll/fcntl_libc.go | 13
-rw-r--r--  src/internal/poll/fcntl_syscall.go | 20
-rw-r--r--  src/internal/poll/fd_fsync_darwin.go | 9
-rw-r--r--  src/internal/poll/fd_fsync_posix.go | 8
-rw-r--r--  src/internal/poll/fd_unix.go | 2
-rw-r--r--  src/internal/poll/splice_linux.go | 3
-rw-r--r--  src/internal/syscall/unix/fcntl_linux_32bit.go | 16
-rw-r--r--  src/internal/syscall/unix/nonblocking.go | 8
-rw-r--r--  src/internal/syscall/unix/nonblocking_libc.go (renamed from src/internal/syscall/unix/nonblocking_darwin.go) | 4
-rw-r--r--  src/internal/syscall/windows/reparse_windows.go | 10
-rw-r--r--  src/internal/syscall/windows/syscall_windows.go | 20
-rw-r--r--  src/io/example_test.go | 7
-rw-r--r--  src/io/io_test.go | 2
-rw-r--r--  src/io/multi_test.go | 2
-rw-r--r--  src/math/big/arith_decl.go | 2
-rw-r--r--  src/math/big/arith_decl_pure.go | 2
-rw-r--r--  src/math/big/int.go | 2
-rw-r--r--  src/math/rand/rand.go | 2
-rw-r--r--  src/math/stubs_riscv64.s | 113
-rw-r--r--  src/net/dial_test.go | 2
-rw-r--r--  src/net/dnsclient_unix_test.go | 2
-rw-r--r--  src/net/http/client.go | 17
-rw-r--r--  src/net/http/httputil/reverseproxy.go | 8
-rw-r--r--  src/net/http/omithttp2.go | 4
-rw-r--r--  src/net/http/request.go | 16
-rw-r--r--  src/net/http/serve_test.go | 191
-rw-r--r--  src/net/http/transfer.go | 125
-rw-r--r--  src/net/http/transfer_test.go | 284
-rw-r--r--  src/net/http/transport.go | 68
-rw-r--r--  src/net/http/transport_test.go | 136
-rw-r--r--  src/net/interface_windows.go | 2
-rw-r--r--  src/net/lookup_test.go | 6
-rw-r--r--  src/net/lookup_windows.go | 11
-rw-r--r--  src/net/net.go | 1
-rw-r--r--  src/os/dir_darwin.go | 10
-rw-r--r--  src/os/dir_unix.go | 2
-rw-r--r--  src/os/env_windows.go | 18
-rw-r--r--  src/os/exec/exec.go | 1
-rw-r--r--  src/os/exec/exec_plan9.go | 19
-rw-r--r--  src/os/exec/exec_test.go | 5
-rw-r--r--  src/os/exec_windows.go | 5
-rw-r--r--  src/os/file.go | 4
-rw-r--r--  src/os/file_unix.go | 1
-rw-r--r--  src/os/os_test.go | 42
-rw-r--r--  src/os/os_windows_test.go | 8
-rw-r--r--  src/os/user/lookup_windows.go | 11
-rw-r--r--  src/reflect/all_test.go | 21
-rw-r--r--  src/reflect/asm_riscv64.s | 36
-rw-r--r--  src/reflect/type.go | 23
-rw-r--r--  src/runtime/alg.go | 2
-rw-r--r--  src/runtime/asm_riscv64.s | 669
-rw-r--r--  src/runtime/atomic_riscv64.s | 12
-rw-r--r--  src/runtime/callers_test.go | 34
-rw-r--r--  src/runtime/cgocall.go | 19
-rw-r--r--  src/runtime/chan.go | 76
-rw-r--r--  src/runtime/chan_test.go | 15
-rw-r--r--  src/runtime/checkptr.go | 31
-rw-r--r--  src/runtime/checkptr_test.go | 46
-rw-r--r--  src/runtime/debug.go | 4
-rw-r--r--  src/runtime/defs_linux_386.go | 11
-rw-r--r--  src/runtime/defs_linux_amd64.go | 11
-rw-r--r--  src/runtime/defs_linux_riscv64.go | 209
-rw-r--r--  src/runtime/export_darwin_test.go | 13
-rw-r--r--  src/runtime/export_test.go | 51
-rw-r--r--  src/runtime/extern.go | 28
-rw-r--r--  src/runtime/gcinfo_test.go | 2
-rw-r--r--  src/runtime/hash64.go | 2
-rw-r--r--  src/runtime/internal/atomic/atomic_riscv64.go | 67
-rw-r--r--  src/runtime/internal/atomic/atomic_riscv64.s | 242
-rw-r--r--  src/runtime/internal/sys/arch.go | 1
-rw-r--r--  src/runtime/internal/sys/arch_riscv64.go | 18
-rw-r--r--  src/runtime/lfstack_64bit.go | 2
-rw-r--r--  src/runtime/malloc.go | 1
-rw-r--r--  src/runtime/malloc_test.go | 8
-rw-r--r--  src/runtime/mem_aix.go | 11
-rwxr-xr-x  src/runtime/memclr_riscv64.s | 44
-rw-r--r--  src/runtime/memmove_386.s | 2
-rw-r--r--  src/runtime/memmove_amd64.s | 2
-rw-r--r--  src/runtime/memmove_arm.s | 2
-rw-r--r--  src/runtime/memmove_arm64.s | 44
-rw-r--r--  src/runtime/memmove_mips64x.s | 2
-rw-r--r--  src/runtime/memmove_mipsx.s | 2
-rw-r--r--  src/runtime/memmove_plan9_386.s | 2
-rw-r--r--  src/runtime/memmove_plan9_amd64.s | 2
-rw-r--r--  src/runtime/memmove_ppc64x.s | 2
-rwxr-xr-x  src/runtime/memmove_riscv64.s | 98
-rw-r--r--  src/runtime/memmove_s390x.s | 2
-rw-r--r--  src/runtime/memmove_test.go | 67
-rw-r--r--  src/runtime/memmove_wasm.s | 2
-rw-r--r--  src/runtime/mgc.go | 15
-rw-r--r--  src/runtime/mgcmark.go | 2
-rw-r--r--  src/runtime/mgcscavenge.go | 198
-rw-r--r--  src/runtime/mgcscavenge_test.go | 28
-rw-r--r--  src/runtime/mheap.go | 15
-rw-r--r--  src/runtime/mkpreempt.go | 6
-rw-r--r--  src/runtime/mpagealloc.go | 240
-rw-r--r--  src/runtime/mpagealloc_32bit.go | 7
-rw-r--r--  src/runtime/mpagealloc_64bit.go | 100
-rw-r--r--  src/runtime/mpagealloc_test.go | 246
-rw-r--r--  src/runtime/mpagecache.go | 16
-rw-r--r--  src/runtime/mpagecache_test.go | 6
-rw-r--r--  src/runtime/mpallocbits.go | 13
-rw-r--r--  src/runtime/mranges.go | 161
-rw-r--r--  src/runtime/nbpipe_fcntl_libc_test.go | 2
-rw-r--r--  src/runtime/nbpipe_fcntl_unix_test.go | 9
-rw-r--r--  src/runtime/os2_aix.go | 12
-rw-r--r--  src/runtime/os_linux.go | 19
-rw-r--r--  src/runtime/os_linux_arm.go | 2
-rw-r--r--  src/runtime/os_linux_arm64.go | 2
-rw-r--r--  src/runtime/os_linux_mips64x.go | 2
-rw-r--r--  src/runtime/os_linux_mipsx.go | 2
-rw-r--r--  src/runtime/os_linux_ppc64x.go | 2
-rw-r--r--  src/runtime/os_linux_riscv64.go | 7
-rw-r--r--  src/runtime/os_linux_s390x.go | 2
-rw-r--r--  src/runtime/os_linux_x86.go | 74
-rw-r--r--  src/runtime/os_windows.go | 95
-rw-r--r--  src/runtime/panic.go | 6
-rw-r--r--  src/runtime/preempt.go | 14
-rw-r--r--  src/runtime/preempt_nonwindows.go | 13
-rw-r--r--  src/runtime/preempt_riscv64.s | 8
-rw-r--r--  src/runtime/proc.go | 117
-rw-r--r--  src/runtime/race.go | 3
-rw-r--r--  src/runtime/race/race.go | 2
-rw-r--r--  src/runtime/rt0_linux_riscv64.s | 14
-rw-r--r--  src/runtime/runtime1.go | 4
-rw-r--r--  src/runtime/runtime2.go | 13
-rw-r--r--  src/runtime/sema.go | 6
-rw-r--r--  src/runtime/signal_linux_riscv64.go | 68
-rw-r--r--  src/runtime/signal_riscv64.go | 85
-rw-r--r--  src/runtime/signal_windows.go | 9
-rw-r--r--  src/runtime/string.go | 34
-rw-r--r--  src/runtime/string_test.go | 31
-rw-r--r--  src/runtime/stubs.go | 12
-rw-r--r--  src/runtime/sys_freebsd_arm64.s | 29
-rw-r--r--  src/runtime/sys_linux_386.s | 19
-rw-r--r--  src/runtime/sys_linux_amd64.s | 26
-rw-r--r--  src/runtime/sys_linux_arm.s | 16
-rw-r--r--  src/runtime/sys_linux_arm64.s | 6
-rw-r--r--  src/runtime/sys_linux_ppc64x.s | 2
-rw-r--r--  src/runtime/sys_linux_riscv64.s | 517
-rw-r--r--  src/runtime/sys_openbsd_arm64.s | 10
-rw-r--r--  src/runtime/sys_riscv64.go | 18
-rw-r--r--  src/runtime/syscall_solaris.go | 3
-rw-r--r--  src/runtime/testdata/testprog/checkptr.go | 36
-rw-r--r--  src/runtime/testdata/testprog/preempt.go | 10
-rw-r--r--  src/runtime/time.go | 239
-rw-r--r--  src/runtime/tls_riscv64.s | 18
-rw-r--r--  src/runtime/trace.go | 17
-rw-r--r--  src/runtime/trace/trace_stack_test.go | 1
-rw-r--r--  src/runtime/utf8.go | 2
-rw-r--r--  src/strconv/quote.go | 11
-rw-r--r--  src/strings/strings.go | 22
-rw-r--r--  src/sync/waitgroup_test.go | 11
-rw-r--r--  src/syscall/asm_linux_riscv64.s | 24
-rw-r--r--  src/syscall/exec_linux.go | 6
-rw-r--r--  src/syscall/flock_linux_32bit.go | 7
-rw-r--r--  src/syscall/fs_js.go | 8
-rw-r--r--  src/syscall/lsf_linux.go | 4
-rw-r--r--  src/syscall/netlink_linux.go | 2
-rw-r--r--  src/syscall/security_windows.go | 2
-rw-r--r--  src/syscall/sock_cloexec_linux.go | 29
-rw-r--r--  src/syscall/syscall_aix.go | 2
-rw-r--r--  src/syscall/syscall_linux_riscv64.go | 6
-rw-r--r--  src/syscall/syscall_windows.go | 21
-rw-r--r--  src/syscall/types_linux.go | 1
-rw-r--r--  src/syscall/zsyscall_linux_riscv64.go | 4
-rw-r--r--  src/syscall/ztypes_linux_riscv64.go | 78
-rw-r--r--  src/testing/benchmark.go | 12
-rw-r--r--  src/testing/example.go | 4
-rw-r--r--  src/testing/panic_test.go | 129
-rw-r--r--  src/testing/sub_test.go | 31
-rw-r--r--  src/testing/testing.go | 65
-rw-r--r--  src/text/template/exec_test.go | 3
-rw-r--r--  src/text/template/funcs.go | 4
-rw-r--r--  src/text/template/parse/lex.go | 1
-rw-r--r--  src/text/template/template.go | 8
-rw-r--r--  src/time/format.go | 3
-rw-r--r--  src/time/sleep.go | 2
-rw-r--r--  src/time/sleep_test.go | 2
-rw-r--r--  src/time/time.go | 3
-rw-r--r--  src/unicode/utf8/utf8.go | 2
-rw-r--r--  src/vendor/golang.org/x/crypto/cryptobyte/asn1.go | 5
-rw-r--r--  src/vendor/golang.org/x/crypto/cryptobyte/string.go | 7
-rw-r--r--  src/vendor/golang.org/x/crypto/poly1305/sum_arm.go | 19
-rw-r--r--  src/vendor/golang.org/x/crypto/poly1305/sum_arm.s | 427
-rw-r--r--  src/vendor/golang.org/x/crypto/poly1305/sum_noasm.go | 2
-rw-r--r--  src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11
-rw-r--r--  src/vendor/modules.txt | 4
414 files changed, 16934 insertions, 3107 deletions
diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
index 3943718517..4f9135b791 100644
--- a/src/archive/tar/reader.go
+++ b/src/archive/tar/reader.go
@@ -433,7 +433,7 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
// files generated by a pre-Go1.8 toolchain. If the generated file
// happened to have a prefix field that parses as valid
// atime and ctime fields (e.g., when they are valid octal strings),
- // then it is impossible to distinguish between an valid GNU file
+ // then it is impossible to distinguish between a valid GNU file
// and an invalid pre-Go1.8 file.
//
// See https://golang.org/issues/12594
diff --git a/src/cmd/asm/internal/asm/testdata/riscvenc.s b/src/cmd/asm/internal/asm/testdata/riscvenc.s
index 73f41dd877..8a905514db 100644
--- a/src/cmd/asm/internal/asm/testdata/riscvenc.s
+++ b/src/cmd/asm/internal/asm/testdata/riscvenc.s
@@ -60,11 +60,13 @@ start:
AUIPC $0, X10 // 17050000
AUIPC $0, X11 // 97050000
AUIPC $1, X10 // 17150000
- AUIPC $1048575, X10 // 17f5ffff
+ AUIPC $-524288, X15 // 97070080
+ AUIPC $524287, X10 // 17f5ff7f
LUI $0, X15 // b7070000
LUI $167, X15 // b7770a00
- LUI $1048575, X15 // b7f7ffff
+ LUI $-524288, X15 // b7070080
+ LUI $524287, X15 // b7f7ff7f
SLL X6, X5, X7 // b3936200
SLL X5, X6 // 33135300
@@ -89,15 +91,15 @@ start:
// to 2 because they transfer control to the second instruction
// in the function (the first instruction being an invisible
// stack pointer adjustment).
- JAL X5, start // JAL X5, 2 // eff2dff0
+ JAL X5, start // JAL X5, 2 // eff25ff0
JALR X6, (X5) // 67830200
JALR X6, 4(X5) // 67834200
- BEQ X5, X6, start // BEQ X5, X6, 2 // e38062f0
- BNE X5, X6, start // BNE X5, X6, 2 // e39e62ee
- BLT X5, X6, start // BLT X5, X6, 2 // e3cc62ee
- BLTU X5, X6, start // BLTU X5, X6, 2 // e3ea62ee
- BGE X5, X6, start // BGE X5, X6, 2 // e3d862ee
- BGEU X5, X6, start // BGEU X5, X6, 2 // e3f662ee
+ BEQ X5, X6, start // BEQ X5, X6, 2 // e38c62ee
+ BNE X5, X6, start // BNE X5, X6, 2 // e39a62ee
+ BLT X5, X6, start // BLT X5, X6, 2 // e3c862ee
+ BLTU X5, X6, start // BLTU X5, X6, 2 // e3e662ee
+ BGE X5, X6, start // BGE X5, X6, 2 // e3d462ee
+ BGEU X5, X6, start // BGEU X5, X6, 2 // e3f262ee
// 2.6: Load and Store Instructions
LW (X5), X6 // 03a30200
@@ -267,3 +269,40 @@ start:
MOVD 4(X5), F0 // 07b04200
MOVD F0, 4(X5) // 27b20200
MOVD F0, F1 // d3000022
+
+ // These jumps can get printed as jumps to 2 because they go to the
+ // second instruction in the function (the first instruction is an
+ // invisible stack pointer adjustment).
+ JMP start // JMP 2 // 6ff0dfcc
+ JMP (X5) // 67800200
+ JMP 4(X5) // 67804200
+
+ // JMP and CALL to symbol are encoded as:
+ // AUIPC $0, TMP
+ // JALR $0, TMP
+ // with a R_RISCV_PCREL_ITYPE relocation - the linker resolves the
+ // real address and updates the immediates for both instructions.
+ CALL asmtest(SB) // 970f0000
+ JMP asmtest(SB) // 970f0000
+
+ SEQZ X15, X15 // 93b71700
+ SNEZ X15, X15 // b337f000
+
+ // F extension
+ FNEGS F0, F1 // d3100020
+
+ // TODO(jsing): FNES gets encoded as FEQS+XORI - this should
+ // be handled as a single *obj.Prog so that the full two
+ // instruction encoding is tested here.
+ FNES F0, F1, X7 // d3a300a0
+
+ // D extension
+ FNEGD F0, F1 // d3100022
+ FEQD F0, F1, X5 // d3a200a2
+ FLTD F0, F1, X5 // d39200a2
+ FLED F0, F1, X5 // d38200a2
+
+ // TODO(jsing): FNED gets encoded as FEQD+XORI - this should
+ // be handled as a single *obj.Prog so that the full two
+ // instruction encoding is tested here.
+ FNED F0, F1, X5 // d3a200a2
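
The expected byte sequences in the comments above (such as 970f0000 for AUIPC $0 to the temporary register) are little-endian encodings of RISC-V U-type instructions. As a rough illustration of how those expectations can be checked by hand, here is a small Go sketch (not part of this CL) that rebuilds two of them from the U-type layout in the RISC-V spec; it assumes the assembler's TMP register is X31, as in cmd/internal/obj/riscv.

package main

import "fmt"

// encodeUType builds a RISC-V U-type instruction word
// (the format shared by LUI and AUIPC): imm[31:12] | rd | opcode.
func encodeUType(opcode, rd uint32, imm int32) uint32 {
	return (uint32(imm)&0xfffff)<<12 | rd<<7 | opcode
}

// leHex renders an instruction word as the little-endian byte string
// used in the test expectations above.
func leHex(w uint32) string {
	return fmt.Sprintf("%02x%02x%02x%02x", byte(w), byte(w>>8), byte(w>>16), byte(w>>24))
}

func main() {
	const opAUIPC, opLUI = 0x17, 0x37
	fmt.Println(leHex(encodeUType(opAUIPC, 31, 0)))     // AUIPC $0, X31     -> 970f0000
	fmt.Println(leHex(encodeUType(opLUI, 15, -524288))) // LUI $-524288, X15 -> b7070080
}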
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index 95576b6395..d7fc5416e2 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -5,12 +5,14 @@
package gc
import (
+ "bytes"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"io"
"strconv"
"strings"
+ "sync"
"unicode/utf8"
)
@@ -650,23 +652,64 @@ var basicnames = []string{
TBLANK: "blank",
}
-func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
+var tconvBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+func tconv(t *types.Type, flag FmtFlag, mode fmtMode) string {
+ buf := tconvBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer tconvBufferPool.Put(buf)
+
+ tconv2(buf, t, flag, mode, nil)
+ return types.InternString(buf.Bytes())
+}
+
+// tconv2 writes a string representation of t to b.
+// flag and mode control exactly what is printed.
+// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
+// See #16897 before changing the implementation of tconv.
+func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited map[*types.Type]int) {
+ if off, ok := visited[t]; ok {
+ // We've seen this type before, so we're trying to print it recursively.
+ // Print a reference to it instead.
+ fmt.Fprintf(b, "@%d", off)
+ return
+ }
if t == nil {
- return "<T>"
+ b.WriteString("<T>")
+ return
+ }
+ if t.Etype == types.TSSA {
+ b.WriteString(t.Extra.(string))
+ return
+ }
+ if t.Etype == types.TTUPLE {
+ b.WriteString(t.FieldType(0).String())
+ b.WriteByte(',')
+ b.WriteString(t.FieldType(1).String())
+ return
}
+ flag, mode = flag.update(mode)
+ if mode == FTypeIdName {
+ flag |= FmtUnsigned
+ }
if t == types.Bytetype || t == types.Runetype {
// in %-T mode collapse rune and byte with their originals.
switch mode {
case FTypeIdName, FTypeId:
t = types.Types[t.Etype]
default:
- return sconv(t.Sym, FmtShort, mode)
+ b.WriteString(sconv(t.Sym, FmtShort, mode))
+ return
}
}
-
if t == types.Errortype {
- return "error"
+ b.WriteString("error")
+ return
}
// Unless the 'L' flag was specified, if the type has a name, just print that name.
@@ -675,161 +718,197 @@ func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
case FTypeId, FTypeIdName:
if flag&FmtShort != 0 {
if t.Vargen != 0 {
- return mode.Sprintf("%v·%d", sconv(t.Sym, FmtShort, mode), t.Vargen)
+ fmt.Fprintf(b, "%s·%d", sconv(t.Sym, FmtShort, mode), t.Vargen)
+ return
}
- return sconv(t.Sym, FmtShort, mode)
+ b.WriteString(sconv(t.Sym, FmtShort, mode))
+ return
}
if mode == FTypeIdName {
- return sconv(t.Sym, FmtUnsigned, mode)
+ b.WriteString(sconv(t.Sym, FmtUnsigned, mode))
+ return
}
if t.Sym.Pkg == localpkg && t.Vargen != 0 {
- return mode.Sprintf("%v·%d", t.Sym, t.Vargen)
+ b.WriteString(mode.Sprintf("%v·%d", t.Sym, t.Vargen))
+ return
}
}
- return smodeString(t.Sym, mode)
+ b.WriteString(smodeString(t.Sym, mode))
+ return
}
if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
+ var name string
switch t {
case types.Idealbool:
- return "untyped bool"
+ name = "untyped bool"
case types.Idealstring:
- return "untyped string"
+ name = "untyped string"
case types.Idealint:
- return "untyped int"
+ name = "untyped int"
case types.Idealrune:
- return "untyped rune"
+ name = "untyped rune"
case types.Idealfloat:
- return "untyped float"
+ name = "untyped float"
case types.Idealcomplex:
- return "untyped complex"
+ name = "untyped complex"
+ default:
+ name = basicnames[t.Etype]
}
- return basicnames[t.Etype]
+ b.WriteString(name)
+ return
}
- if mode == FDbg {
- return t.Etype.String() + "-" + typefmt(t, flag, FErr, depth)
+ // At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
+ // try to print it recursively.
+ // We record the offset in the result buffer where the type's text starts. This offset serves as a reference
+ // point for any later references to the same type.
+ // Note that we remove the type from the visited map as soon as the recursive call is done.
+ // This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work,
+ // but I'd like to use the @ notation only when strictly necessary.)
+ if visited == nil {
+ visited = map[*types.Type]int{}
}
+ visited[t] = b.Len()
+ defer delete(visited, t)
+ if mode == FDbg {
+ b.WriteString(t.Etype.String())
+ b.WriteByte('-')
+ tconv2(b, t, flag, FErr, visited)
+ return
+ }
switch t.Etype {
case TPTR:
+ b.WriteByte('*')
switch mode {
case FTypeId, FTypeIdName:
if flag&FmtShort != 0 {
- return "*" + tconv(t.Elem(), FmtShort, mode, depth)
+ tconv2(b, t.Elem(), FmtShort, mode, visited)
+ return
}
}
- return "*" + tmodeString(t.Elem(), mode, depth)
+ tconv2(b, t.Elem(), 0, mode, visited)
case TARRAY:
- return "[" + strconv.FormatInt(t.NumElem(), 10) + "]" + tmodeString(t.Elem(), mode, depth)
+ b.WriteByte('[')
+ b.WriteString(strconv.FormatInt(t.NumElem(), 10))
+ b.WriteByte(']')
+ tconv2(b, t.Elem(), 0, mode, visited)
case TSLICE:
- return "[]" + tmodeString(t.Elem(), mode, depth)
+ b.WriteString("[]")
+ tconv2(b, t.Elem(), 0, mode, visited)
case TCHAN:
switch t.ChanDir() {
case types.Crecv:
- return "<-chan " + tmodeString(t.Elem(), mode, depth)
-
+ b.WriteString("<-chan ")
+ tconv2(b, t.Elem(), 0, mode, visited)
case types.Csend:
- return "chan<- " + tmodeString(t.Elem(), mode, depth)
- }
-
- if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == types.Crecv {
- return "chan (" + tmodeString(t.Elem(), mode, depth) + ")"
+ b.WriteString("chan<- ")
+ tconv2(b, t.Elem(), 0, mode, visited)
+ default:
+ b.WriteString("chan ")
+ if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == types.Crecv {
+ b.WriteByte('(')
+ tconv2(b, t.Elem(), 0, mode, visited)
+ b.WriteByte(')')
+ } else {
+ tconv2(b, t.Elem(), 0, mode, visited)
+ }
}
- return "chan " + tmodeString(t.Elem(), mode, depth)
case TMAP:
- return "map[" + tmodeString(t.Key(), mode, depth) + "]" + tmodeString(t.Elem(), mode, depth)
+ b.WriteString("map[")
+ tconv2(b, t.Key(), 0, mode, visited)
+ b.WriteByte(']')
+ tconv2(b, t.Elem(), 0, mode, visited)
case TINTER:
if t.IsEmptyInterface() {
- return "interface {}"
+ b.WriteString("interface {}")
+ break
}
- buf := make([]byte, 0, 64)
- buf = append(buf, "interface {"...)
+ b.WriteString("interface {")
for i, f := range t.Fields().Slice() {
if i != 0 {
- buf = append(buf, ';')
+ b.WriteByte(';')
}
- buf = append(buf, ' ')
+ b.WriteByte(' ')
switch {
case f.Sym == nil:
// Check first that a symbol is defined for this type.
// Wrong interface definitions may have types lacking a symbol.
break
case types.IsExported(f.Sym.Name):
- buf = append(buf, sconv(f.Sym, FmtShort, mode)...)
+ b.WriteString(sconv(f.Sym, FmtShort, mode))
default:
flag1 := FmtLeft
if flag&FmtUnsigned != 0 {
flag1 = FmtUnsigned
}
- buf = append(buf, sconv(f.Sym, flag1, mode)...)
+ b.WriteString(sconv(f.Sym, flag1, mode))
}
- buf = append(buf, tconv(f.Type, FmtShort, mode, depth)...)
+ tconv2(b, f.Type, FmtShort, mode, visited)
}
if t.NumFields() != 0 {
- buf = append(buf, ' ')
+ b.WriteByte(' ')
}
- buf = append(buf, '}')
- return string(buf)
+ b.WriteByte('}')
case TFUNC:
- buf := make([]byte, 0, 64)
if flag&FmtShort != 0 {
// no leading func
} else {
if t.Recv() != nil {
- buf = append(buf, "method"...)
- buf = append(buf, tmodeString(t.Recvs(), mode, depth)...)
- buf = append(buf, ' ')
+ b.WriteString("method")
+ tconv2(b, t.Recvs(), 0, mode, visited)
+ b.WriteByte(' ')
}
- buf = append(buf, "func"...)
+ b.WriteString("func")
}
- buf = append(buf, tmodeString(t.Params(), mode, depth)...)
+ tconv2(b, t.Params(), 0, mode, visited)
switch t.NumResults() {
case 0:
// nothing to do
case 1:
- buf = append(buf, ' ')
- buf = append(buf, tmodeString(t.Results().Field(0).Type, mode, depth)...) // struct->field->field's type
+ b.WriteByte(' ')
+ tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
default:
- buf = append(buf, ' ')
- buf = append(buf, tmodeString(t.Results(), mode, depth)...)
+ b.WriteByte(' ')
+ tconv2(b, t.Results(), 0, mode, visited)
}
- return string(buf)
case TSTRUCT:
if m := t.StructType().Map; m != nil {
mt := m.MapType()
// Format the bucket struct for map[x]y as map.bucket[x]y.
// This avoids a recursive print that generates very long names.
- var subtype string
switch t {
case mt.Bucket:
- subtype = "bucket"
+ b.WriteString("map.bucket[")
case mt.Hmap:
- subtype = "hdr"
+ b.WriteString("map.hdr[")
case mt.Hiter:
- subtype = "iter"
+ b.WriteString("map.iter[")
default:
Fatalf("unknown internal map type")
}
- return fmt.Sprintf("map.%s[%s]%s", subtype, tmodeString(m.Key(), mode, depth), tmodeString(m.Elem(), mode, depth))
+ tconv2(b, m.Key(), 0, mode, visited)
+ b.WriteByte(']')
+ tconv2(b, m.Elem(), 0, mode, visited)
+ break
}
- buf := make([]byte, 0, 64)
if funarg := t.StructType().Funarg; funarg != types.FunargNone {
- buf = append(buf, '(')
+ b.WriteByte('(')
var flag1 FmtFlag
switch mode {
case FTypeId, FTypeIdName, FErr:
@@ -838,42 +917,42 @@ func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
}
for i, f := range t.Fields().Slice() {
if i != 0 {
- buf = append(buf, ", "...)
+ b.WriteString(", ")
}
- buf = append(buf, fldconv(f, flag1, mode, depth, funarg)...)
+ fldconv(b, f, flag1, mode, visited, funarg)
}
- buf = append(buf, ')')
+ b.WriteByte(')')
} else {
- buf = append(buf, "struct {"...)
+ b.WriteString("struct {")
for i, f := range t.Fields().Slice() {
if i != 0 {
- buf = append(buf, ';')
+ b.WriteByte(';')
}
- buf = append(buf, ' ')
- buf = append(buf, fldconv(f, FmtLong, mode, depth, funarg)...)
+ b.WriteByte(' ')
+ fldconv(b, f, FmtLong, mode, visited, funarg)
}
if t.NumFields() != 0 {
- buf = append(buf, ' ')
+ b.WriteByte(' ')
}
- buf = append(buf, '}')
+ b.WriteByte('}')
}
- return string(buf)
case TFORW:
+ b.WriteString("undefined")
if t.Sym != nil {
- return "undefined " + smodeString(t.Sym, mode)
+ b.WriteByte(' ')
+ b.WriteString(smodeString(t.Sym, mode))
}
- return "undefined"
case TUNSAFEPTR:
- return "unsafe.Pointer"
+ b.WriteString("unsafe.Pointer")
case Txxx:
- return "Txxx"
+ b.WriteString("Txxx")
+ default:
+ // Don't know how to handle - fall back to detailed prints.
+ b.WriteString(mode.Sprintf("%v <%v>", t.Etype, t.Sym))
}
-
- // Don't know how to handle - fall back to detailed prints.
- return mode.Sprintf("%v <%v>", t.Etype, t.Sym)
}
// Statements which may be rendered with a simplestmt as init.
@@ -1657,15 +1736,11 @@ func sconv(s *types.Sym, flag FmtFlag, mode fmtMode) string {
return symfmt(s, flag, mode)
}
-func tmodeString(t *types.Type, mode fmtMode, depth int) string {
- return tconv(t, 0, mode, depth)
-}
-
-func fldconv(f *types.Field, flag FmtFlag, mode fmtMode, depth int, funarg types.Funarg) string {
+func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visited map[*types.Type]int, funarg types.Funarg) {
if f == nil {
- return "<T>"
+ b.WriteString("<T>")
+ return
}
-
flag, mode = flag.update(mode)
if mode == FTypeIdName {
flag |= FmtUnsigned
@@ -1694,27 +1769,26 @@ func fldconv(f *types.Field, flag FmtFlag, mode fmtMode, depth int, funarg types
}
}
- var typ string
+ if name != "" {
+ b.WriteString(name)
+ b.WriteString(" ")
+ }
+
if f.IsDDD() {
var et *types.Type
if f.Type != nil {
et = f.Type.Elem()
}
- typ = "..." + tmodeString(et, mode, depth)
+ b.WriteString("...")
+ tconv2(b, et, 0, mode, visited)
} else {
- typ = tmodeString(f.Type, mode, depth)
- }
-
- str := typ
- if name != "" {
- str = name + " " + typ
+ tconv2(b, f.Type, 0, mode, visited)
}
if flag&FmtShort == 0 && funarg == types.FunargNone && f.Note != "" {
- str += " " + strconv.Quote(f.Note)
+ b.WriteString(" ")
+ b.WriteString(strconv.Quote(f.Note))
}
-
- return str
}
// "%L" print definition, not name
@@ -1722,45 +1796,12 @@ func fldconv(f *types.Field, flag FmtFlag, mode fmtMode, depth int, funarg types
func typeFormat(t *types.Type, s fmt.State, verb rune, mode fmtMode) {
switch verb {
case 'v', 'S', 'L':
- // This is an external entry point, so we pass depth 0 to tconv.
- // See comments in Type.String.
- fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode, 0))
-
+ fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode))
default:
fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
}
}
-// See #16897 before changing the implementation of tconv.
-func tconv(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
- if t == nil {
- return "<T>"
- }
- if t.Etype == types.TSSA {
- return t.Extra.(string)
- }
- if t.Etype == types.TTUPLE {
- return t.FieldType(0).String() + "," + t.FieldType(1).String()
- }
-
- // Avoid endless recursion by setting an upper limit. This also
- // limits the depths of valid composite types, but they are likely
- // artificially created.
- // TODO(gri) should have proper cycle detection here, eventually (issue #29312)
- if depth > 250 {
- return "<...>"
- }
-
- flag, mode = flag.update(mode)
- if mode == FTypeIdName {
- flag |= FmtUnsigned
- }
-
- str := typefmt(t, flag, mode, depth+1)
-
- return str
-}
-
func (n *Node) String() string { return fmt.Sprint(n) }
func (n *Node) modeString(mode fmtMode) string { return mode.Sprint(n) }
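
The tconv/tconv2 rewrite above combines two techniques: a sync.Pool of bytes.Buffer so formatting does not allocate a fresh buffer per call, and a visited map from type to buffer offset so that a recursive type prints a back-reference (@offset) instead of recursing without bound. A minimal standalone sketch of the same shape, using a hypothetical node type in place of *types.Type:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// node is a hypothetical recursive structure standing in for *types.Type.
type node struct {
	name string
	elem *node
}

var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

// format prints n using a pooled buffer, emitting "@off" when a node is
// reached again while it is still being printed; off is the buffer offset
// where that node's text started, as in tconv2 above.
func format(n *node) string {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)
	format2(buf, n, nil)
	return buf.String()
}

func format2(b *bytes.Buffer, n *node, visited map[*node]int) {
	if off, ok := visited[n]; ok {
		fmt.Fprintf(b, "@%d", off) // back-reference instead of infinite recursion
		return
	}
	if visited == nil {
		visited = map[*node]int{}
	}
	visited[n] = b.Len()
	defer delete(visited, n) // drop the entry once this node is fully printed
	b.WriteString(n.name)
	if n.elem != nil {
		b.WriteByte('[')
		format2(b, n.elem, visited)
		b.WriteByte(']')
	}
}

func main() {
	self := &node{name: "list"}
	self.elem = self          // a cyclic structure
	fmt.Println(format(self)) // prints: list[@0]
}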
diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/gc/inl_test.go
index cfe7f6f546..c8521e584d 100644
--- a/src/cmd/compile/internal/gc/inl_test.go
+++ b/src/cmd/compile/internal/gc/inl_test.go
@@ -175,7 +175,7 @@ func TestIntendedInlining(t *testing.T) {
}
switch runtime.GOARCH {
- case "386", "wasm", "arm":
+ case "386", "wasm", "arm", "riscv64":
default:
// TODO(mvdan): As explained in /test/inline_sync.go, some
// architectures don't have atomic intrinsics, so these go over
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index aa690921bc..9b89d7b8b1 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -511,8 +511,8 @@ func Main(archInit func(*Arch)) {
types.Sconv = func(s *types.Sym, flag, mode int) string {
return sconv(s, FmtFlag(flag), fmtMode(mode))
}
- types.Tconv = func(t *types.Type, flag, mode, depth int) string {
- return tconv(t, FmtFlag(flag), fmtMode(mode), depth)
+ types.Tconv = func(t *types.Type, flag, mode int) string {
+ return tconv(t, FmtFlag(flag), fmtMode(mode))
}
types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
symFormat(sym, s, verb, fmtMode(mode))
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index 5b9f31426d..d406780a79 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -705,6 +705,12 @@ func (lv *Liveness) markUnsafePoints() {
v = v.Args[0]
continue
}
+ case ssa.OpRISCV64SUB:
+ // RISCV64 lowers Neq32 to include a SUB with multiple arguments.
+ // TODO(jsing): it would be preferable not to use Neq32 for
+ // writeBuffer.enabled checks on this platform.
+ v = v.Args[0]
+ continue
case ssa.Op386MOVLload, ssa.OpARM64MOVWUload, ssa.OpPPC64MOVWZload, ssa.OpWasmI64Load32U:
// Args[0] is the address of the write
// barrier control. Ignore Args[1],
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index bda170ec0e..b8e99f08c5 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -340,7 +340,8 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.hasOpenDefers = Debug['N'] == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
- if s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386" {
+ switch {
+ case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that is generated by gencallret()
@@ -5968,23 +5969,22 @@ func genssa(f *ssa.Func, pp *Progs) {
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.next
- s.pp.nextLive = LivenessInvalid
s.lineRunStart = nil
+ // Attach a "default" liveness info. Normally this will be
+ // overwritten in the Values loop below for each Value. But
+ // for an empty block this will be used for its control
+ // instruction. We won't use the actual liveness map on a
+ // control instruction. Just mark it something that is
+ // preemptible.
+ s.pp.nextLive = LivenessIndex{-1, -1}
+
// Emit values in block
thearch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := s.pp.next
s.DebugFriendlySetPosFrom(v)
- // Attach this safe point to the next
- // instruction.
- s.pp.nextLive = s.livenessMap.Get(v)
- // Remember the liveness index of the first defer call of
- // the last defer exit
- if v.Block.Func.LastDeferExit != nil && v == v.Block.Func.LastDeferExit {
- s.lastDeferLiveness = s.pp.nextLive
- }
switch v.Op {
case ssa.OpInitMem:
// memory arg needs no code
@@ -6018,12 +6018,22 @@ func genssa(f *ssa.Func, pp *Progs) {
inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
default:
- // let the backend handle it
+ // Attach this safe point to the next
+ // instruction.
+ s.pp.nextLive = s.livenessMap.Get(v)
+
+ // Remember the liveness index of the first defer call of
+ // the last defer exit
+ if v.Block.Func.LastDeferExit != nil && v == v.Block.Func.LastDeferExit {
+ s.lastDeferLiveness = s.pp.nextLive
+ }
+
// Special case for first line in function; move it to the start.
if firstPos != src.NoXPos {
s.SetPos(firstPos)
firstPos = src.NoXPos
}
+ // let the backend handle it
thearch.SSAGenValue(&s, v)
}
@@ -6524,7 +6534,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch thearch.LinkArch.Family {
- case sys.AMD64, sys.I386, sys.PPC64, sys.S390X, sys.Wasm:
+ case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index ef88db4742..b8b954c4fc 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -1705,7 +1705,6 @@ func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node
if ddd != nil {
esc = ddd.Esc
}
-
if len(args) == 0 {
n := nodnil()
n.Type = typ
@@ -1740,6 +1739,9 @@ func walkCall(n *Node, init *Nodes) {
// then assign the remaining arguments as a slice.
if nf := params.NumFields(); nf > 0 {
if last := params.Field(nf - 1); last.IsDDD() && !n.IsDDD() {
+ // The callsite does not use a ..., but the called function is declared
+ // with a final argument that has a ... . Build the slice that we will
+ // pass as the ... argument.
tail := args[nf-1:]
slice := mkdotargslice(last.Type, tail, init, n.Right)
// Allow immediate GC.
@@ -4067,11 +4069,15 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
n = cheapexpr(n, init)
- slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals, init, nil)
- slice.Esc = EscNone
- slice.SetTransient(true)
+ ddd := nodl(n.Pos, ODDDARG, nil, nil)
+ ddd.Type = types.NewPtr(types.NewArray(types.Types[TUNSAFEPTR], int64(len(originals))))
+ ddd.Esc = EscNone
+ slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals, init, ddd)
init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[TUNSAFEPTR]), slice))
+ // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
+ // the backing store for multiple calls to checkptrArithmetic.
+
return n
}
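
For context on the walkCall comment above: when a call site passes plain arguments to a function whose final parameter is variadic, the compiler gathers the trailing arguments into a slice (mkdotargslice). Conceptually the transformation is the same as this hand-written equivalence, shown here purely as an illustration:

package main

import "fmt"

func sum(label string, xs ...int) int {
	total := 0
	for _, x := range xs {
		total += x
	}
	fmt.Println(label, total)
	return total
}

func main() {
	// A call site without ... : the compiler builds the slice for the
	// final variadic parameter, much as mkdotargslice does above.
	sum("implicit", 1, 2, 3)

	// Roughly what the call is rewritten to:
	tail := []int{1, 2, 3}
	sum("explicit", tail...)
}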
diff --git a/src/cmd/compile/internal/logopt/log_opts.go b/src/cmd/compile/internal/logopt/log_opts.go
index 4064054593..2639d27105 100644
--- a/src/cmd/compile/internal/logopt/log_opts.go
+++ b/src/cmd/compile/internal/logopt/log_opts.go
@@ -321,12 +321,14 @@ func Enabled() bool {
// byPos sorts diagnostics by source position.
type byPos struct {
ctxt *obj.Link
- a []LoggedOpt
+ a []LoggedOpt
}
-func (x byPos) Len() int { return len(x.a) }
-func (x byPos) Less(i, j int) bool { return x.ctxt.OutermostPos(x.a[i].pos).Before(x.ctxt.OutermostPos(x.a[j].pos)) }
-func (x byPos) Swap(i, j int) { x.a[i], x.a[j] = x.a[j], x.a[i] }
+func (x byPos) Len() int { return len(x.a) }
+func (x byPos) Less(i, j int) bool {
+ return x.ctxt.OutermostPos(x.a[i].pos).Before(x.ctxt.OutermostPos(x.a[j].pos))
+}
+func (x byPos) Swap(i, j int) { x.a[i], x.a[j] = x.a[j], x.a[i] }
func writerForLSP(subdirpath, file string) io.WriteCloser {
basename := file
@@ -367,7 +369,7 @@ func uriIfy(f string) DocumentURI {
// Return filename, replacing a first occurrence of $GOROOT with the
// actual value of the GOROOT (because LSP does not speak "$GOROOT").
func uprootedPath(filename string) string {
- if ! strings.HasPrefix(filename, "$GOROOT/") {
+ if !strings.HasPrefix(filename, "$GOROOT/") {
return filename
}
return objabi.GOROOT + filename[len("$GOROOT"):]
@@ -379,7 +381,7 @@ func FlushLoggedOpts(ctxt *obj.Link, slashPkgPath string) {
return
}
- sort.Stable(byPos{ctxt,loggedOpts}) // Stable is necessary to preserve the per-function order, which is repeatable.
+ sort.Stable(byPos{ctxt, loggedOpts}) // Stable is necessary to preserve the per-function order, which is repeatable.
switch Format {
case Json0: // LSP 3.15
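
The comment on sort.Stable above leans on the defining property of a stable sort: elements that compare equal keep their original relative order, which here preserves the per-function order of logged optimizations. A toy demonstration of that property (the types are made up for the example):

package main

import (
	"fmt"
	"sort"
)

// opt is a stand-in for LoggedOpt: line is the sort key, and slice
// order reflects the order in which optimizations were logged.
type opt struct {
	line int
	name string
}

func main() {
	opts := []opt{{10, "inline"}, {10, "escape"}, {5, "nilcheck"}}
	// A stable sort keeps "inline" before "escape" because they compare
	// equal on line; an unstable sort would be free to swap them.
	sort.SliceStable(opts, func(i, j int) bool { return opts[i].line < opts[j].line })
	fmt.Println(opts) // [{5 nilcheck} {10 inline} {10 escape}]
}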
diff --git a/src/cmd/compile/internal/riscv64/galign.go b/src/cmd/compile/internal/riscv64/galign.go
new file mode 100644
index 0000000000..4db0fac52e
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/galign.go
@@ -0,0 +1,25 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj/riscv"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &riscv.LinkRISCV64
+
+ arch.REGSP = riscv.REG_SP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+ arch.ZeroRange = zeroRange
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go
new file mode 100644
index 0000000000..be31fad441
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/ggen.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ off += gc.Ctxt.FixedFrameSize()
+
+ if cnt < int64(4*gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+ p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
+ }
+ return p
+ }
+
+ // TODO(jsing): Add a duff zero implementation for medium sized ranges.
+
+ // Loop, zeroing pointer width bytes at a time.
+ // ADD $(off), SP, T0
+ // ADD $(cnt), T0, T1
+ // loop:
+ // MOV ZERO, (T0)
+ // ADD $Widthptr, T0
+ // BNE T0, T1, loop
+ p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
+ p.Reg = riscv.REG_SP
+ p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
+ p.Reg = riscv.REG_T0
+ p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
+ loop := p
+ p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, riscv.REG_T0, 0)
+ p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = riscv.REG_T1
+ gc.Patch(p, loop)
+ return p
+}
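
The pseudo-assembly comment in zeroRange above describes a cursor/limit loop: T0 walks the range while T1 holds the end address. The same loop shape written out in Go, purely as an illustration of the register roles (a sketch, not generated code):

package main

import "fmt"

// zeroWords mirrors the T0/T1 loop above using slice indices:
// cur plays the role of T0 (the cursor) and end plays T1 (the limit).
func zeroWords(mem []uint64) {
	cur, end := 0, len(mem) // ADD $(off), SP, T0 ; ADD $(cnt), T0, T1
	for cur != end {
		mem[cur] = 0 // MOV ZERO, (T0)
		cur++        // ADD $Widthptr, T0
	} // BNE T0, T1, loop
}

func main() {
	buf := []uint64{1, 2, 3, 4}
	zeroWords(buf)
	fmt.Println(buf) // [0 0 0 0]
}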
diff --git a/src/cmd/compile/internal/riscv64/gsubr.go b/src/cmd/compile/internal/riscv64/gsubr.go
new file mode 100644
index 0000000000..d40bdf7a1d
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/gsubr.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ // Hardware nop is ADD $0, ZERO
+ p := pp.Prog(riscv.AADD)
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = riscv.REG_ZERO
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: riscv.REG_ZERO}
+ return p
+}
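
The "hardware nop" in ginsnop above is ADDI x0, x0, 0 in ISA terms. As a quick cross-check against the I-type layout (a sketch under the standard RV64 base encoding, not part of this CL):

package main

import "fmt"

// encodeIType builds a RISC-V I-type instruction word:
// imm[11:0] | rs1 | funct3 | rd | opcode.
func encodeIType(opcode, funct3, rd, rs1 uint32, imm int32) uint32 {
	return (uint32(imm)&0xfff)<<20 | rs1<<15 | funct3<<12 | rd<<7 | opcode
}

func main() {
	const opImm = 0x13                    // OP-IMM major opcode; ADDI uses funct3 0
	nop := encodeIType(opImm, 0, 0, 0, 0) // ADDI x0, x0, 0
	fmt.Printf("0x%08x\n", nop)           // 0x00000013, the canonical RISC-V NOP
}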
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
new file mode 100644
index 0000000000..d33240351f
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -0,0 +1,496 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+// ssaRegToReg maps ssa register numbers to obj register numbers.
+var ssaRegToReg = []int16{
+ riscv.REG_X0,
+ // X1 (LR): unused
+ riscv.REG_X2,
+ riscv.REG_X3,
+ riscv.REG_X4,
+ riscv.REG_X5,
+ riscv.REG_X6,
+ riscv.REG_X7,
+ riscv.REG_X8,
+ riscv.REG_X9,
+ riscv.REG_X10,
+ riscv.REG_X11,
+ riscv.REG_X12,
+ riscv.REG_X13,
+ riscv.REG_X14,
+ riscv.REG_X15,
+ riscv.REG_X16,
+ riscv.REG_X17,
+ riscv.REG_X18,
+ riscv.REG_X19,
+ riscv.REG_X20,
+ riscv.REG_X21,
+ riscv.REG_X22,
+ riscv.REG_X23,
+ riscv.REG_X24,
+ riscv.REG_X25,
+ riscv.REG_X26,
+ riscv.REG_X27,
+ riscv.REG_X28,
+ riscv.REG_X29,
+ riscv.REG_X30,
+ riscv.REG_X31,
+ riscv.REG_F0,
+ riscv.REG_F1,
+ riscv.REG_F2,
+ riscv.REG_F3,
+ riscv.REG_F4,
+ riscv.REG_F5,
+ riscv.REG_F6,
+ riscv.REG_F7,
+ riscv.REG_F8,
+ riscv.REG_F9,
+ riscv.REG_F10,
+ riscv.REG_F11,
+ riscv.REG_F12,
+ riscv.REG_F13,
+ riscv.REG_F14,
+ riscv.REG_F15,
+ riscv.REG_F16,
+ riscv.REG_F17,
+ riscv.REG_F18,
+ riscv.REG_F19,
+ riscv.REG_F20,
+ riscv.REG_F21,
+ riscv.REG_F22,
+ riscv.REG_F23,
+ riscv.REG_F24,
+ riscv.REG_F25,
+ riscv.REG_F26,
+ riscv.REG_F27,
+ riscv.REG_F28,
+ riscv.REG_F29,
+ riscv.REG_F30,
+ riscv.REG_F31,
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+func loadByType(t *types.Type) obj.As {
+ width := t.Size()
+
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return riscv.AMOVF
+ case 8:
+ return riscv.AMOVD
+ default:
+ gc.Fatalf("unknown float width for load %d in type %v", width, t)
+ return 0
+ }
+ }
+
+ switch width {
+ case 1:
+ if t.IsSigned() {
+ return riscv.AMOVB
+ } else {
+ return riscv.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return riscv.AMOVH
+ } else {
+ return riscv.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return riscv.AMOVW
+ } else {
+ return riscv.AMOVWU
+ }
+ case 8:
+ return riscv.AMOV
+ default:
+ gc.Fatalf("unknown width for load %d in type %v", width, t)
+ return 0
+ }
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return riscv.AMOVF
+ case 8:
+ return riscv.AMOVD
+ default:
+ gc.Fatalf("unknown float width for store %d in type %v", width, t)
+ return 0
+ }
+ }
+
+ switch width {
+ case 1:
+ return riscv.AMOVB
+ case 2:
+ return riscv.AMOVH
+ case 4:
+ return riscv.AMOVW
+ case 8:
+ return riscv.AMOV
+ default:
+ gc.Fatalf("unknown width for store %d in type %v", width, t)
+ return 0
+ }
+}
+
+// largestMove returns the largest move instruction possible and its size,
+// given the alignment of the total size of the move.
+//
+// e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
+//
+// Note that the moves may not be on naturally aligned addresses depending on
+// the source and destination.
+//
+// This matches the calculation in ssa.moveSize.
+func largestMove(alignment int64) (obj.As, int64) {
+ switch {
+ case alignment%8 == 0:
+ return riscv.AMOV, 8
+ case alignment%4 == 0:
+ return riscv.AMOVW, 4
+ case alignment%2 == 0:
+ return riscv.AMOVH, 2
+ default:
+ return riscv.AMOVB, 1
+ }
+}
+
+// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
+// RISC-V has no flags, so this is a no-op.
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ s.SetPos(v.Pos)
+
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpPhi:
+ gc.CheckLoweredPhi(v)
+ case ssa.OpCopy, ssa.OpRISCV64MOVconvert:
+ if v.Type.IsMemory() {
+ return
+ }
+ rs := v.Args[0].Reg()
+ rd := v.Reg()
+ if rs == rd {
+ return
+ }
+ as := riscv.AMOV
+ if v.Type.IsFloat() {
+ as = riscv.AMOVD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rs
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rd
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddrAuto(&p.To, v)
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+ case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
+ ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
+ ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
+ ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
+ ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
+ ssa.OpRISCV64REMUW,
+ ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
+ ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
+ ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
+ ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
+ ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
+ ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
+ ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64ADDI, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
+ ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRLI, ssa.OpRISCV64SLTI,
+ ssa.OpRISCV64SLTIU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVBconst, ssa.OpRISCV64MOVHconst, ssa.OpRISCV64MOVWconst, ssa.OpRISCV64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVaddr:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVW $sym+off(base), R
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *gc.Node:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = riscv.REG_SP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
+ ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
+ ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
+ ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
+ s.Call(v)
+ case ssa.OpRISCV64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpRISCV64LoweredZero:
+ mov, sz := largestMove(v.AuxInt)
+
+ // mov ZERO, (Rarg0)
+ // ADD $sz, Rarg0
+ // BGEU Rarg1, Rarg0, -2(PC)
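+ //
+ // Rarg1 holds the address of the last element to zero (inclusive), so
+ // the loop repeats while it is still >= the freshly advanced Rarg0.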
+
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ p2 := s.Prog(riscv.AADD)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(riscv.ABGEU)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.Reg = v.Args[0].Reg()
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[1].Reg()
+ gc.Patch(p3, p)
+
+ case ssa.OpRISCV64LoweredMove:
+ mov, sz := largestMove(v.AuxInt)
+
+ // mov (Rarg1), T2
+ // mov T2, (Rarg0)
+ // ADD $sz, Rarg0
+ // ADD $sz, Rarg1
+ // BGEU Rarg2, Rarg1, -4(PC)
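+ //
+ // Rarg2 holds the address of the last element of src (inclusive), and
+ // T2 (X7) is clobbered as the per-element scratch register.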
+
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_T2
+
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = riscv.REG_T2
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(riscv.AADD)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Args[0].Reg()
+
+ p4 := s.Prog(riscv.AADD)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Args[1].Reg()
+
+ p5 := s.Prog(riscv.ABGEU)
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.Reg = v.Args[1].Reg()
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Args[2].Reg()
+ gc.Patch(p5, p)
+
+ case ssa.OpRISCV64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ // TODO: optimizations. See arm and amd64 LoweredNilCheck.
+ p := s.Prog(riscv.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_ZERO
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line() == 1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+
+ case ssa.OpRISCV64LoweredGetClosurePtr:
+ // Closure pointer is S4 (riscv.REG_CTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+
+ case ssa.OpRISCV64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpRISCV64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ default:
+ v.Fatalf("Unhandled op %v", v.Op)
+ }
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ s.SetPos(b.Pos)
+
+ switch b.Kind {
+ case ssa.BlockDefer:
+ // defer returns in A0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(riscv.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.Reg = riscv.REG_A0
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+ case ssa.BlockRISCV64BNE:
+ var p *obj.Prog
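+ // Arrange for the fallthrough successor to follow this block: invert
+ // the condition when the first successor is next, and emit an explicit
+ // jump when neither successor falls through.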
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(riscv.ABNE, b.Succs[1].Block())
+ p.As = riscv.InvertBranch(p.As)
+ case b.Succs[1].Block():
+ p = s.Br(riscv.ABNE, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(riscv.ABNE, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(riscv.ABNE, b.Succs[1].Block())
+ p.As = riscv.InvertBranch(p.As)
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ p.Reg = b.Controls[0].Reg()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+
+ default:
+ b.Fatalf("Unhandled block: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 3cbbfcfa4e..b51dfcb1f5 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -305,6 +305,16 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.LinkReg = linkRegMIPS
c.hasGReg = true
c.noDuffDevice = true
+ case "riscv64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockRISCV64
+ c.lowerValue = rewriteValueRISCV64
+ c.registers = registersRISCV64[:]
+ c.gpRegMask = gpRegMaskRISCV64
+ c.fpRegMask = fpRegMaskRISCV64
+ c.FPReg = framepointerRegRISCV64
+ c.hasGReg = true
case "wasm":
c.PtrSize = 8
c.RegSize = 8
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index b76410d597..32f0bcf290 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -153,7 +153,7 @@ func init() {
// TODO(josharian): move universe initialization to the types package,
// so this test setup can share it.
- types.Tconv = func(t *types.Type, flag, mode, depth int) string {
+ types.Tconv = func(t *types.Type, flag, mode int) string {
return t.Etype.String()
}
types.Sconv = func(s *types.Sym, flag, mode int) string {
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index a530874b80..c2d4051da8 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -145,7 +145,7 @@ func fuseBlockIf(b *Block) bool {
// There may be false positives.
func isEmpty(b *Block) bool {
for _, v := range b.Values {
- if v.Uses > 0 || v.Type.IsVoid() {
+ if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() {
return false
}
}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
index c3e25a80c4..77d2aad5c1 100644
--- a/src/cmd/compile/internal/ssa/fuse_test.go
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -63,7 +63,7 @@ func TestFuseEliminatesBothBranches(t *testing.T) {
t.Errorf("then was not eliminated, but should have")
}
if b == fun.blocks["else"] && b.Kind != BlockInvalid {
- t.Errorf("then was not eliminated, but should have")
+ t.Errorf("else was not eliminated, but should have")
}
}
}
@@ -97,7 +97,7 @@ func TestFuseHandlesPhis(t *testing.T) {
t.Errorf("then was not eliminated, but should have")
}
if b == fun.blocks["else"] && b.Kind != BlockInvalid {
- t.Errorf("then was not eliminated, but should have")
+ t.Errorf("else was not eliminated, but should have")
}
}
}
@@ -131,6 +131,40 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) {
}
}
+func TestFuseSideEffects(t *testing.T) {
+ // Test that we don't fuse branches that have side effects but
+ // have no use (e.g. followed by infinite loop).
+ // See issue #36005.
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("b", OpArg, c.config.Types.Bool, 0, nil),
+ If("b", "then", "else")),
+ Bloc("then",
+ Valu("call1", OpStaticCall, types.TypeMem, 0, nil, "mem"),
+ Goto("empty")),
+ Bloc("else",
+ Valu("call2", OpStaticCall, types.TypeMem, 0, nil, "mem"),
+ Goto("empty")),
+ Bloc("empty",
+ Goto("loop")),
+ Bloc("loop",
+ Goto("loop")))
+
+ CheckFunc(fun.f)
+ fuseAll(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind == BlockInvalid {
+ t.Errorf("then is eliminated, but should not")
+ }
+ if b == fun.blocks["else"] && b.Kind == BlockInvalid {
+ t.Errorf("else is eliminated, but should not")
+ }
+ }
+}
+
func BenchmarkFuse(b *testing.B) {
for _, n := range [...]int{1, 10, 100, 1000, 10000} {
b.Run(strconv.Itoa(n), func(b *testing.B) {
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index fc37074117..9bcb665632 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -813,7 +813,7 @@
(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem)
// Fold offsets for stores.
-(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) && (off1+off2)%4 == 0 -> (MOVDstore [off1+off2] {sym} x val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)
@@ -836,7 +836,7 @@
&& (ptr.Op != OpSB || p.Uses == 1) ->
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
+ && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
@@ -856,13 +856,13 @@
&& (ptr.Op != OpSB || p.Uses == 1) ->
(MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
+ && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& (ptr.Op != OpSB || p.Uses == 1) ->
(MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
+ && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& (ptr.Op != OpSB || p.Uses == 1) ->
@@ -875,8 +875,8 @@
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)
-(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
-(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
+(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) && (off1+off2)%4 == 0 -> (MOVDload [off1+off2] {sym} x mem)
+(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) && (off1+off2)%4 == 0 -> (MOVWload [off1+off2] {sym} x mem)
(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
@@ -886,9 +886,10 @@
(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 -> (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
// Determine indexed loads with constant values that can be done without index
-(MOV(D|W|WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
-(MOV(D|W|WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
-
+(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 -> (MOV(D|W)load [c] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) -> (MOV(WZ|H|HZ|BZ)load [c] ptr mem)
+(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 -> (MOV(D|W)load [c] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) -> (MOV(WZ|H|HZ|BZ)load [c] ptr mem)
// Store of zero -> storezero
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
@@ -897,7 +898,7 @@
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
// Fold offsets for storezero
-(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
+(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) && (off1+off2)%4 == 0 ->
(MOVDstorezero [off1+off2] {sym} x mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
(MOVWstorezero [off1+off2] {sym} x mem)
@@ -910,12 +911,14 @@
(MOV(D|W|H|B)store [off] {sym} p:(ADD ptr idx) val mem) && off == 0 && sym == nil && p.Uses == 1 -> (MOV(D|W|H|B)storeidx ptr idx val mem)
// Stores with constant index values can be done without indexed instructions
-(MOV(D|W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
-(MOV(D|W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 -> (MOVDstore [c] ptr val mem)
+(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) -> (MOV(W|H|B)store [c] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 -> (MOVDstore [c] ptr val mem)
+(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) -> (MOV(W|H|B)store [c] ptr val mem)
// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
- && (x.Op != OpSB || p.Uses == 1) ->
+ && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
&& (x.Op != OpSB || p.Uses == 1) ->
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
new file mode 100644
index 0000000000..5331c73259
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -0,0 +1,478 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Optimizations TODO:
+// * Somehow track when values are already zero/signed-extended, avoid re-extending.
+// * Use SLTI and SLTIU for comparisons to constants, instead of SLT/SLTU with constants in registers
+// * Find a more efficient way to do zero/sign extension than left+right shift.
+// There are many other options (store then load-extend, LUI+ANDI for zero extend, special case 32->64, ...),
+// but left+right shift is simple and uniform, and we don't have real hardware to do perf testing on anyway.
+// * Use the zero register instead of moving 0 into a register.
+// * Add rules to avoid generating a temp bool value for (If (SLT[U] ...) ...).
+// * Optimize left and right shift by simplifying SLTIU, Neg, and ADD for constants.
+// * Arrange for non-trivial Zero and Move lowerings to use aligned loads and stores.
+// * Eliminate zero immediate shifts, adds, etc.
+// * Use a Duff's device for some moves and zeros.
+// * Avoid using Neq32 for writeBarrier.enabled checks.
+
+// Lowering arithmetic
+(Add64 x y) -> (ADD x y)
+(AddPtr x y) -> (ADD x y)
+(Add32 x y) -> (ADD x y)
+(Add16 x y) -> (ADD x y)
+(Add8 x y) -> (ADD x y)
+(Add32F x y) -> (FADDS x y)
+(Add64F x y) -> (FADDD x y)
+
+(Sub64 x y) -> (SUB x y)
+(SubPtr x y) -> (SUB x y)
+(Sub32 x y) -> (SUB x y)
+(Sub16 x y) -> (SUB x y)
+(Sub8 x y) -> (SUB x y)
+(Sub32F x y) -> (FSUBS x y)
+(Sub64F x y) -> (FSUBD x y)
+
+(Mul64 x y) -> (MUL x y)
+(Mul32 x y) -> (MULW x y)
+(Mul16 x y) -> (MULW (SignExt16to32 x) (SignExt16to32 y))
+(Mul8 x y) -> (MULW (SignExt8to32 x) (SignExt8to32 y))
+(Mul32F x y) -> (FMULS x y)
+(Mul64F x y) -> (FMULD x y)
+
+(Div32F x y) -> (FDIVS x y)
+(Div64F x y) -> (FDIVD x y)
+
+(Div64 x y) -> (DIV x y)
+(Div64u x y) -> (DIVU x y)
+(Div32 x y) -> (DIVW x y)
+(Div32u x y) -> (DIVUW x y)
+(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) -> (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) -> (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul64 x y) -> (MULH x y)
+(Hmul64u x y) -> (MULHU x y)
+(Hmul32 x y) -> (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+(Hmul32u x y) -> (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+
+// (x + y) / 2 -> (x / 2) + (y / 2) + (x & y & 1)
+(Avg64u <t> x y) -> (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
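+//
+// For example, x = 7, y = 4: (7>>1) + (4>>1) + (7&4&1) = 3 + 2 + 0 = 5,
+// which matches (7+4)/2 = 5 without overflowing the intermediate sum.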
+
+(Mod64 x y) -> (REM x y)
+(Mod64u x y) -> (REMU x y)
+(Mod32 x y) -> (REMW x y)
+(Mod32u x y) -> (REMUW x y)
+(Mod16 x y) -> (REMW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) -> (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) -> (REMW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) -> (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(And64 x y) -> (AND x y)
+(And32 x y) -> (AND x y)
+(And16 x y) -> (AND x y)
+(And8 x y) -> (AND x y)
+
+(Or64 x y) -> (OR x y)
+(Or32 x y) -> (OR x y)
+(Or16 x y) -> (OR x y)
+(Or8 x y) -> (OR x y)
+
+(Xor64 x y) -> (XOR x y)
+(Xor32 x y) -> (XOR x y)
+(Xor16 x y) -> (XOR x y)
+(Xor8 x y) -> (XOR x y)
+
+(Neg64 x) -> (SUB (MOVDconst) x)
+(Neg32 x) -> (SUB (MOVWconst) x)
+(Neg16 x) -> (SUB (MOVHconst) x)
+(Neg8 x) -> (SUB (MOVBconst) x)
+(Neg32F x) -> (FNEGS x)
+(Neg64F x) -> (FNEGD x)
+
+(Com64 x) -> (XORI [int64(-1)] x)
+(Com32 x) -> (XORI [int64(-1)] x)
+(Com16 x) -> (XORI [int64(-1)] x)
+(Com8 x) -> (XORI [int64(-1)] x)
+
+(Sqrt x) -> (FSQRTD x)
+
+// Zero and sign extension
+// Shift left until the bits we want are at the top of the register.
+// Then logical/arithmetic shift right for zero/sign extend.
+// We always extend to 64 bits; there's no reason not to,
+// and optimization rules can then collapse some extensions.
+
+(SignExt8to16 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
+(SignExt8to32 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
+(SignExt8to64 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
+(SignExt16to32 <t> x) -> (SRAI [48] (SLLI <t> [48] x))
+(SignExt16to64 <t> x) -> (SRAI [48] (SLLI <t> [48] x))
+(SignExt32to64 <t> x) -> (SRAI [32] (SLLI <t> [32] x))
+
+(ZeroExt8to16 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt8to32 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt8to64 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt16to32 <t> x) -> (SRLI [48] (SLLI <t> [48] x))
+(ZeroExt16to64 <t> x) -> (SRLI [48] (SLLI <t> [48] x))
+(ZeroExt32to64 <t> x) -> (SRLI [32] (SLLI <t> [32] x))
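+//
+// For example, extending the byte 0x80 to 64 bits: SLLI [56] yields
+// 0x8000000000000000; SRLI [56] shifts zeros back in, giving 0x80 (zero
+// extension), while SRAI [56] copies the sign bit down, giving
+// 0xffffffffffffff80 (sign extension, -128).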
+
+(Cvt32to32F x) -> (FCVTSW x)
+(Cvt32to64F x) -> (FCVTDW x)
+(Cvt64to32F x) -> (FCVTSL x)
+(Cvt64to64F x) -> (FCVTDL x)
+
+(Cvt32Fto32 x) -> (FCVTWS x)
+(Cvt32Fto64 x) -> (FCVTLS x)
+(Cvt64Fto32 x) -> (FCVTWD x)
+(Cvt64Fto64 x) -> (FCVTLD x)
+
+(Cvt32Fto64F x) -> (FCVTDS x)
+(Cvt64Fto32F x) -> (FCVTSD x)
+
+(Round32F x) -> x
+(Round64F x) -> x
+
+// From genericOps.go:
+// "0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0"
+//
+// Like other arches, we compute ~((x-1) >> 63), with arithmetic right shift.
+// For positive x, bit 63 of x-1 is always 0, so the result is -1.
+// For zero x, bit 63 of x-1 is 1, so the result is 0.
+//
+// TODO(prattmic): Use XORconst etc instead of XOR (MOVDconst).
+(Slicemask <t> x) -> (XOR (MOVDconst [-1]) (SRA <t> (SUB <t> x (MOVDconst [1])) (MOVDconst [63])))
+
+// Truncations
+// We ignore the unused high parts of registers, so truncates are just copies.
+(Trunc16to8 x) -> x
+(Trunc32to8 x) -> x
+(Trunc32to16 x) -> x
+(Trunc64to8 x) -> x
+(Trunc64to16 x) -> x
+(Trunc64to32 x) -> x
+
+// Shifts
+
+// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
+// always be 0.
+//
+// Breaking down the operation:
+//
+// (SLL x y) generates x << (y & 63).
+//
+// If y < 64, this is the value we want. Otherwise, we want zero.
+//
+// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
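+//
+// For example, y = 70: SLTIU [64] produces 0, Neg produces 0, and the AND
+// discards the bogus x<<6 computed by SLL. For y = 3: SLTIU produces 1,
+// Neg produces all ones, and the AND passes x<<3 through unchanged.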
+(Lsh8x8 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh8x16 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh8x32 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh8x64 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Lsh16x8 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh16x16 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh16x32 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh16x64 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Lsh32x8 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh32x16 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh32x32 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh32x64 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Lsh64x8 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh64x16 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh64x32 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh64x64 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+// SRL only considers the bottom 6 bits of y. If y >= 64, the result should
+// always be 0. See Lsh above for a detailed description.
+(Rsh8Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh32Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Rsh64Ux8 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux16 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+// SRA only considers the bottom 6 bits of y. If y >= 64, the result should
+// be either 0 or -1 based on the sign bit.
+//
+// We implement this by performing the max shift (-1) if y >= 64.
+//
+// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
+// us with -1 (0xffff...) if y >= 64.
+//
+// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
+// more than the 6 bits SRA cares about.
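+//
+// For example, y = 70: SLTIU [64] produces 0, ADDI [-1] makes it -1, and
+// the OR turns y into -1, so SRA shifts by 63 (-1 & 63 = 63), yielding all
+// sign bits. For y = 3: SLTIU produces 1, ADDI [-1] makes it 0, and the
+// OR leaves y = 3 unchanged.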
+(Rsh8x8 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x16 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x16 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh32x16 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh64x8 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x16 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+
+// rotates
+(RotateLeft8 <t> x (MOVBconst [c])) -> (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
+(RotateLeft16 <t> x (MOVHconst [c])) -> (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
+(RotateLeft32 <t> x (MOVWconst [c])) -> (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
+(RotateLeft64 <t> x (MOVDconst [c])) -> (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
+
+(Less64 x y) -> (SLT x y)
+(Less32 x y) -> (SLT (SignExt32to64 x) (SignExt32to64 y))
+(Less16 x y) -> (SLT (SignExt16to64 x) (SignExt16to64 y))
+(Less8 x y) -> (SLT (SignExt8to64 x) (SignExt8to64 y))
+(Less64U x y) -> (SLTU x y)
+(Less32U x y) -> (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less16U x y) -> (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less8U x y) -> (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less64F x y) -> (FLTD x y)
+(Less32F x y) -> (FLTS x y)
+
+// Convert x <= y to !(y > x).
+(Leq64 x y) -> (Not (Less64 y x))
+(Leq32 x y) -> (Not (Less32 y x))
+(Leq16 x y) -> (Not (Less16 y x))
+(Leq8 x y) -> (Not (Less8 y x))
+(Leq64U x y) -> (Not (Less64U y x))
+(Leq32U x y) -> (Not (Less32U y x))
+(Leq16U x y) -> (Not (Less16U y x))
+(Leq8U x y) -> (Not (Less8U y x))
+(Leq64F x y) -> (FLED x y)
+(Leq32F x y) -> (FLES x y)
+
+// Convert x > y to y < x.
+(Greater64 x y) -> (Less64 y x)
+(Greater32 x y) -> (Less32 y x)
+(Greater16 x y) -> (Less16 y x)
+(Greater8 x y) -> (Less8 y x)
+(Greater64U x y) -> (Less64U y x)
+(Greater32U x y) -> (Less32U y x)
+(Greater16U x y) -> (Less16U y x)
+(Greater8U x y) -> (Less8U y x)
+(Greater64F x y) -> (FLTD y x)
+(Greater32F x y) -> (FLTS y x)
+
+// Convert x >= y to !(x < y)
+(Geq64 x y) -> (Not (Less64 x y))
+(Geq32 x y) -> (Not (Less32 x y))
+(Geq16 x y) -> (Not (Less16 x y))
+(Geq8 x y) -> (Not (Less8 x y))
+(Geq64U x y) -> (Not (Less64U x y))
+(Geq32U x y) -> (Not (Less32U x y))
+(Geq16U x y) -> (Not (Less16U x y))
+(Geq8U x y) -> (Not (Less8U x y))
+(Geq64F x y) -> (FLED y x)
+(Geq32F x y) -> (FLES y x)
+
+(EqPtr x y) -> (SEQZ (SUB <x.Type> x y))
+(Eq64 x y) -> (SEQZ (SUB <x.Type> x y))
+(Eq32 x y) -> (SEQZ (ZeroExt32to64 (SUB <x.Type> x y)))
+(Eq16 x y) -> (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
+(Eq8 x y) -> (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
+(Eq64F x y) -> (FEQD x y)
+(Eq32F x y) -> (FEQS x y)
+
+(NeqPtr x y) -> (SNEZ (SUB <x.Type> x y))
+(Neq64 x y) -> (SNEZ (SUB <x.Type> x y))
+(Neq32 x y) -> (SNEZ (ZeroExt32to64 (SUB <x.Type> x y)))
+(Neq16 x y) -> (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
+(Neq8 x y) -> (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
+(Neq64F x y) -> (FNED x y)
+(Neq32F x y) -> (FNES x y)
+
+// Loads
+(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVWload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
+
+// Stores
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+
+// We need to fold MOVaddr into the load/store ops so that the live variable analysis
+// knows what variables are being read/written by the ops.
+(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+ (MOVBUload [off1+off2] {sym} base mem)
+(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+ (MOVBload [off1+off2] {sym} base mem)
+(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+ (MOVHUload [off1+off2] {sym} base mem)
+(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+ (MOVHload [off1+off2] {sym} base mem)
+(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+ (MOVWUload [off1+off2] {sym} base mem)
+(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+ (MOVWload [off1+off2] {sym} base mem)
+(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
+ (MOVDload [off1+off2] {sym} base mem)
+
+(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
+ (MOVBstore [off1+off2] {sym} base val mem)
+(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
+ (MOVHstore [off1+off2] {sym} base val mem)
+(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
+ (MOVWstore [off1+off2] {sym} base val mem)
+(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
+ (MOVDstore [off1+off2] {sym} base val mem)
+
+// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
+// with OffPtr -> ADDI.
+(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+d) -> (MOVaddr [c+d] {s} x)
+
+// Zeroing
+// TODO: more optimized zeroing, including attempting to use aligned accesses.
+(Zero [0] _ mem) -> mem
+(Zero [1] ptr mem) -> (MOVBstore ptr (MOVBconst) mem)
+(Zero [2] ptr mem) -> (MOVHstore ptr (MOVHconst) mem)
+(Zero [4] ptr mem) -> (MOVWstore ptr (MOVWconst) mem)
+(Zero [8] ptr mem) -> (MOVDstore ptr (MOVDconst) mem)
+
+// Generic zeroing uses a loop
+(Zero [s] {t} ptr mem) ->
+ (LoweredZero [t.(*types.Type).Alignment()]
+ ptr
+ (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.(*types.Type).Alignment(), config)]))
+ mem)
+
+(Convert x mem) -> (MOVconvert x mem)
+
+// Checks
+(IsNonNil p) -> (NeqPtr (MOVDconst) p)
+(IsInBounds idx len) -> (Less64U idx len)
+(IsSliceInBounds idx len) -> (Leq64U idx len)
+
+// Trivial lowering
+(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
+(GetClosurePtr) -> (LoweredGetClosurePtr)
+(GetCallerSP) -> (LoweredGetCallerSP)
+(GetCallerPC) -> (LoweredGetCallerPC)
+
+// Write barrier.
+(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)
+
+// Moves
+// TODO: more optimized moves, including attempting to use aligned accesses.
+(Move [0] _ _ mem) -> mem
+(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) -> (MOVHstore dst (MOVHload src mem) mem)
+(Move [4] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
+(Move [8] dst src mem) -> (MOVDstore dst (MOVDload src mem) mem)
+
+// Generic move uses a loop
+(Move [s] {t} dst src mem) ->
+ (LoweredMove [t.(*types.Type).Alignment()]
+ dst
+ src
+ (ADDI <src.Type> [s-moveSize(t.(*types.Type).Alignment(), config)] src)
+ mem)
+
+// Boolean ops; 0=false, 1=true
+(AndB x y) -> (AND x y)
+(OrB x y) -> (OR x y)
+(EqB x y) -> (XORI [1] (XOR <typ.Bool> x y))
+(NeqB x y) -> (XOR x y)
+(Not x) -> (XORI [1] x)
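+//
+// For example, EqB(true, true): XOR gives 0, and XORI [1] flips it to 1.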
+
+// Lowering pointer arithmetic
+// TODO: Special handling for SP offsets, like ARM
+(OffPtr [off] ptr:(SP)) -> (MOVaddr [off] ptr)
+(OffPtr [off] ptr) && is32Bit(off) -> (ADDI [off] ptr)
+(OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr)
+
+(Const8 [val]) -> (MOVBconst [val])
+(Const16 [val]) -> (MOVHconst [val])
+(Const32 [val]) -> (MOVWconst [val])
+(Const64 [val]) -> (MOVDconst [val])
+(Const32F [val]) -> (FMVSX (MOVWconst [int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))]))
+(Const64F [val]) -> (FMVDX (MOVDconst [val]))
+(ConstNil) -> (MOVDconst [0])
+(ConstBool [b]) -> (MOVBconst [b])
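+//
+// Const32F's auxint holds the value as float64 bits (see genericOps), so the
+// rule above recovers the equivalent float32 bit pattern before materializing
+// it in an integer register with MOVWconst.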
+
+// Convert 64 bit immediate to two 32 bit immediates, combine with add and shift.
+// The lower 32 bit immediate will be treated as signed,
+// so if it is negative, adjust for the borrow by incrementing the top half.
+// We don't have to worry about overflow from the increment,
+// because if the top half is all 1s, and int32(c) is negative,
+// then the overall constant fits in an int32.
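+//
+// For example, c = 0x1ffffff00: int32(c) = -256 and c>>32 = 1, so the top
+// half becomes 1+1 = 2 and (2<<32) + (-256) = 0x1ffffff00 as required.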
+(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) < 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
+(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) >= 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
+
+// Fold ADD+MOVDconst into ADDI where possible.
+(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
+
+(Addr {sym} base) -> (MOVaddr {sym} base)
+(LocalAddr {sym} base _) -> (MOVaddr {sym} base)
+
+// Conditional branches
+//
+// cond is 1 if true. BNE compares against 0.
+//
+// TODO(prattmic): RISCV branch instructions take two operands to compare,
+// so we could generate more efficient code by computing the condition in the
+// branch itself. This should be revisited now that the compiler has support
+// for two control values (https://golang.org/cl/196557).
+(If cond yes no) -> (BNE cond yes no)
+
+// Calls
+(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
+(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
+(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
+
+// remove redundant *const ops
+(ADDI [0] x) -> x
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
new file mode 100644
index 0000000000..88a97e43fd
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
@@ -0,0 +1,338 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "fmt"
+)
+
+// Suffixes encode the bit width of various instructions:
+//
+// D (double word) = 64 bit int
+// W (word) = 32 bit int
+// H (half word) = 16 bit int
+// B (byte) = 8 bit int
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+// L = 64 bit int, used when the opcode starts with F
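+//
+// For example, MOVH is a 16-bit integer move, FADDS adds 32-bit floats, and
+// FCVTLD converts a 64-bit float to a 64-bit int (L, since the opcode
+// starts with F).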
+
+const (
+ riscv64REG_G = 4
+ riscv64REG_CTXT = 20
+ riscv64REG_LR = 1
+ riscv64REG_SP = 2
+ riscv64REG_TMP = 31
+ riscv64REG_ZERO = 0
+)
+
+func riscv64RegName(r int) string {
+ switch {
+ case r == riscv64REG_G:
+ return "g"
+ case r == riscv64REG_SP:
+ return "SP"
+ case 0 <= r && r <= 31:
+ return fmt.Sprintf("X%d", r)
+ case 32 <= r && r <= 63:
+ return fmt.Sprintf("F%d", r-32)
+ default:
+ panic(fmt.Sprintf("unknown register %d", r))
+ }
+}
+
+func init() {
+ var regNamesRISCV64 []string
+ var gpMask, fpMask, gpspMask, gpspsbMask regMask
+ regNamed := make(map[string]regMask)
+
+ // Build the list of register names, creating an appropriately indexed
+ // regMask for the gp and fp registers as we go.
+ //
+ // If name is specified, use it rather than the riscv reg number.
+ addreg := func(r int, name string) regMask {
+ mask := regMask(1) << uint(len(regNamesRISCV64))
+ if name == "" {
+ name = riscv64RegName(r)
+ }
+ regNamesRISCV64 = append(regNamesRISCV64, name)
+ regNamed[name] = mask
+ return mask
+ }
+
+ // General purpose registers.
+ for r := 0; r <= 31; r++ {
+ if r == riscv64REG_LR {
+ // LR is not used by regalloc, so we skip it to leave
+ // room for pseudo-register SB.
+ continue
+ }
+
+ mask := addreg(r, "")
+
+ // Add general purpose registers to gpMask.
+ switch r {
+ // ZERO, g, and TMP are not in any gp mask.
+ case riscv64REG_ZERO, riscv64REG_G, riscv64REG_TMP:
+ case riscv64REG_SP:
+ gpspMask |= mask
+ gpspsbMask |= mask
+ default:
+ gpMask |= mask
+ gpspMask |= mask
+ gpspsbMask |= mask
+ }
+ }
+
+ // Floating point registers.
+ for r := 32; r <= 63; r++ {
+ mask := addreg(r, "")
+ fpMask |= mask
+ }
+
+ // Pseudo-register: SB
+ mask := addreg(-1, "SB")
+ gpspsbMask |= mask
+
+ if len(regNamesRISCV64) > 64 {
+ // regMask is only 64 bits.
+ panic("Too many RISCV64 registers")
+ }
+
+ regCtxt := regNamed["X20"]
+ callerSave := gpMask | fpMask | regNamed["g"]
+
+ var (
+ gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can store to a global, but not in second to avoid using SB as a temporary register
+ gp01 = regInfo{outputs: []regMask{gpMask}}
+ gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
+ gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
+ gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
+ gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
+
+ fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
+ fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
+ gpfp = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{fpMask}}
+ fpgp = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{gpMask}}
+ fpstore = regInfo{inputs: []regMask{gpspsbMask, fpMask, 0}}
+ fpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{fpMask}}
+ fp2gp = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{gpMask}}
+
+ call = regInfo{clobbers: callerSave}
+ callClosure = regInfo{inputs: []regMask{gpspMask, regCtxt, 0}, clobbers: callerSave}
+ callInter = regInfo{inputs: []regMask{gpMask}, clobbers: callerSave}
+ )
+
+ RISCV64ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDI", argLength: 1, reg: gp11sb, asm: "ADDI", aux: "Int64"}, // arg0 + auxint
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+
+ // M extension. H means high (i.e., it returns the top bits of
+ // the result). U means unsigned. W means word (i.e., 32-bit).
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true, typ: "Int64"}, // arg0 * arg1
+ {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true, typ: "Int32"},
+ {name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int64"},
+ {name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt64"},
+ {name: "DIV", argLength: 2, reg: gp21, asm: "DIV", typ: "Int64"}, // arg0 / arg1
+ {name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", typ: "UInt64"},
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"},
+ {name: "DIVUW", argLength: 2, reg: gp21, asm: "DIVUW", typ: "UInt32"},
+ {name: "REM", argLength: 2, reg: gp21, asm: "REM", typ: "Int64"}, // arg0 % arg1
+ {name: "REMU", argLength: 2, reg: gp21, asm: "REMU", typ: "UInt64"},
+ {name: "REMW", argLength: 2, reg: gp21, asm: "REMW", typ: "Int32"},
+ {name: "REMUW", argLength: 2, reg: gp21, asm: "REMUW", typ: "UInt32"},
+
+ {name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "RdWr"}, // arg0 + auxint + offset encoded in aux
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+
+ {name: "MOVBconst", reg: gp01, asm: "MOV", typ: "UInt8", aux: "Int8", rematerializeable: true}, // 8 low bits of auxint
+ {name: "MOVHconst", reg: gp01, asm: "MOV", typ: "UInt16", aux: "Int16", rematerializeable: true}, // 16 low bits of auxint
+ {name: "MOVWconst", reg: gp01, asm: "MOV", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ // Loads: load <size> bits from arg0+auxint+aux and extend to 64 bits; arg1=mem
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, sign extend
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, sign extend
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, sign extend
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOV", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // 64 bits
+ {name: "MOVBUload", argLength: 2, reg: gpload, asm: "MOVBU", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, zero extend
+ {name: "MOVHUload", argLength: 2, reg: gpload, asm: "MOVHU", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, zero extend
+ {name: "MOVWUload", argLength: 2, reg: gpload, asm: "MOVWU", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, zero extend
+
+ // Stores: store <size> lowest bits in arg1 to arg0+auxint+aux; arg2=mem
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
+ // Shift ops
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << aux1
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> aux1, signed
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> aux1, unsigned
+ {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint
+ {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed
+ {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned
+
+ // Bitwise ops
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORI", argLength: 1, reg: gp11, asm: "XORI", aux: "Int64"}, // arg0 ^ auxint
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORI", argLength: 1, reg: gp11, asm: "ORI", aux: "Int64"}, // arg0 | auxint
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDI", argLength: 1, reg: gp11, asm: "ANDI", aux: "Int64"}, // arg0 & auxint
+
+ // Generate boolean values
+ {name: "SEQZ", argLength: 1, reg: gp11, asm: "SEQZ"}, // arg0 == 0, result is 0 or 1
+ {name: "SNEZ", argLength: 1, reg: gp11, asm: "SNEZ"}, // arg0 != 0, result is 0 or 1
+ {name: "SLT", argLength: 2, reg: gp21, asm: "SLT"}, // arg0 < arg1, result is 0 or 1
+ {name: "SLTI", argLength: 1, reg: gp11, asm: "SLTI", aux: "Int64"}, // arg0 < auxint, result is 0 or 1
+ {name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"}, // arg0 < arg1, unsigned, result is 0 or 1
+ {name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1
+
+ // MOVconvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ {name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem
+
+ // Calls
+ {name: "CALLstatic", argLength: 1, reg: call, aux: "SymOff", call: true, symEffect: "None"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: callClosure, aux: "Int64", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: callInter, aux: "Int64", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // Generic moves and zeros
+
+ // general unaligned zeroing
+ // arg0 = address of memory to zero (in X5, changed as side effect)
+ // arg1 = address of the last element to zero (inclusive)
+ // arg2 = mem
+ // auxint = element size
+ // returns mem
+ // mov ZERO, (X5)
+ // ADD $sz, X5
+ // BGEU Rarg1, X5, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X5"], gpMask},
+ clobbers: regNamed["X5"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // general unaligned move
+ // arg0 = address of dst memory (in X5, changed as side effect)
+ // arg1 = address of src memory (in X6, changed as side effect)
+ // arg2 = address of the last element of src (can't be X7 as we clobber it before using arg2)
+ // arg3 = mem
+ // auxint = alignment
+ // clobbers X7 as a tmp register.
+ // returns mem
+ // mov (X6), X7
+ // mov X7, (X5)
+ // ADD $sz, X5
+ // ADD $sz, X6
+ // BGEU Rarg2, X6, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]},
+ clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Lowering pass-throughs
+ {name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}}, // scheduler ensures only at beginning of entry block
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers RA (LR) because it's a call
+ // and T6 (REG_TMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}, clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"]}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // F extension.
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"}, // arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"}, // arg0 / arg1
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0)
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0
+ {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float
+ {name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0)
+ {name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0)
+ {name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0)
+ {name: "FCVTLS", argLength: 1, reg: fpgp, asm: "FCVTLS", typ: "Int64"}, // int64(arg0)
+ {name: "FMOVWload", argLength: 2, reg: fpload, asm: "MOVF", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load float32 from arg0+auxint+aux
+ {name: "FMOVWstore", argLength: 3, reg: fpstore, asm: "MOVF", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float32 to arg0+auxint+aux
+ {name: "FEQS", argLength: 2, reg: fp2gp, asm: "FEQS", commutative: true}, // arg0 == arg1
+ {name: "FNES", argLength: 2, reg: fp2gp, asm: "FNES", commutative: true}, // arg0 != arg1
+ {name: "FLTS", argLength: 2, reg: fp2gp, asm: "FLTS"}, // arg0 < arg1
+ {name: "FLES", argLength: 2, reg: fp2gp, asm: "FLES"}, // arg0 <= arg1
+
+ // D extension.
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true, typ: "Float64"}, // arg0 + arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD", commutative: false, typ: "Float64"}, // arg0 - arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true, typ: "Float64"}, // arg0 * arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD", commutative: false, typ: "Float64"}, // arg0 / arg1
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD", typ: "Float64"}, // sqrt(arg0)
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0
+ {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float
+ {name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0)
+ {name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0)
+ {name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0)
+ {name: "FCVTLD", argLength: 1, reg: fpgp, asm: "FCVTLD", typ: "Int64"}, // int64(arg0)
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS", typ: "Float64"}, // float64(arg0)
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD", typ: "Float32"}, // float32(arg0)
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "MOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load float64 from arg0+auxint+aux
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float6 to arg0+auxint+aux
+ {name: "FEQD", argLength: 2, reg: fp2gp, asm: "FEQD", commutative: true}, // arg0 == arg1
+ {name: "FNED", argLength: 2, reg: fp2gp, asm: "FNED", commutative: true}, // arg0 != arg1
+ {name: "FLTD", argLength: 2, reg: fp2gp, asm: "FLTD"}, // arg0 < arg1
+ {name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1
+ }
+
+ RISCV64blocks := []blockData{
+ {name: "BNE", controls: 1}, // Control != 0 (take a register)
+ }
+
+ archs = append(archs, arch{
+ name: "RISCV64",
+ pkg: "cmd/internal/obj/riscv",
+ genfile: "../../riscv64/ssa.go",
+ ops: RISCV64ops,
+ blocks: RISCV64blocks,
+ regnames: regNamesRISCV64,
+ gpregmask: gpMask,
+ fpregmask: fpMask,
+ framepointerreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index aa9d570396..b638d98887 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -340,7 +340,7 @@ var genericOps = []opData{
// Note: ConstX are sign-extended even when the type of the value is unsigned.
// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
{name: "Const64", aux: "Int64"}, // value is auxint
- {name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly prepresentable as float 32
+ {name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
{name: "ConstInterface"}, // nil interface
{name: "ConstSlice"}, // nil slice
diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go
index 2107da4f4e..8520c68a5a 100644
--- a/src/cmd/compile/internal/ssa/gen/main.go
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -405,6 +405,7 @@ func genOp() {
fmt.Fprintln(w, "func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }")
fmt.Fprintln(w, "func (o Op) IsCall() bool { return opcodeTable[o].call }")
+ fmt.Fprintln(w, "func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }")
fmt.Fprintln(w, "func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }")
// generate registers
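
This hunk has the generator emit a HasSideEffects accessor alongside the existing SymEffect and IsCall ones, so passes can ask an Op about side effects without indexing opcodeTable themselves. A toy model of the generated pattern (abbreviated names, not the real table):

	package main

	import "fmt"

	type opInfo struct {
		name           string
		hasSideEffects bool
	}

	type Op int

	var opcodeTable = [...]opInfo{
		{name: "Add64", hasSideEffects: false},
		{name: "AtomicStore64", hasSideEffects: true},
	}

	func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }

	func main() {
		for o := Op(0); int(o) < len(opcodeTable); o++ {
			fmt.Println(opcodeTable[o].name, o.HasSideEffects())
		}
	}
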
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index a5951dd4e1..86428a3e84 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -8,6 +8,7 @@ import (
"cmd/internal/obj/arm64"
"cmd/internal/obj/mips"
"cmd/internal/obj/ppc64"
+ "cmd/internal/obj/riscv"
"cmd/internal/obj/s390x"
"cmd/internal/obj/wasm"
"cmd/internal/obj/x86"
@@ -111,6 +112,8 @@ const (
BlockPPC64FGT
BlockPPC64FGE
+ BlockRISCV64BNE
+
BlockS390XBRC
BlockS390XCRJ
BlockS390XCGRJ
@@ -228,6 +231,8 @@ var blockString = [...]string{
BlockPPC64FGT: "FGT",
BlockPPC64FGE: "FGE",
+ BlockRISCV64BNE: "BNE",
+
BlockS390XBRC: "BRC",
BlockS390XCRJ: "CRJ",
BlockS390XCGRJ: "CGRJ",
@@ -1877,6 +1882,106 @@ const (
OpPPC64FlagLT
OpPPC64FlagGT
+ OpRISCV64ADD
+ OpRISCV64ADDI
+ OpRISCV64SUB
+ OpRISCV64MUL
+ OpRISCV64MULW
+ OpRISCV64MULH
+ OpRISCV64MULHU
+ OpRISCV64DIV
+ OpRISCV64DIVU
+ OpRISCV64DIVW
+ OpRISCV64DIVUW
+ OpRISCV64REM
+ OpRISCV64REMU
+ OpRISCV64REMW
+ OpRISCV64REMUW
+ OpRISCV64MOVaddr
+ OpRISCV64MOVBconst
+ OpRISCV64MOVHconst
+ OpRISCV64MOVWconst
+ OpRISCV64MOVDconst
+ OpRISCV64MOVBload
+ OpRISCV64MOVHload
+ OpRISCV64MOVWload
+ OpRISCV64MOVDload
+ OpRISCV64MOVBUload
+ OpRISCV64MOVHUload
+ OpRISCV64MOVWUload
+ OpRISCV64MOVBstore
+ OpRISCV64MOVHstore
+ OpRISCV64MOVWstore
+ OpRISCV64MOVDstore
+ OpRISCV64SLL
+ OpRISCV64SRA
+ OpRISCV64SRL
+ OpRISCV64SLLI
+ OpRISCV64SRAI
+ OpRISCV64SRLI
+ OpRISCV64XOR
+ OpRISCV64XORI
+ OpRISCV64OR
+ OpRISCV64ORI
+ OpRISCV64AND
+ OpRISCV64ANDI
+ OpRISCV64SEQZ
+ OpRISCV64SNEZ
+ OpRISCV64SLT
+ OpRISCV64SLTI
+ OpRISCV64SLTU
+ OpRISCV64SLTIU
+ OpRISCV64MOVconvert
+ OpRISCV64CALLstatic
+ OpRISCV64CALLclosure
+ OpRISCV64CALLinter
+ OpRISCV64LoweredZero
+ OpRISCV64LoweredMove
+ OpRISCV64LoweredNilCheck
+ OpRISCV64LoweredGetClosurePtr
+ OpRISCV64LoweredGetCallerSP
+ OpRISCV64LoweredGetCallerPC
+ OpRISCV64LoweredWB
+ OpRISCV64LoweredPanicBoundsA
+ OpRISCV64LoweredPanicBoundsB
+ OpRISCV64LoweredPanicBoundsC
+ OpRISCV64FADDS
+ OpRISCV64FSUBS
+ OpRISCV64FMULS
+ OpRISCV64FDIVS
+ OpRISCV64FSQRTS
+ OpRISCV64FNEGS
+ OpRISCV64FMVSX
+ OpRISCV64FCVTSW
+ OpRISCV64FCVTSL
+ OpRISCV64FCVTWS
+ OpRISCV64FCVTLS
+ OpRISCV64FMOVWload
+ OpRISCV64FMOVWstore
+ OpRISCV64FEQS
+ OpRISCV64FNES
+ OpRISCV64FLTS
+ OpRISCV64FLES
+ OpRISCV64FADDD
+ OpRISCV64FSUBD
+ OpRISCV64FMULD
+ OpRISCV64FDIVD
+ OpRISCV64FSQRTD
+ OpRISCV64FNEGD
+ OpRISCV64FMVDX
+ OpRISCV64FCVTDW
+ OpRISCV64FCVTDL
+ OpRISCV64FCVTWD
+ OpRISCV64FCVTLD
+ OpRISCV64FCVTDS
+ OpRISCV64FCVTSD
+ OpRISCV64FMOVDload
+ OpRISCV64FMOVDstore
+ OpRISCV64FEQD
+ OpRISCV64FNED
+ OpRISCV64FLTD
+ OpRISCV64FLED
+
OpS390XFADDS
OpS390XFADD
OpS390XFSUBS
@@ -24893,6 +24998,1370 @@ var opcodeTable = [...]opInfo{
},
{
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ADDI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AADDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: riscv.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULW",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULH",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULHU",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: riscv.ADIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVU",
+ argLen: 2,
+ asm: riscv.ADIVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: riscv.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVUW",
+ argLen: 2,
+ asm: riscv.ADIVUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REM",
+ argLen: 2,
+ asm: riscv.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMU",
+ argLen: 2,
+ asm: riscv.AREMU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMW",
+ argLen: 2,
+ asm: riscv.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMUW",
+ argLen: 2,
+ asm: riscv.AREMUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymRdWr,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBconst",
+ auxType: auxInt8,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHconst",
+ auxType: auxInt16,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: riscv.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: riscv.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: riscv.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLLI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRAI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRAI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRLI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "XORI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AXORI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ORI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AORI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ANDI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AANDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SEQZ",
+ argLen: 1,
+ asm: riscv.ASEQZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SNEZ",
+ argLen: 1,
+ asm: riscv.ASNEZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLT",
+ argLen: 2,
+ asm: riscv.ASLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLTI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTU",
+ argLen: 2,
+ asm: riscv.ASLTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTIU",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLTIU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVconvert",
+ argLen: 2,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxSymOff,
+ argLen: 1,
+ call: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 524288}, // X20
+ {0, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxInt64,
+ argLen: 2,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ clobbers: 16, // X5
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ {2, 1073741748}, // X3 X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ clobbers: 112, // X5 X6 X7
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 524288}, // X20
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ },
+ clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // X7
+ {1, 134217728}, // X28
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // X6
+ {1, 64}, // X7
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: riscv.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: riscv.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: riscv.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: riscv.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMVSX",
+ argLen: 1,
+ asm: riscv.AFMVSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSW",
+ argLen: 1,
+ asm: riscv.AFCVTSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSL",
+ argLen: 1,
+ asm: riscv.AFCVTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTWS",
+ argLen: 1,
+ asm: riscv.AFCVTWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTLS",
+ argLen: 1,
+ asm: riscv.AFCVTLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FMOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FEQS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFEQS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FNES",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFNES,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLTS",
+ argLen: 2,
+ asm: riscv.AFLTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLES",
+ argLen: 2,
+ asm: riscv.AFLES,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: riscv.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: riscv.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: riscv.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: riscv.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMVDX",
+ argLen: 1,
+ asm: riscv.AFMVDX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDW",
+ argLen: 1,
+ asm: riscv.AFCVTDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDL",
+ argLen: 1,
+ asm: riscv.AFCVTDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTWD",
+ argLen: 1,
+ asm: riscv.AFCVTWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTLD",
+ argLen: 1,
+ asm: riscv.AFCVTLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: riscv.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: riscv.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FEQD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FNED",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFNED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLTD",
+ argLen: 2,
+ asm: riscv.AFLTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLED",
+ argLen: 2,
+ asm: riscv.AFLED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+
+ {
name: "FADDS",
argLen: 2,
commutative: true,
@@ -31636,6 +33105,7 @@ func (o Op) String() string { return opcodeTable[o].name }
func (o Op) UsesScratch() bool { return opcodeTable[o].usesScratch }
func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }
func (o Op) IsCall() bool { return opcodeTable[o].call }
+func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }
func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }
var registers386 = [...]Register{
@@ -32009,6 +33479,77 @@ var fpRegMaskPPC64 = regMask(576460743713488896)
var specialRegMaskPPC64 = regMask(0)
var framepointerRegPPC64 = int8(1)
var linkRegPPC64 = int8(-1)
+var registersRISCV64 = [...]Register{
+ {0, riscv.REG_X0, -1, "X0"},
+ {1, riscv.REGSP, -1, "SP"},
+ {2, riscv.REG_X3, 0, "X3"},
+ {3, riscv.REGG, -1, "g"},
+ {4, riscv.REG_X5, 1, "X5"},
+ {5, riscv.REG_X6, 2, "X6"},
+ {6, riscv.REG_X7, 3, "X7"},
+ {7, riscv.REG_X8, 4, "X8"},
+ {8, riscv.REG_X9, 5, "X9"},
+ {9, riscv.REG_X10, 6, "X10"},
+ {10, riscv.REG_X11, 7, "X11"},
+ {11, riscv.REG_X12, 8, "X12"},
+ {12, riscv.REG_X13, 9, "X13"},
+ {13, riscv.REG_X14, 10, "X14"},
+ {14, riscv.REG_X15, 11, "X15"},
+ {15, riscv.REG_X16, 12, "X16"},
+ {16, riscv.REG_X17, 13, "X17"},
+ {17, riscv.REG_X18, 14, "X18"},
+ {18, riscv.REG_X19, 15, "X19"},
+ {19, riscv.REG_X20, 16, "X20"},
+ {20, riscv.REG_X21, 17, "X21"},
+ {21, riscv.REG_X22, 18, "X22"},
+ {22, riscv.REG_X23, 19, "X23"},
+ {23, riscv.REG_X24, 20, "X24"},
+ {24, riscv.REG_X25, 21, "X25"},
+ {25, riscv.REG_X26, 22, "X26"},
+ {26, riscv.REG_X27, 23, "X27"},
+ {27, riscv.REG_X28, 24, "X28"},
+ {28, riscv.REG_X29, 25, "X29"},
+ {29, riscv.REG_X30, 26, "X30"},
+ {30, riscv.REG_X31, -1, "X31"},
+ {31, riscv.REG_F0, -1, "F0"},
+ {32, riscv.REG_F1, -1, "F1"},
+ {33, riscv.REG_F2, -1, "F2"},
+ {34, riscv.REG_F3, -1, "F3"},
+ {35, riscv.REG_F4, -1, "F4"},
+ {36, riscv.REG_F5, -1, "F5"},
+ {37, riscv.REG_F6, -1, "F6"},
+ {38, riscv.REG_F7, -1, "F7"},
+ {39, riscv.REG_F8, -1, "F8"},
+ {40, riscv.REG_F9, -1, "F9"},
+ {41, riscv.REG_F10, -1, "F10"},
+ {42, riscv.REG_F11, -1, "F11"},
+ {43, riscv.REG_F12, -1, "F12"},
+ {44, riscv.REG_F13, -1, "F13"},
+ {45, riscv.REG_F14, -1, "F14"},
+ {46, riscv.REG_F15, -1, "F15"},
+ {47, riscv.REG_F16, -1, "F16"},
+ {48, riscv.REG_F17, -1, "F17"},
+ {49, riscv.REG_F18, -1, "F18"},
+ {50, riscv.REG_F19, -1, "F19"},
+ {51, riscv.REG_F20, -1, "F20"},
+ {52, riscv.REG_F21, -1, "F21"},
+ {53, riscv.REG_F22, -1, "F22"},
+ {54, riscv.REG_F23, -1, "F23"},
+ {55, riscv.REG_F24, -1, "F24"},
+ {56, riscv.REG_F25, -1, "F25"},
+ {57, riscv.REG_F26, -1, "F26"},
+ {58, riscv.REG_F27, -1, "F27"},
+ {59, riscv.REG_F28, -1, "F28"},
+ {60, riscv.REG_F29, -1, "F29"},
+ {61, riscv.REG_F30, -1, "F30"},
+ {62, riscv.REG_F31, -1, "F31"},
+ {63, 0, -1, "SB"},
+}
+var gpRegMaskRISCV64 = regMask(1073741812)
+var fpRegMaskRISCV64 = regMask(9223372034707292160)
+var specialRegMaskRISCV64 = regMask(0)
+var framepointerRegRISCV64 = int8(-1)
+var linkRegRISCV64 = int8(0)
var registersS390X = [...]Register{
{0, s390x.REG_R0, 0, "R0"},
{1, s390x.REG_R1, 1, "R1"},
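
The regmask comments threaded through the opInfo entries above can be checked against this table: bit i of a mask selects the register at index i. A small decoder, assuming only the integer registers at bit positions 0 through 30 and the value of gpRegMaskRISCV64 above:

	package main

	import "fmt"

	func main() {
		// Names in table order; the slice index is the bit position.
		names := []string{
			"X0", "SP", "X3", "g", "X5", "X6", "X7", "X8", "X9", "X10",
			"X11", "X12", "X13", "X14", "X15", "X16", "X17", "X18", "X19",
			"X20", "X21", "X22", "X23", "X24", "X25", "X26", "X27", "X28",
			"X29", "X30", "X31",
		}
		const gpMask = uint64(1073741812) // gpRegMaskRISCV64
		for i, n := range names {
			if gpMask&(1<<uint(i)) != 0 {
				fmt.Print(n, " ")
			}
		}
		fmt.Println()
		// Prints X3 and X5 through X30: the allocatable general registers,
		// with X0, SP, g and X31 excluded.
	}
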
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 65f1f12f75..63b3652366 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -9480,7 +9480,7 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool {
return true
}
// match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
@@ -9493,7 +9493,7 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool {
off2 := p.AuxInt
sym2 := p.Aux
ptr := p.Args[0]
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
@@ -9504,7 +9504,7 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool {
return true
}
// match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
+ // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0
// result: (MOVDload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
@@ -9516,7 +9516,7 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool {
}
off2 := v_0.AuxInt
x := v_0.Args[0]
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
@@ -9554,7 +9554,7 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool {
// match: (MOVDloadidx ptr (MOVDconst [c]) mem)
- // cond: is16Bit(c)
+ // cond: is16Bit(c) && c%4 == 0
// result: (MOVDload [c] ptr mem)
for {
mem := v.Args[2]
@@ -9564,7 +9564,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool {
break
}
c := v_1.AuxInt
- if !(is16Bit(c)) {
+ if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
@@ -9574,7 +9574,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool {
return true
}
// match: (MOVDloadidx (MOVDconst [c]) ptr mem)
- // cond: is16Bit(c)
+ // cond: is16Bit(c) && c%4 == 0
// result: (MOVDload [c] ptr mem)
for {
mem := v.Args[2]
@@ -9584,7 +9584,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool {
}
c := v_0.AuxInt
ptr := v.Args[1]
- if !(is16Bit(c)) {
+ if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
@@ -9617,7 +9617,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
return true
}
// match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
+ // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0
// result: (MOVDstore [off1+off2] {sym} x val mem)
for {
off1 := v.AuxInt
@@ -9630,7 +9630,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
@@ -9642,7 +9642,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
return true
}
// match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
@@ -9656,7 +9656,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
sym2 := p.Aux
ptr := p.Args[0]
val := v.Args[1]
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
@@ -9713,7 +9713,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool {
// match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
- // cond: is16Bit(c)
+ // cond: is16Bit(c) && c%4 == 0
// result: (MOVDstore [c] ptr val mem)
for {
mem := v.Args[3]
@@ -9724,7 +9724,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool {
}
c := v_1.AuxInt
val := v.Args[2]
- if !(is16Bit(c)) {
+ if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
@@ -9735,7 +9735,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool {
return true
}
// match: (MOVDstoreidx (MOVDconst [c]) ptr val mem)
- // cond: is16Bit(c)
+ // cond: is16Bit(c) && c%4 == 0
// result: (MOVDstore [c] ptr val mem)
for {
mem := v.Args[3]
@@ -9746,7 +9746,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool {
c := v_0.AuxInt
ptr := v.Args[1]
val := v.Args[2]
- if !(is16Bit(c)) {
+ if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
@@ -9760,7 +9760,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool {
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
+ // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0
// result: (MOVDstorezero [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
@@ -9772,7 +9772,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool {
}
off2 := v_0.AuxInt
x := v_0.Args[0]
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
@@ -9783,7 +9783,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool {
return true
}
// match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
- // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := v.AuxInt
@@ -9796,7 +9796,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool {
off2 := p.AuxInt
sym2 := p.Aux
x := p.Args[0]
- if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
@@ -11548,7 +11548,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_20(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool {
// match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
@@ -11561,7 +11561,7 @@ func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool {
off2 := p.AuxInt
sym2 := p.Aux
ptr := p.Args[0]
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
@@ -11572,7 +11572,7 @@ func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool {
return true
}
// match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
+ // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0
// result: (MOVWload [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
@@ -11584,7 +11584,7 @@ func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool {
}
off2 := v_0.AuxInt
x := v_0.Args[0]
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
@@ -11622,7 +11622,7 @@ func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool {
// match: (MOVWloadidx ptr (MOVDconst [c]) mem)
- // cond: is16Bit(c)
+ // cond: is16Bit(c) && c%4 == 0
// result: (MOVWload [c] ptr mem)
for {
mem := v.Args[2]
@@ -11632,7 +11632,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool {
break
}
c := v_1.AuxInt
- if !(is16Bit(c)) {
+ if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
@@ -11642,7 +11642,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool {
return true
}
// match: (MOVWloadidx (MOVDconst [c]) ptr mem)
- // cond: is16Bit(c)
+ // cond: is16Bit(c) && c%4 == 0
// result: (MOVWload [c] ptr mem)
for {
mem := v.Args[2]
@@ -11652,7 +11652,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool {
}
c := v_0.AuxInt
ptr := v.Args[1]
- if !(is16Bit(c)) {
+ if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
new file mode 100644
index 0000000000..8ccfaa54f9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -0,0 +1,5561 @@
+// Code generated from gen/RISCV64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValueRISCV64(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ return rewriteValueRISCV64_OpAdd16_0(v)
+ case OpAdd32:
+ return rewriteValueRISCV64_OpAdd32_0(v)
+ case OpAdd32F:
+ return rewriteValueRISCV64_OpAdd32F_0(v)
+ case OpAdd64:
+ return rewriteValueRISCV64_OpAdd64_0(v)
+ case OpAdd64F:
+ return rewriteValueRISCV64_OpAdd64F_0(v)
+ case OpAdd8:
+ return rewriteValueRISCV64_OpAdd8_0(v)
+ case OpAddPtr:
+ return rewriteValueRISCV64_OpAddPtr_0(v)
+ case OpAddr:
+ return rewriteValueRISCV64_OpAddr_0(v)
+ case OpAnd16:
+ return rewriteValueRISCV64_OpAnd16_0(v)
+ case OpAnd32:
+ return rewriteValueRISCV64_OpAnd32_0(v)
+ case OpAnd64:
+ return rewriteValueRISCV64_OpAnd64_0(v)
+ case OpAnd8:
+ return rewriteValueRISCV64_OpAnd8_0(v)
+ case OpAndB:
+ return rewriteValueRISCV64_OpAndB_0(v)
+ case OpAvg64u:
+ return rewriteValueRISCV64_OpAvg64u_0(v)
+ case OpClosureCall:
+ return rewriteValueRISCV64_OpClosureCall_0(v)
+ case OpCom16:
+ return rewriteValueRISCV64_OpCom16_0(v)
+ case OpCom32:
+ return rewriteValueRISCV64_OpCom32_0(v)
+ case OpCom64:
+ return rewriteValueRISCV64_OpCom64_0(v)
+ case OpCom8:
+ return rewriteValueRISCV64_OpCom8_0(v)
+ case OpConst16:
+ return rewriteValueRISCV64_OpConst16_0(v)
+ case OpConst32:
+ return rewriteValueRISCV64_OpConst32_0(v)
+ case OpConst32F:
+ return rewriteValueRISCV64_OpConst32F_0(v)
+ case OpConst64:
+ return rewriteValueRISCV64_OpConst64_0(v)
+ case OpConst64F:
+ return rewriteValueRISCV64_OpConst64F_0(v)
+ case OpConst8:
+ return rewriteValueRISCV64_OpConst8_0(v)
+ case OpConstBool:
+ return rewriteValueRISCV64_OpConstBool_0(v)
+ case OpConstNil:
+ return rewriteValueRISCV64_OpConstNil_0(v)
+ case OpConvert:
+ return rewriteValueRISCV64_OpConvert_0(v)
+ case OpCvt32Fto32:
+ return rewriteValueRISCV64_OpCvt32Fto32_0(v)
+ case OpCvt32Fto64:
+ return rewriteValueRISCV64_OpCvt32Fto64_0(v)
+ case OpCvt32Fto64F:
+ return rewriteValueRISCV64_OpCvt32Fto64F_0(v)
+ case OpCvt32to32F:
+ return rewriteValueRISCV64_OpCvt32to32F_0(v)
+ case OpCvt32to64F:
+ return rewriteValueRISCV64_OpCvt32to64F_0(v)
+ case OpCvt64Fto32:
+ return rewriteValueRISCV64_OpCvt64Fto32_0(v)
+ case OpCvt64Fto32F:
+ return rewriteValueRISCV64_OpCvt64Fto32F_0(v)
+ case OpCvt64Fto64:
+ return rewriteValueRISCV64_OpCvt64Fto64_0(v)
+ case OpCvt64to32F:
+ return rewriteValueRISCV64_OpCvt64to32F_0(v)
+ case OpCvt64to64F:
+ return rewriteValueRISCV64_OpCvt64to64F_0(v)
+ case OpDiv16:
+ return rewriteValueRISCV64_OpDiv16_0(v)
+ case OpDiv16u:
+ return rewriteValueRISCV64_OpDiv16u_0(v)
+ case OpDiv32:
+ return rewriteValueRISCV64_OpDiv32_0(v)
+ case OpDiv32F:
+ return rewriteValueRISCV64_OpDiv32F_0(v)
+ case OpDiv32u:
+ return rewriteValueRISCV64_OpDiv32u_0(v)
+ case OpDiv64:
+ return rewriteValueRISCV64_OpDiv64_0(v)
+ case OpDiv64F:
+ return rewriteValueRISCV64_OpDiv64F_0(v)
+ case OpDiv64u:
+ return rewriteValueRISCV64_OpDiv64u_0(v)
+ case OpDiv8:
+ return rewriteValueRISCV64_OpDiv8_0(v)
+ case OpDiv8u:
+ return rewriteValueRISCV64_OpDiv8u_0(v)
+ case OpEq16:
+ return rewriteValueRISCV64_OpEq16_0(v)
+ case OpEq32:
+ return rewriteValueRISCV64_OpEq32_0(v)
+ case OpEq32F:
+ return rewriteValueRISCV64_OpEq32F_0(v)
+ case OpEq64:
+ return rewriteValueRISCV64_OpEq64_0(v)
+ case OpEq64F:
+ return rewriteValueRISCV64_OpEq64F_0(v)
+ case OpEq8:
+ return rewriteValueRISCV64_OpEq8_0(v)
+ case OpEqB:
+ return rewriteValueRISCV64_OpEqB_0(v)
+ case OpEqPtr:
+ return rewriteValueRISCV64_OpEqPtr_0(v)
+ case OpGeq16:
+ return rewriteValueRISCV64_OpGeq16_0(v)
+ case OpGeq16U:
+ return rewriteValueRISCV64_OpGeq16U_0(v)
+ case OpGeq32:
+ return rewriteValueRISCV64_OpGeq32_0(v)
+ case OpGeq32F:
+ return rewriteValueRISCV64_OpGeq32F_0(v)
+ case OpGeq32U:
+ return rewriteValueRISCV64_OpGeq32U_0(v)
+ case OpGeq64:
+ return rewriteValueRISCV64_OpGeq64_0(v)
+ case OpGeq64F:
+ return rewriteValueRISCV64_OpGeq64F_0(v)
+ case OpGeq64U:
+ return rewriteValueRISCV64_OpGeq64U_0(v)
+ case OpGeq8:
+ return rewriteValueRISCV64_OpGeq8_0(v)
+ case OpGeq8U:
+ return rewriteValueRISCV64_OpGeq8U_0(v)
+ case OpGetCallerPC:
+ return rewriteValueRISCV64_OpGetCallerPC_0(v)
+ case OpGetCallerSP:
+ return rewriteValueRISCV64_OpGetCallerSP_0(v)
+ case OpGetClosurePtr:
+ return rewriteValueRISCV64_OpGetClosurePtr_0(v)
+ case OpGreater16:
+ return rewriteValueRISCV64_OpGreater16_0(v)
+ case OpGreater16U:
+ return rewriteValueRISCV64_OpGreater16U_0(v)
+ case OpGreater32:
+ return rewriteValueRISCV64_OpGreater32_0(v)
+ case OpGreater32F:
+ return rewriteValueRISCV64_OpGreater32F_0(v)
+ case OpGreater32U:
+ return rewriteValueRISCV64_OpGreater32U_0(v)
+ case OpGreater64:
+ return rewriteValueRISCV64_OpGreater64_0(v)
+ case OpGreater64F:
+ return rewriteValueRISCV64_OpGreater64F_0(v)
+ case OpGreater64U:
+ return rewriteValueRISCV64_OpGreater64U_0(v)
+ case OpGreater8:
+ return rewriteValueRISCV64_OpGreater8_0(v)
+ case OpGreater8U:
+ return rewriteValueRISCV64_OpGreater8U_0(v)
+ case OpHmul32:
+ return rewriteValueRISCV64_OpHmul32_0(v)
+ case OpHmul32u:
+ return rewriteValueRISCV64_OpHmul32u_0(v)
+ case OpHmul64:
+ return rewriteValueRISCV64_OpHmul64_0(v)
+ case OpHmul64u:
+ return rewriteValueRISCV64_OpHmul64u_0(v)
+ case OpInterCall:
+ return rewriteValueRISCV64_OpInterCall_0(v)
+ case OpIsInBounds:
+ return rewriteValueRISCV64_OpIsInBounds_0(v)
+ case OpIsNonNil:
+ return rewriteValueRISCV64_OpIsNonNil_0(v)
+ case OpIsSliceInBounds:
+ return rewriteValueRISCV64_OpIsSliceInBounds_0(v)
+ case OpLeq16:
+ return rewriteValueRISCV64_OpLeq16_0(v)
+ case OpLeq16U:
+ return rewriteValueRISCV64_OpLeq16U_0(v)
+ case OpLeq32:
+ return rewriteValueRISCV64_OpLeq32_0(v)
+ case OpLeq32F:
+ return rewriteValueRISCV64_OpLeq32F_0(v)
+ case OpLeq32U:
+ return rewriteValueRISCV64_OpLeq32U_0(v)
+ case OpLeq64:
+ return rewriteValueRISCV64_OpLeq64_0(v)
+ case OpLeq64F:
+ return rewriteValueRISCV64_OpLeq64F_0(v)
+ case OpLeq64U:
+ return rewriteValueRISCV64_OpLeq64U_0(v)
+ case OpLeq8:
+ return rewriteValueRISCV64_OpLeq8_0(v)
+ case OpLeq8U:
+ return rewriteValueRISCV64_OpLeq8U_0(v)
+ case OpLess16:
+ return rewriteValueRISCV64_OpLess16_0(v)
+ case OpLess16U:
+ return rewriteValueRISCV64_OpLess16U_0(v)
+ case OpLess32:
+ return rewriteValueRISCV64_OpLess32_0(v)
+ case OpLess32F:
+ return rewriteValueRISCV64_OpLess32F_0(v)
+ case OpLess32U:
+ return rewriteValueRISCV64_OpLess32U_0(v)
+ case OpLess64:
+ return rewriteValueRISCV64_OpLess64_0(v)
+ case OpLess64F:
+ return rewriteValueRISCV64_OpLess64F_0(v)
+ case OpLess64U:
+ return rewriteValueRISCV64_OpLess64U_0(v)
+ case OpLess8:
+ return rewriteValueRISCV64_OpLess8_0(v)
+ case OpLess8U:
+ return rewriteValueRISCV64_OpLess8U_0(v)
+ case OpLoad:
+ return rewriteValueRISCV64_OpLoad_0(v)
+ case OpLocalAddr:
+ return rewriteValueRISCV64_OpLocalAddr_0(v)
+ case OpLsh16x16:
+ return rewriteValueRISCV64_OpLsh16x16_0(v)
+ case OpLsh16x32:
+ return rewriteValueRISCV64_OpLsh16x32_0(v)
+ case OpLsh16x64:
+ return rewriteValueRISCV64_OpLsh16x64_0(v)
+ case OpLsh16x8:
+ return rewriteValueRISCV64_OpLsh16x8_0(v)
+ case OpLsh32x16:
+ return rewriteValueRISCV64_OpLsh32x16_0(v)
+ case OpLsh32x32:
+ return rewriteValueRISCV64_OpLsh32x32_0(v)
+ case OpLsh32x64:
+ return rewriteValueRISCV64_OpLsh32x64_0(v)
+ case OpLsh32x8:
+ return rewriteValueRISCV64_OpLsh32x8_0(v)
+ case OpLsh64x16:
+ return rewriteValueRISCV64_OpLsh64x16_0(v)
+ case OpLsh64x32:
+ return rewriteValueRISCV64_OpLsh64x32_0(v)
+ case OpLsh64x64:
+ return rewriteValueRISCV64_OpLsh64x64_0(v)
+ case OpLsh64x8:
+ return rewriteValueRISCV64_OpLsh64x8_0(v)
+ case OpLsh8x16:
+ return rewriteValueRISCV64_OpLsh8x16_0(v)
+ case OpLsh8x32:
+ return rewriteValueRISCV64_OpLsh8x32_0(v)
+ case OpLsh8x64:
+ return rewriteValueRISCV64_OpLsh8x64_0(v)
+ case OpLsh8x8:
+ return rewriteValueRISCV64_OpLsh8x8_0(v)
+ case OpMod16:
+ return rewriteValueRISCV64_OpMod16_0(v)
+ case OpMod16u:
+ return rewriteValueRISCV64_OpMod16u_0(v)
+ case OpMod32:
+ return rewriteValueRISCV64_OpMod32_0(v)
+ case OpMod32u:
+ return rewriteValueRISCV64_OpMod32u_0(v)
+ case OpMod64:
+ return rewriteValueRISCV64_OpMod64_0(v)
+ case OpMod64u:
+ return rewriteValueRISCV64_OpMod64u_0(v)
+ case OpMod8:
+ return rewriteValueRISCV64_OpMod8_0(v)
+ case OpMod8u:
+ return rewriteValueRISCV64_OpMod8u_0(v)
+ case OpMove:
+ return rewriteValueRISCV64_OpMove_0(v)
+ case OpMul16:
+ return rewriteValueRISCV64_OpMul16_0(v)
+ case OpMul32:
+ return rewriteValueRISCV64_OpMul32_0(v)
+ case OpMul32F:
+ return rewriteValueRISCV64_OpMul32F_0(v)
+ case OpMul64:
+ return rewriteValueRISCV64_OpMul64_0(v)
+ case OpMul64F:
+ return rewriteValueRISCV64_OpMul64F_0(v)
+ case OpMul8:
+ return rewriteValueRISCV64_OpMul8_0(v)
+ case OpNeg16:
+ return rewriteValueRISCV64_OpNeg16_0(v)
+ case OpNeg32:
+ return rewriteValueRISCV64_OpNeg32_0(v)
+ case OpNeg32F:
+ return rewriteValueRISCV64_OpNeg32F_0(v)
+ case OpNeg64:
+ return rewriteValueRISCV64_OpNeg64_0(v)
+ case OpNeg64F:
+ return rewriteValueRISCV64_OpNeg64F_0(v)
+ case OpNeg8:
+ return rewriteValueRISCV64_OpNeg8_0(v)
+ case OpNeq16:
+ return rewriteValueRISCV64_OpNeq16_0(v)
+ case OpNeq32:
+ return rewriteValueRISCV64_OpNeq32_0(v)
+ case OpNeq32F:
+ return rewriteValueRISCV64_OpNeq32F_0(v)
+ case OpNeq64:
+ return rewriteValueRISCV64_OpNeq64_0(v)
+ case OpNeq64F:
+ return rewriteValueRISCV64_OpNeq64F_0(v)
+ case OpNeq8:
+ return rewriteValueRISCV64_OpNeq8_0(v)
+ case OpNeqB:
+ return rewriteValueRISCV64_OpNeqB_0(v)
+ case OpNeqPtr:
+ return rewriteValueRISCV64_OpNeqPtr_0(v)
+ case OpNilCheck:
+ return rewriteValueRISCV64_OpNilCheck_0(v)
+ case OpNot:
+ return rewriteValueRISCV64_OpNot_0(v)
+ case OpOffPtr:
+ return rewriteValueRISCV64_OpOffPtr_0(v)
+ case OpOr16:
+ return rewriteValueRISCV64_OpOr16_0(v)
+ case OpOr32:
+ return rewriteValueRISCV64_OpOr32_0(v)
+ case OpOr64:
+ return rewriteValueRISCV64_OpOr64_0(v)
+ case OpOr8:
+ return rewriteValueRISCV64_OpOr8_0(v)
+ case OpOrB:
+ return rewriteValueRISCV64_OpOrB_0(v)
+ case OpPanicBounds:
+ return rewriteValueRISCV64_OpPanicBounds_0(v)
+ case OpRISCV64ADD:
+ return rewriteValueRISCV64_OpRISCV64ADD_0(v)
+ case OpRISCV64ADDI:
+ return rewriteValueRISCV64_OpRISCV64ADDI_0(v)
+ case OpRISCV64MOVBUload:
+ return rewriteValueRISCV64_OpRISCV64MOVBUload_0(v)
+ case OpRISCV64MOVBload:
+ return rewriteValueRISCV64_OpRISCV64MOVBload_0(v)
+ case OpRISCV64MOVBstore:
+ return rewriteValueRISCV64_OpRISCV64MOVBstore_0(v)
+ case OpRISCV64MOVDconst:
+ return rewriteValueRISCV64_OpRISCV64MOVDconst_0(v)
+ case OpRISCV64MOVDload:
+ return rewriteValueRISCV64_OpRISCV64MOVDload_0(v)
+ case OpRISCV64MOVDstore:
+ return rewriteValueRISCV64_OpRISCV64MOVDstore_0(v)
+ case OpRISCV64MOVHUload:
+ return rewriteValueRISCV64_OpRISCV64MOVHUload_0(v)
+ case OpRISCV64MOVHload:
+ return rewriteValueRISCV64_OpRISCV64MOVHload_0(v)
+ case OpRISCV64MOVHstore:
+ return rewriteValueRISCV64_OpRISCV64MOVHstore_0(v)
+ case OpRISCV64MOVWUload:
+ return rewriteValueRISCV64_OpRISCV64MOVWUload_0(v)
+ case OpRISCV64MOVWload:
+ return rewriteValueRISCV64_OpRISCV64MOVWload_0(v)
+ case OpRISCV64MOVWstore:
+ return rewriteValueRISCV64_OpRISCV64MOVWstore_0(v)
+ case OpRotateLeft16:
+ return rewriteValueRISCV64_OpRotateLeft16_0(v)
+ case OpRotateLeft32:
+ return rewriteValueRISCV64_OpRotateLeft32_0(v)
+ case OpRotateLeft64:
+ return rewriteValueRISCV64_OpRotateLeft64_0(v)
+ case OpRotateLeft8:
+ return rewriteValueRISCV64_OpRotateLeft8_0(v)
+ case OpRound32F:
+ return rewriteValueRISCV64_OpRound32F_0(v)
+ case OpRound64F:
+ return rewriteValueRISCV64_OpRound64F_0(v)
+ case OpRsh16Ux16:
+ return rewriteValueRISCV64_OpRsh16Ux16_0(v)
+ case OpRsh16Ux32:
+ return rewriteValueRISCV64_OpRsh16Ux32_0(v)
+ case OpRsh16Ux64:
+ return rewriteValueRISCV64_OpRsh16Ux64_0(v)
+ case OpRsh16Ux8:
+ return rewriteValueRISCV64_OpRsh16Ux8_0(v)
+ case OpRsh16x16:
+ return rewriteValueRISCV64_OpRsh16x16_0(v)
+ case OpRsh16x32:
+ return rewriteValueRISCV64_OpRsh16x32_0(v)
+ case OpRsh16x64:
+ return rewriteValueRISCV64_OpRsh16x64_0(v)
+ case OpRsh16x8:
+ return rewriteValueRISCV64_OpRsh16x8_0(v)
+ case OpRsh32Ux16:
+ return rewriteValueRISCV64_OpRsh32Ux16_0(v)
+ case OpRsh32Ux32:
+ return rewriteValueRISCV64_OpRsh32Ux32_0(v)
+ case OpRsh32Ux64:
+ return rewriteValueRISCV64_OpRsh32Ux64_0(v)
+ case OpRsh32Ux8:
+ return rewriteValueRISCV64_OpRsh32Ux8_0(v)
+ case OpRsh32x16:
+ return rewriteValueRISCV64_OpRsh32x16_0(v)
+ case OpRsh32x32:
+ return rewriteValueRISCV64_OpRsh32x32_0(v)
+ case OpRsh32x64:
+ return rewriteValueRISCV64_OpRsh32x64_0(v)
+ case OpRsh32x8:
+ return rewriteValueRISCV64_OpRsh32x8_0(v)
+ case OpRsh64Ux16:
+ return rewriteValueRISCV64_OpRsh64Ux16_0(v)
+ case OpRsh64Ux32:
+ return rewriteValueRISCV64_OpRsh64Ux32_0(v)
+ case OpRsh64Ux64:
+ return rewriteValueRISCV64_OpRsh64Ux64_0(v)
+ case OpRsh64Ux8:
+ return rewriteValueRISCV64_OpRsh64Ux8_0(v)
+ case OpRsh64x16:
+ return rewriteValueRISCV64_OpRsh64x16_0(v)
+ case OpRsh64x32:
+ return rewriteValueRISCV64_OpRsh64x32_0(v)
+ case OpRsh64x64:
+ return rewriteValueRISCV64_OpRsh64x64_0(v)
+ case OpRsh64x8:
+ return rewriteValueRISCV64_OpRsh64x8_0(v)
+ case OpRsh8Ux16:
+ return rewriteValueRISCV64_OpRsh8Ux16_0(v)
+ case OpRsh8Ux32:
+ return rewriteValueRISCV64_OpRsh8Ux32_0(v)
+ case OpRsh8Ux64:
+ return rewriteValueRISCV64_OpRsh8Ux64_0(v)
+ case OpRsh8Ux8:
+ return rewriteValueRISCV64_OpRsh8Ux8_0(v)
+ case OpRsh8x16:
+ return rewriteValueRISCV64_OpRsh8x16_0(v)
+ case OpRsh8x32:
+ return rewriteValueRISCV64_OpRsh8x32_0(v)
+ case OpRsh8x64:
+ return rewriteValueRISCV64_OpRsh8x64_0(v)
+ case OpRsh8x8:
+ return rewriteValueRISCV64_OpRsh8x8_0(v)
+ case OpSignExt16to32:
+ return rewriteValueRISCV64_OpSignExt16to32_0(v)
+ case OpSignExt16to64:
+ return rewriteValueRISCV64_OpSignExt16to64_0(v)
+ case OpSignExt32to64:
+ return rewriteValueRISCV64_OpSignExt32to64_0(v)
+ case OpSignExt8to16:
+ return rewriteValueRISCV64_OpSignExt8to16_0(v)
+ case OpSignExt8to32:
+ return rewriteValueRISCV64_OpSignExt8to32_0(v)
+ case OpSignExt8to64:
+ return rewriteValueRISCV64_OpSignExt8to64_0(v)
+ case OpSlicemask:
+ return rewriteValueRISCV64_OpSlicemask_0(v)
+ case OpSqrt:
+ return rewriteValueRISCV64_OpSqrt_0(v)
+ case OpStaticCall:
+ return rewriteValueRISCV64_OpStaticCall_0(v)
+ case OpStore:
+ return rewriteValueRISCV64_OpStore_0(v)
+ case OpSub16:
+ return rewriteValueRISCV64_OpSub16_0(v)
+ case OpSub32:
+ return rewriteValueRISCV64_OpSub32_0(v)
+ case OpSub32F:
+ return rewriteValueRISCV64_OpSub32F_0(v)
+ case OpSub64:
+ return rewriteValueRISCV64_OpSub64_0(v)
+ case OpSub64F:
+ return rewriteValueRISCV64_OpSub64F_0(v)
+ case OpSub8:
+ return rewriteValueRISCV64_OpSub8_0(v)
+ case OpSubPtr:
+ return rewriteValueRISCV64_OpSubPtr_0(v)
+ case OpTrunc16to8:
+ return rewriteValueRISCV64_OpTrunc16to8_0(v)
+ case OpTrunc32to16:
+ return rewriteValueRISCV64_OpTrunc32to16_0(v)
+ case OpTrunc32to8:
+ return rewriteValueRISCV64_OpTrunc32to8_0(v)
+ case OpTrunc64to16:
+ return rewriteValueRISCV64_OpTrunc64to16_0(v)
+ case OpTrunc64to32:
+ return rewriteValueRISCV64_OpTrunc64to32_0(v)
+ case OpTrunc64to8:
+ return rewriteValueRISCV64_OpTrunc64to8_0(v)
+ case OpWB:
+ return rewriteValueRISCV64_OpWB_0(v)
+ case OpXor16:
+ return rewriteValueRISCV64_OpXor16_0(v)
+ case OpXor32:
+ return rewriteValueRISCV64_OpXor32_0(v)
+ case OpXor64:
+ return rewriteValueRISCV64_OpXor64_0(v)
+ case OpXor8:
+ return rewriteValueRISCV64_OpXor8_0(v)
+ case OpZero:
+ return rewriteValueRISCV64_OpZero_0(v)
+ case OpZeroExt16to32:
+ return rewriteValueRISCV64_OpZeroExt16to32_0(v)
+ case OpZeroExt16to64:
+ return rewriteValueRISCV64_OpZeroExt16to64_0(v)
+ case OpZeroExt32to64:
+ return rewriteValueRISCV64_OpZeroExt32to64_0(v)
+ case OpZeroExt8to16:
+ return rewriteValueRISCV64_OpZeroExt8to16_0(v)
+ case OpZeroExt8to32:
+ return rewriteValueRISCV64_OpZeroExt8to32_0(v)
+ case OpZeroExt8to64:
+ return rewriteValueRISCV64_OpZeroExt8to64_0(v)
+ }
+ return false
+}
+func rewriteValueRISCV64_OpAdd16_0(v *Value) bool {
+ // match: (Add16 x y)
+ // result: (ADD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAdd32_0(v *Value) bool {
+ // match: (Add32 x y)
+ // result: (ADD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAdd32F_0(v *Value) bool {
+ // match: (Add32F x y)
+ // result: (FADDS x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAdd64_0(v *Value) bool {
+ // match: (Add64 x y)
+ // result: (ADD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAdd64F_0(v *Value) bool {
+ // match: (Add64F x y)
+ // result: (FADDD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FADDD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAdd8_0(v *Value) bool {
+ // match: (Add8 x y)
+ // result: (ADD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAddPtr_0(v *Value) bool {
+ // match: (AddPtr x y)
+ // result: (ADD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAddr_0(v *Value) bool {
+ // match: (Addr {sym} base)
+ // result: (MOVaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpRISCV64MOVaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAnd16_0(v *Value) bool {
+ // match: (And16 x y)
+ // result: (AND x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAnd32_0(v *Value) bool {
+ // match: (And32 x y)
+ // result: (AND x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAnd64_0(v *Value) bool {
+ // match: (And64 x y)
+ // result: (AND x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAnd8_0(v *Value) bool {
+ // match: (And8 x y)
+ // result: (AND x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAndB_0(v *Value) bool {
+ // match: (AndB x y)
+ // result: (AND x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
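+// Avg64u is computed without overflow via the identity
+// (x+y)/2 == (x>>1) + (y>>1) + (x&y&1): halve each operand, then add
+// back the carry that the two low bits would have produced.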
+func rewriteValueRISCV64_OpAvg64u_0(v *Value) bool {
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
+ v1.AuxInt = 1
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
+ v2.AuxInt = 1
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t)
+ v3.AuxInt = 1
+ v4 := b.NewValue0(v.Pos, OpRISCV64AND, t)
+ v4.AddArg(x)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpClosureCall_0(v *Value) bool {
+ // match: (ClosureCall [argwid] entry closure mem)
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[2]
+ entry := v.Args[0]
+ closure := v.Args[1]
+ v.reset(OpRISCV64CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
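+// RISC-V has no bitwise-NOT instruction; Com* is an XOR with all ones
+// (XORI [-1]), matching the standard "not" pseudo-instruction.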
+func rewriteValueRISCV64_OpCom16_0(v *Value) bool {
+ // match: (Com16 x)
+ // result: (XORI [int64(-1)] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64(-1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCom32_0(v *Value) bool {
+ // match: (Com32 x)
+ // result: (XORI [int64(-1)] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64(-1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCom64_0(v *Value) bool {
+ // match: (Com64 x)
+ // result: (XORI [int64(-1)] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64(-1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCom8_0(v *Value) bool {
+ // match: (Com8 x)
+ // result: (XORI [int64(-1)] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64(-1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst16_0(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVHconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpRISCV64MOVHconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst32_0(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpRISCV64MOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
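+// Float constants carry their value in AuxInt as float64 bits, so
+// Const32F decodes the float64, narrows it to float32, materializes the
+// 32-bit pattern in an integer register, and moves it across with FMVSX.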
+func rewriteValueRISCV64_OpConst32F_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const32F [val])
+ // result: (FMVSX (MOVWconst [int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))]))
+ for {
+ val := v.AuxInt
+ v.reset(OpRISCV64FMVSX)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v0.AuxInt = int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst64_0(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst64F_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const64F [val])
+ // result: (FMVDX (MOVDconst [val]))
+ for {
+ val := v.AuxInt
+ v.reset(OpRISCV64FMVDX)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = val
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst8_0(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVBconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpRISCV64MOVBconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConstBool_0(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVBconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpRISCV64MOVBconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConstNil_0(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConvert_0(v *Value) bool {
+ // match: (Convert x mem)
+ // result: (MOVconvert x mem)
+ for {
+ mem := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64MOVconvert)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt32Fto32_0(v *Value) bool {
+ // match: (Cvt32Fto32 x)
+ // result: (FCVTWS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTWS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt32Fto64_0(v *Value) bool {
+ // match: (Cvt32Fto64 x)
+ // result: (FCVTLS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTLS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt32Fto64F_0(v *Value) bool {
+ // match: (Cvt32Fto64F x)
+ // result: (FCVTDS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTDS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt32to32F_0(v *Value) bool {
+ // match: (Cvt32to32F x)
+ // result: (FCVTSW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTSW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt32to64F_0(v *Value) bool {
+ // match: (Cvt32to64F x)
+ // result: (FCVTDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt64Fto32_0(v *Value) bool {
+ // match: (Cvt64Fto32 x)
+ // result: (FCVTWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt64Fto32F_0(v *Value) bool {
+ // match: (Cvt64Fto32F x)
+ // result: (FCVTSD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTSD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt64Fto64_0(v *Value) bool {
+ // match: (Cvt64Fto64 x)
+ // result: (FCVTLD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTLD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt64to32F_0(v *Value) bool {
+ // match: (Cvt64to32F x)
+ // result: (FCVTSL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTSL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpCvt64to64F_0(v *Value) bool {
+ // match: (Cvt64to64F x)
+ // result: (FCVTDL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FCVTDL)
+ v.AddArg(x)
+ return true
+ }
+}
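+// The M extension provides only 32- and 64-bit divides, so 8- and
+// 16-bit division widens both operands first: sign-extension for the
+// signed forms, zero-extension for the unsigned ones.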
+func rewriteValueRISCV64_OpDiv16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv16u_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64DIVUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv32_0(v *Value) bool {
+ // match: (Div32 x y)
+ // result: (DIVW x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64DIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv32F_0(v *Value) bool {
+ // match: (Div32F x y)
+ // result: (FDIVS x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FDIVS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv32u_0(v *Value) bool {
+ // match: (Div32u x y)
+ // result: (DIVUW x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64DIVUW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv64_0(v *Value) bool {
+ // match: (Div64 x y)
+ // result: (DIV x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64DIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv64F_0(v *Value) bool {
+ // match: (Div64F x y)
+ // result: (FDIVD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FDIVD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv64u_0(v *Value) bool {
+ // match: (Div64u x y)
+ // result: (DIVU x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64DIVU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv8u_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64DIVUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
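+// Equality is "subtract, then test for zero" (SEQZ). Sub-word
+// differences are zero-extended first so that stale upper register bits
+// cannot affect the test.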
+func rewriteValueRISCV64_OpEq16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SEQZ (ZeroExt32to64 (SUB <x.Type> x y)))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq32F_0(v *Value) bool {
+ // match: (Eq32F x y)
+ // result: (FEQS x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FEQS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq64_0(v *Value) bool {
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (SEQZ (SUB <x.Type> x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq64F_0(v *Value) bool {
+ // match: (Eq64F x y)
+ // result: (FEQD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FEQD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
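+// Booleans are materialized as 0 or 1, so EqB is the complement of XOR:
+// (x XOR y) XOR 1.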
+func rewriteValueRISCV64_OpEqB_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XORI [1] (XOR <typ.Bool> x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = 1
+ v0 := b.NewValue0(v.Pos, OpRISCV64XOR, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEqPtr_0(v *Value) bool {
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (SEQZ (SUB <x.Type> x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
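+// Only set-less-than comparisons exist in the ISA, so integer >= lowers
+// to the negation of <, while the float forms instead swap the operands
+// of FLES/FLED (negating a float < would mishandle NaN).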
+func rewriteValueRISCV64_OpGeq16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Geq16 x y)
+ // result: (Not (Less16 x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq16U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Geq16U x y)
+ // result: (Not (Less16U x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Geq32 x y)
+ // result: (Not (Less32 x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq32F_0(v *Value) bool {
+ // match: (Geq32F x y)
+ // result: (FLES y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FLES)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq32U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Geq32U x y)
+ // result: (Not (Less32U x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Geq64 x y)
+ // result: (Not (Less64 x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq64F_0(v *Value) bool {
+ // match: (Geq64F x y)
+ // result: (FLED y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FLED)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq64U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Geq64U x y)
+ // result: (Not (Less64U x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Geq8 x y)
+ // result: (Not (Less8 x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGeq8U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Geq8U x y)
+ // result: (Not (Less8U x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGetCallerPC_0(v *Value) bool {
+ // match: (GetCallerPC)
+ // result: (LoweredGetCallerPC)
+ for {
+ v.reset(OpRISCV64LoweredGetCallerPC)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGetCallerSP_0(v *Value) bool {
+ // match: (GetCallerSP)
+ // result: (LoweredGetCallerSP)
+ for {
+ v.reset(OpRISCV64LoweredGetCallerSP)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGetClosurePtr_0(v *Value) bool {
+ // match: (GetClosurePtr)
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpRISCV64LoweredGetClosurePtr)
+ return true
+ }
+}
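+// Strict greater-than simply swaps the operands of the corresponding
+// less-than (Less* for integers, FLTS/FLTD for floats).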
+func rewriteValueRISCV64_OpGreater16_0(v *Value) bool {
+ // match: (Greater16 x y)
+ // result: (Less16 y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpLess16)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater16U_0(v *Value) bool {
+ // match: (Greater16U x y)
+ // result: (Less16U y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpLess16U)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater32_0(v *Value) bool {
+ // match: (Greater32 x y)
+ // result: (Less32 y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpLess32)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater32F_0(v *Value) bool {
+ // match: (Greater32F x y)
+ // result: (FLTS y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FLTS)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater32U_0(v *Value) bool {
+ // match: (Greater32U x y)
+ // result: (Less32U y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpLess32U)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater64_0(v *Value) bool {
+ // match: (Greater64 x y)
+ // result: (Less64 y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpLess64)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater64F_0(v *Value) bool {
+ // match: (Greater64F x y)
+ // result: (FLTD y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FLTD)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater64U_0(v *Value) bool {
+ // match: (Greater64U x y)
+ // result: (Less64U y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpLess64U)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater8_0(v *Value) bool {
+ // match: (Greater8 x y)
+ // result: (Less8 y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpLess8)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpGreater8U_0(v *Value) bool {
+ // match: (Greater8U x y)
+ // result: (Less8U y x)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpLess8U)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+}
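+// Hmul32 (the high 32 bits of the 32x32->64 product) widens both
+// operands, multiplies in 64 bits, and shifts the result down by 32;
+// the 64-bit variants map directly onto MULH/MULHU.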
+func rewriteValueRISCV64_OpHmul32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul32u_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul64_0(v *Value) bool {
+ // match: (Hmul64 x y)
+ // result: (MULH x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64MULH)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul64u_0(v *Value) bool {
+ // match: (Hmul64u x y)
+ // result: (MULHU x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64MULHU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpInterCall_0(v *Value) bool {
+ // match: (InterCall [argwid] entry mem)
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[1]
+ entry := v.Args[0]
+ v.reset(OpRISCV64CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpIsInBounds_0(v *Value) bool {
+ // match: (IsInBounds idx len)
+ // result: (Less64U idx len)
+ for {
+ len := v.Args[1]
+ idx := v.Args[0]
+ v.reset(OpLess64U)
+ v.AddArg(idx)
+ v.AddArg(len)
+ return true
+ }
+}
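+// IsNonNil compares the pointer against an implicit zero: a MOVDconst
+// whose AuxInt is left unset defaults to the constant 0.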
+func rewriteValueRISCV64_OpIsNonNil_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil p)
+ // result: (NeqPtr (MOVDconst) p)
+ for {
+ p := v.Args[0]
+ v.reset(OpNeqPtr)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v.AddArg(v0)
+ v.AddArg(p)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpIsSliceInBounds_0(v *Value) bool {
+ // match: (IsSliceInBounds idx len)
+ // result: (Leq64U idx len)
+ for {
+ len := v.Args[1]
+ idx := v.Args[0]
+ v.reset(OpLeq64U)
+ v.AddArg(idx)
+ v.AddArg(len)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (Not (Less16 y x))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq16U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (Not (Less16U y x))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (Not (Less32 y x))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32F_0(v *Value) bool {
+ // match: (Leq32F x y)
+ // result: (FLES x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FLES)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (Not (Less32U y x))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (Not (Less64 y x))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64F_0(v *Value) bool {
+ // match: (Leq64F x y)
+ // result: (FLED x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FLED)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (Not (Less64U y x))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (Not (Less8 y x))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq8U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (Not (Less8U y x))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
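+// SLT/SLTU compare full 64-bit registers, so sub-word comparisons
+// sign-extend (signed) or zero-extend (unsigned) their operands before
+// comparing.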
+func rewriteValueRISCV64_OpLess16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SLT (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess16U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (SLT (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32F_0(v *Value) bool {
+ // match: (Less32F x y)
+ // result: (FLTS x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FLTS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess64_0(v *Value) bool {
+ // match: (Less64 x y)
+ // result: (SLT x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SLT)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess64F_0(v *Value) bool {
+ // match: (Less64F x y)
+ // result: (FLTD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FLTD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess64U_0(v *Value) bool {
+ // match: (Less64U x y)
+ // result: (SLTU x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SLTU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SLT (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess8U_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
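+// Load dispatches on the element type: booleans and unsigned integers
+// take the zero-extending (U) loads, signed integers the sign-extending
+// ones, and floats the FMOVW/FMOVD forms.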
+func rewriteValueRISCV64_OpLoad_0(v *Value) bool {
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVWload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLocalAddr_0(v *Value) bool {
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVaddr {sym} base)
+ for {
+ sym := v.Aux
+ _ = v.Args[1]
+ base := v.Args[0]
+ v.reset(OpRISCV64MOVaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
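+// Go defines shifts by >= the operand width to yield 0, but the
+// hardware shift uses only the low bits of the shift amount. Each shift
+// is therefore masked: SLTIU [64] y is 1 when y < 64, Neg* turns that
+// into an all-ones mask (or 0 when y >= 64), and the AND keeps or
+// clears the shifted value.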
+func rewriteValueRISCV64_OpLsh16x16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x64_0(v *Value) bool {
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x64_0(v *Value) bool {
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x64_0(v *Value) bool {
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x64_0(v *Value) bool {
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
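+// Sub-word remainders have no direct RISC-V instruction, so Mod16/Mod8
+// sign- or zero-extend both operands to 32 bits and then use the RV64M
+// 32-bit remainder instructions REMW/REMUW; the 32- and 64-bit cases
+// map directly onto REMW/REMUW and REM/REMU.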
+func rewriteValueRISCV64_OpMod16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (REMW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64REMW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod16u_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64REMUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod32_0(v *Value) bool {
+ // match: (Mod32 x y)
+ // result: (REMW x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64REMW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod32u_0(v *Value) bool {
+ // match: (Mod32u x y)
+ // result: (REMUW x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64REMUW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod64_0(v *Value) bool {
+ // match: (Mod64 x y)
+ // result: (REM x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64REM)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod64u_0(v *Value) bool {
+ // match: (Mod64u x y)
+ // result: (REMU x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64REMU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (REMW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64REMW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod8u_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64REMUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
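+// Move is lowered by size: sizes 0 through 8 become a single load/store
+// pair of the matching width, and everything else falls through to
+// LoweredMove. The extra ADDI operand computes src+s-moveSize(...),
+// which appears to serve as the address of the last chunk to copy,
+// i.e. the bound for the generated copy loop.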
+func rewriteValueRISCV64_OpMove_0(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ mem := v.Args[2]
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ mem := v.Args[2]
+ dst := v.Args[0]
+ src := v.Args[1]
+ v.reset(OpRISCV64MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHload src mem) mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ mem := v.Args[2]
+ dst := v.Args[0]
+ src := v.Args[1]
+ v.reset(OpRISCV64MOVHstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ mem := v.Args[2]
+ dst := v.Args[0]
+ src := v.Args[1]
+ v.reset(OpRISCV64MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ mem := v.Args[2]
+ dst := v.Args[0]
+ src := v.Args[1]
+ v.reset(OpRISCV64MOVDstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.(*types.Type).Alignment(), config)] src) mem)
+ for {
+ s := v.AuxInt
+ t := v.Aux
+ mem := v.Args[2]
+ dst := v.Args[0]
+ src := v.Args[1]
+ v.reset(OpRISCV64LoweredMove)
+ v.AuxInt = t.(*types.Type).Alignment()
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+ v0.AddArg(src)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+}
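+// As with Mod, sub-word multiplies are widened to 32 bits and use MULW;
+// the 32/64-bit integer and float cases map directly onto MULW, MUL,
+// FMULS and FMULD.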
+func rewriteValueRISCV64_OpMul16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 x y)
+ // result: (MULW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64MULW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMul32_0(v *Value) bool {
+ // match: (Mul32 x y)
+ // result: (MULW x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64MULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMul32F_0(v *Value) bool {
+ // match: (Mul32F x y)
+ // result: (FMULS x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FMULS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMul64_0(v *Value) bool {
+ // match: (Mul64 x y)
+ // result: (MUL x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64MUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMul64F_0(v *Value) bool {
+ // match: (Mul64F x y)
+ // result: (FMULD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FMULD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMul8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 x y)
+ // result: (MULW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64MULW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
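+// Integer negation is lowered as subtraction from zero: the MOV*const
+// operand carries no AuxInt, so it denotes the constant 0 and the SUB
+// computes 0 - x. Float negation uses the dedicated FNEGS/FNEGD
+// instructions instead.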
+func rewriteValueRISCV64_OpNeg16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg16 x)
+ // result: (SUB (MOVHconst) x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeg32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32 x)
+ // result: (SUB (MOVWconst) x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeg32F_0(v *Value) bool {
+ // match: (Neg32F x)
+ // result: (FNEGS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FNEGS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeg64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64 x)
+ // result: (SUB (MOVDconst) x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeg64F_0(v *Value) bool {
+ // match: (Neg64F x)
+ // result: (FNEGD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FNEGD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeg8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg8 x)
+ // result: (SUB (MOVBconst) x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
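+// Neq* is computed as SNEZ (set-if-not-zero) of the difference x - y.
+// For sub-word types the difference is zero-extended first, so stray
+// high bits left over from the 64-bit subtraction cannot make equal
+// values compare unequal.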
+func rewriteValueRISCV64_OpNeq16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SNEZ (ZeroExt32to64 (SUB <x.Type> x y)))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq32F_0(v *Value) bool {
+ // match: (Neq32F x y)
+ // result: (FNES x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FNES)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq64_0(v *Value) bool {
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (SNEZ (SUB <x.Type> x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq64F_0(v *Value) bool {
+ // match: (Neq64F x y)
+ // result: (FNED x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FNED)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeqB_0(v *Value) bool {
+ // match: (NeqB x y)
+ // result: (XOR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeqPtr_0(v *Value) bool {
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (SNEZ (SUB <x.Type> x y))
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNilCheck_0(v *Value) bool {
+ // match: (NilCheck ptr mem)
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpRISCV64LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNot_0(v *Value) bool {
+ // match: (Not x)
+ // result: (XORI [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
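+// OffPtr is lowered in three tiers: an offset from SP folds into a
+// single MOVaddr, any other offset that fits in 32 bits (is32Bit)
+// becomes an ADDI, and anything larger materializes the offset with a
+// MOVDconst and adds it with a full ADD.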
+func rewriteValueRISCV64_OpOffPtr_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVaddr [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDI [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst [off]) ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = off
+ v.AddArg(v0)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpOr16_0(v *Value) bool {
+ // match: (Or16 x y)
+ // result: (OR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpOr32_0(v *Value) bool {
+ // match: (Or32 x y)
+ // result: (OR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpOr64_0(v *Value) bool {
+ // match: (Or64 x y)
+ // result: (OR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpOr8_0(v *Value) bool {
+ // match: (Or8 x y)
+ // result: (OR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpOrB_0(v *Value) bool {
+ // match: (OrB x y)
+ // result: (OR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
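+// PanicBounds picks one of three lowered panic ops; boundsABI
+// classifies the bounds-check kind, apparently so that each lowered
+// variant can expect its operands in a fixed pair of registers.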
+func rewriteValueRISCV64_OpPanicBounds_0(v *Value) bool {
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := v.AuxInt
+ mem := v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsA)
+ v.AuxInt = kind
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := v.AuxInt
+ mem := v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsB)
+ v.AuxInt = kind
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := v.AuxInt
+ mem := v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsC)
+ v.AuxInt = kind
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
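+// The rules from here on optimize RISC-V ops themselves. ADD with a
+// MOVDconst operand on either side is strength-reduced to ADDI whenever
+// the constant fits in 32 bits.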
+func rewriteValueRISCV64_OpRISCV64ADD_0(v *Value) bool {
+ // match: (ADD (MOVDconst [off]) ptr)
+ // cond: is32Bit(off)
+ // result: (ADDI [off] ptr)
+ for {
+ ptr := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ off := v_0.AuxInt
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADD ptr (MOVDconst [off]))
+ // cond: is32Bit(off)
+ // result: (ADDI [off] ptr)
+ for {
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ off := v_1.AuxInt
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ADDI_0(v *Value) bool {
+ // match: (ADDI [c] (MOVaddr [d] {s} x))
+ // cond: is32Bit(c+d)
+ // result: (MOVaddr [c+d] {s} x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDI [0] x)
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
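+// Each load and store op below gets the same two address-folding rules:
+// a MOVaddr base folds its offset and symbol into the memory op when
+// the combined offset stays within 32 bits and the symbols can be
+// merged (canMergeSym), and an ADDI base folds its offset the same way.
+// Only the opcode changes from one width to the next.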
+func rewriteValueRISCV64_OpRISCV64MOVBUload_0(v *Value) bool {
+ // match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBUload [off1+off2] {sym} base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBload_0(v *Value) bool {
+ // match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBload [off1+off2] {sym} base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBstore_0(v *Value) bool {
+ // match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
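+// Constants that do not fit in 32 bits are split into high and low
+// halves. The low half is int64(int32(c)), which is sign-extended, so
+// when int32(c) is negative the high half gets a +1 correction to
+// compensate for the borrowed 2^32; otherwise the high half is used
+// as-is (the literal +0 below keeps the two rules symmetric).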
+func rewriteValueRISCV64_OpRISCV64MOVDconst_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVDconst <t> [c])
+ // cond: !is32Bit(c) && int32(c) < 0
+ // result: (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ if !(!is32Bit(c) && int32(c) < 0) {
+ break
+ }
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 32
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = c>>32 + 1
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v2.AuxInt = int64(int32(c))
+ v.AddArg(v2)
+ return true
+ }
+ // match: (MOVDconst <t> [c])
+ // cond: !is32Bit(c) && int32(c) >= 0
+ // result: (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ if !(!is32Bit(c) && int32(c) >= 0) {
+ break
+ }
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 32
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = c>>32 + 0
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v2.AuxInt = int64(int32(c))
+ v.AddArg(v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDload_0(v *Value) bool {
+ // match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVDload [off1+off2] {sym} base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDstore_0(v *Value) bool {
+ // match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVDstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHUload_0(v *Value) bool {
+ // match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVHUload [off1+off2] {sym} base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHload_0(v *Value) bool {
+ // match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVHload [off1+off2] {sym} base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHstore_0(v *Value) bool {
+ // match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVHstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWUload_0(v *Value) bool {
+ // match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWUload [off1+off2] {sym} base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWload_0(v *Value) bool {
+ // match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWload [off1+off2] {sym} base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWstore_0(v *Value) bool {
+ // match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
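+// Rotates are lowered only when the rotate amount is a constant: the
+// rotate is expanded into (x << (c&mask)) | (x >> (-c&mask)) using the
+// generic Or/Lsh/Rsh ops, which are then lowered again by the shift
+// rules above. Non-constant rotate amounts fail to match and return
+// false.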
+func rewriteValueRISCV64_OpRotateLeft16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVHconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
+ for {
+ t := v.Type
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpRISCV64MOVHconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v1.AuxInt = c & 15
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v3.AuxInt = -c & 15
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVWconst [c]))
+ // result: (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
+ for {
+ t := v.Type
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpRISCV64MOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v1.AuxInt = c & 31
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v3.AuxInt = -c & 31
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVDconst [c]))
+ // result: (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
+ for {
+ t := v.Type
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = c & 63
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = -c & 63
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVBconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
+ for {
+ t := v.Type
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpRISCV64MOVBconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v1.AuxInt = c & 7
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v3.AuxInt = -c & 7
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRound32F_0(v *Value) bool {
+ // match: (Round32F x)
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRound64F_0(v *Value) bool {
+ // match: (Round64F x)
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
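+// The Rsh* lowerings mirror Lsh*. Unsigned right shifts zero-extend the
+// value and mask the result with Neg(SLTIU [64] y), exactly like the
+// left shifts. Signed right shifts instead clamp the shift amount:
+// (ADDI [-1] (SLTIU [64] y)) is 0 when y < 64 and all-ones otherwise,
+// so ORing it into y leaves in-range shifts alone and turns
+// out-of-range shifts into a shift by at least 63, which SRA resolves
+// to the required sign fill.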
+func rewriteValueRISCV64_OpRsh16Ux16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux64_0(v *Value) bool {
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = -1
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = -1
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x64_0(v *Value) bool {
+ b := v.Block
+ // match: (Rsh64x64 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = -1
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = -1
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = 64
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
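
The unsigned variants take the complementary approach: an out-of-range shift must produce zero, so instead of clamping the amount, the SRL result is masked. Neg8 (SLTIU [64] y) is all ones when y < 64 and zero otherwise. A hedged fragment modeling the Rsh8Ux64 rule above (the function name is illustrative only):

// rsh8Ux64 models (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))).
func rsh8Ux64(x uint8, y uint64) uint8 {
	var lt64 uint64
	if y < 64 { // SLTIU [64] y
		lt64 = 1
	}
	mask := -lt64 // Neg: all ones when in range, zero otherwise
	return uint8((uint64(x) >> (y & 63)) & mask)
}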
+func rewriteValueRISCV64_OpRsh8x16_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x32_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x64_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x8_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = -1
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSignExt16to32_0(v *Value) bool {
+ b := v.Block
+ // match: (SignExt16to32 <t> x)
+ // result: (SRAI [48] (SLLI <t> [48] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = 48
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 48
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSignExt16to64_0(v *Value) bool {
+ b := v.Block
+ // match: (SignExt16to64 <t> x)
+ // result: (SRAI [48] (SLLI <t> [48] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = 48
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 48
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSignExt32to64_0(v *Value) bool {
+ b := v.Block
+ // match: (SignExt32to64 <t> x)
+ // result: (SRAI [32] (SLLI <t> [32] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 32
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSignExt8to16_0(v *Value) bool {
+ b := v.Block
+ // match: (SignExt8to16 <t> x)
+ // result: (SRAI [56] (SLLI <t> [56] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = 56
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 56
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSignExt8to32_0(v *Value) bool {
+ b := v.Block
+ // match: (SignExt8to32 <t> x)
+ // result: (SRAI [56] (SLLI <t> [56] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = 56
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 56
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSignExt8to64_0(v *Value) bool {
+ b := v.Block
+ // match: (SignExt8to64 <t> x)
+ // result: (SRAI [56] (SLLI <t> [56] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = 56
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 56
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
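
Base RV64I has no sub-word sign-extension instructions, so every SignExt rule above compiles to the same shift pair: SLLI moves the narrow value into the top of the 64-bit register and SRAI brings it back down, replicating the sign bit on the way. In Go terms, a sketch:

func signExt8to64(x int64) int64 {
	// SLLI [56] then SRAI [56]: the arithmetic right shift
	// copies bit 7 of the original byte into bits 8..63.
	return (x << 56) >> 56
}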
+func rewriteValueRISCV64_OpSlicemask_0(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Slicemask <t> x)
+ // result: (XOR (MOVDconst [-1]) (SRA <t> (SUB <t> x (MOVDconst [1])) (MOVDconst [63])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64XOR)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = -1
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SRA, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SUB, t)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = 1
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v4.AuxInt = 63
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
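
Slicemask must return all ones for a nonzero slice length and zero for a zero length, without branching. The rule above computes XOR(-1, SRA(x-1, 63)): subtracting one makes a zero length negative, the arithmetic shift smears its sign bit across the whole word, and the final XOR inverts the mask. A sketch, assuming x is a non-negative length as the compiler guarantees at this point:

func slicemask(x int64) int64 {
	// XOR (MOVDconst [-1]) (SRA (SUB x (MOVDconst [1])) (MOVDconst [63]))
	return ^((x - 1) >> 63) // 0 when x == 0, -1 when x > 0
}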
+func rewriteValueRISCV64_OpSqrt_0(v *Value) bool {
+ // match: (Sqrt x)
+ // result: (FSQRTD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpRISCV64FSQRTD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpStaticCall_0(v *Value) bool {
+ // match: (StaticCall [argwid] {target} mem)
+ // result: (CALLstatic [argwid] {target} mem)
+ for {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.reset(OpRISCV64CALLstatic)
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpStore_0(v *Value) bool {
+ // match: (Store {t} ptr val mem)
+ // cond: t.(*types.Type).Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := v.Aux
+ mem := v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ if !(t.(*types.Type).Size() == 1) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.(*types.Type).Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := v.Aux
+ mem := v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ if !(t.(*types.Type).Size() == 2) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := v.Aux
+ mem := v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := v.Aux
+ mem := v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVWstore ptr val mem)
+ for {
+ t := v.Aux
+ mem := v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := v.Aux
+ mem := v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
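
Taken together, the six Store rules form a dispatch on the stored type's width and floatness. A compact restatement with the op names as plain strings, purely for illustration; the generated code necessarily spells each case out as its own match:

func storeOp(size int64, isFloat bool) string {
	switch {
	case size == 1:
		return "MOVBstore"
	case size == 2:
		return "MOVHstore"
	case size == 4 && isFloat:
		return "FMOVWstore"
	case size == 4:
		return "MOVWstore"
	case size == 8 && isFloat:
		return "FMOVDstore"
	case size == 8:
		return "MOVDstore"
	}
	return "" // no rule fires; rewriteValue returns false
}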
+func rewriteValueRISCV64_OpSub16_0(v *Value) bool {
+ // match: (Sub16 x y)
+ // result: (SUB x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSub32_0(v *Value) bool {
+ // match: (Sub32 x y)
+ // result: (SUB x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSub32F_0(v *Value) bool {
+ // match: (Sub32F x y)
+ // result: (FSUBS x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FSUBS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSub64_0(v *Value) bool {
+ // match: (Sub64 x y)
+ // result: (SUB x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSub64F_0(v *Value) bool {
+ // match: (Sub64F x y)
+ // result: (FSUBD x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64FSUBD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSub8_0(v *Value) bool {
+ // match: (Sub8 x y)
+ // result: (SUB x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpSubPtr_0(v *Value) bool {
+ // match: (SubPtr x y)
+ // result: (SUB x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpTrunc16to8_0(v *Value) bool {
+ // match: (Trunc16to8 x)
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpTrunc32to16_0(v *Value) bool {
+ // match: (Trunc32to16 x)
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpTrunc32to8_0(v *Value) bool {
+ // match: (Trunc32to8 x)
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpTrunc64to16_0(v *Value) bool {
+ // match: (Trunc64to16 x)
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpTrunc64to32_0(v *Value) bool {
+ // match: (Trunc64to32 x)
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpTrunc64to8_0(v *Value) bool {
+ // match: (Trunc64to8 x)
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpWB_0(v *Value) bool {
+ // match: (WB {fn} destptr srcptr mem)
+ // result: (LoweredWB {fn} destptr srcptr mem)
+ for {
+ fn := v.Aux
+ mem := v.Args[2]
+ destptr := v.Args[0]
+ srcptr := v.Args[1]
+ v.reset(OpRISCV64LoweredWB)
+ v.Aux = fn
+ v.AddArg(destptr)
+ v.AddArg(srcptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpXor16_0(v *Value) bool {
+ // match: (Xor16 x y)
+ // result: (XOR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpXor32_0(v *Value) bool {
+ // match: (Xor32 x y)
+ // result: (XOR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpXor64_0(v *Value) bool {
+ // match: (Xor64 x y)
+ // result: (XOR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpXor8_0(v *Value) bool {
+ // match: (Xor8 x y)
+ // result: (XOR x y)
+ for {
+ y := v.Args[1]
+ x := v.Args[0]
+ v.reset(OpRISCV64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpZero_0(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ mem := v.Args[1]
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVBconst) mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpRISCV64MOVBstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVHstore ptr (MOVHconst) mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpRISCV64MOVHstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVWstore ptr (MOVWconst) mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpRISCV64MOVWstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [8] ptr mem)
+ // result: (MOVDstore ptr (MOVDconst) mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpRISCV64MOVDstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.(*types.Type).Alignment(), config)])) mem)
+ for {
+ s := v.AuxInt
+ t := v.Aux
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpRISCV64LoweredZero)
+ v.AuxInt = t.(*types.Type).Alignment()
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+}
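
Zeroing sizes 0 through 8 collapses to a single store of the right width; everything else falls through to LoweredZero. Its second operand is not a count but the address of the last unit to clear, ptr + s - moveSize(alignment, config), where moveSize picks the store width from the type's alignment. A rough Go model of the loop such a pseudo-instruction expands to, with unit standing in for that width:

func loweredZero(mem []byte, s, unit int) {
	// Advance one store of width unit at a time until the offset
	// passes end = s - unit, mirroring the MOVDconst operand
	// precomputed in the rewrite above.
	for off := 0; off+unit <= s; off += unit {
		for i := 0; i < unit; i++ {
			mem[off+i] = 0
		}
	}
}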
+func rewriteValueRISCV64_OpZeroExt16to32_0(v *Value) bool {
+ b := v.Block
+ // match: (ZeroExt16to32 <t> x)
+ // result: (SRLI [48] (SLLI <t> [48] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = 48
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 48
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpZeroExt16to64_0(v *Value) bool {
+ b := v.Block
+ // match: (ZeroExt16to64 <t> x)
+ // result: (SRLI [48] (SLLI <t> [48] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = 48
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 48
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpZeroExt32to64_0(v *Value) bool {
+ b := v.Block
+ // match: (ZeroExt32to64 <t> x)
+ // result: (SRLI [32] (SLLI <t> [32] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 32
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpZeroExt8to16_0(v *Value) bool {
+ b := v.Block
+ // match: (ZeroExt8to16 <t> x)
+ // result: (SRLI [56] (SLLI <t> [56] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = 56
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 56
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpZeroExt8to32_0(v *Value) bool {
+ b := v.Block
+ // match: (ZeroExt8to32 <t> x)
+ // result: (SRLI [56] (SLLI <t> [56] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = 56
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 56
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpZeroExt8to64_0(v *Value) bool {
+ b := v.Block
+ // match: (ZeroExt8to64 <t> x)
+ // result: (SRLI [56] (SLLI <t> [56] x))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = 56
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = 56
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
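
Zero extension reuses the sign-extension shape but pairs SLLI with the logical SRLI, so the vacated high bits refill with zeros instead of copies of the sign bit:

func zeroExt8to64(x uint64) uint64 {
	// SLLI [56] then SRLI [56]; >> on an unsigned value is logical in Go.
	return (x << 56) >> 56
}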
+func rewriteBlockRISCV64(b *Block) bool {
+ switch b.Kind {
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (BNE cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.Reset(BlockRISCV64BNE)
+ b.AddControl(cond)
+ return true
+ }
+ }
+ return false
+}
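
The file's lone block rewrite maps the generic If directly onto BNE: booleans are materialized as 0 or 1 in a register, and branching on not-equal against the zero register takes the yes edge exactly when the condition holds. As a one-line model (hypothetical helper name):

func branchTaken(cond int64) bool {
	return cond != 0 // BNE cond, x0
}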
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index ff0ef25e90..5a77910bde 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -66,7 +66,7 @@ func (op Op) isLoweredGetClosurePtr() bool {
switch op {
case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr,
- OpWasmLoweredGetClosurePtr:
+ OpRISCV64LoweredGetClosurePtr, OpWasmLoweredGetClosurePtr:
return true
}
return false
@@ -115,7 +115,7 @@ func schedule(f *Func) {
v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck ||
v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck ||
v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck ||
- v.Op == OpWasmLoweredNilCheck:
+ v.Op == OpRISCV64LoweredNilCheck || v.Op == OpWasmLoweredNilCheck:
// Nil checks must come before loads from the same address.
score[v.ID] = ScoreNilCheck
case v.Op == OpPhi:
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
index 19496de660..0b9f06f85d 100644
--- a/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
@@ -2,11 +2,11 @@
6: func test() {
8: go func() {}()
10: for {
-1048575:
+1: package main
10: for {
-1048575:
+1: package main
10: for {
-1048575:
+1: package main
10: for {
-1048575:
+1: package main
10: for {
diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go
index caaeb889fb..e8b1073818 100644
--- a/src/cmd/compile/internal/types/utils.go
+++ b/src/cmd/compile/internal/types/utils.go
@@ -19,7 +19,7 @@ var (
Dowidth func(*Type)
Fatalf func(string, ...interface{})
Sconv func(*Sym, int, int) string // orig: func sconv(s *Sym, flag FmtFlag, mode fmtMode) string
- Tconv func(*Type, int, int, int) string // orig: func tconv(t *Type, flag FmtFlag, mode fmtMode, depth int) string
+ Tconv func(*Type, int, int) string // orig: func tconv(t *Type, flag FmtFlag, mode fmtMode) string
FormatSym func(*Sym, fmt.State, rune, int) // orig: func symFormat(sym *Sym, s fmt.State, verb rune, mode fmtMode)
FormatType func(*Type, fmt.State, rune, int) // orig: func typeFormat(t *Type, s fmt.State, verb rune, mode fmtMode)
TypeLinkSym func(*Type) *obj.LSym
@@ -39,25 +39,23 @@ func (sym *Sym) Format(s fmt.State, verb rune) {
}
func (t *Type) String() string {
- // This is an external entry point, so we pass depth 0 to tconv.
// The implementation of tconv (including typefmt and fldconv)
- // must take care not to use a type in a formatting string
- // to avoid resetting the recursion counter.
- return Tconv(t, 0, FErr, 0)
+ // must handle recursive types correctly.
+ return Tconv(t, 0, FErr)
}
// ShortString generates a short description of t.
// It is used in autogenerated method names, reflection,
// and itab names.
func (t *Type) ShortString() string {
- return Tconv(t, FmtLeft, FErr, 0)
+ return Tconv(t, FmtLeft, FErr)
}
// LongString generates a complete description of t.
// It is useful for reflection,
// or when a unique fingerprint or hash of a type is required.
func (t *Type) LongString() string {
- return Tconv(t, FmtLeft|FmtUnsigned, FErr, 0)
+ return Tconv(t, FmtLeft|FmtUnsigned, FErr)
}
func (t *Type) Format(s fmt.State, verb rune) {
diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go
index e3ec3361f9..3aa64a5ce2 100644
--- a/src/cmd/compile/main.go
+++ b/src/cmd/compile/main.go
@@ -12,6 +12,7 @@ import (
"cmd/compile/internal/mips"
"cmd/compile/internal/mips64"
"cmd/compile/internal/ppc64"
+ "cmd/compile/internal/riscv64"
"cmd/compile/internal/s390x"
"cmd/compile/internal/wasm"
"cmd/compile/internal/x86"
@@ -32,6 +33,7 @@ var archInits = map[string]func(*gc.Arch){
"mips64le": mips64.Init,
"ppc64": ppc64.Init,
"ppc64le": ppc64.Init,
+ "riscv64": riscv64.Init,
"s390x": s390x.Init,
"wasm": wasm.Init,
}
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index 9eb9e8f241..62e00b0856 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -110,6 +110,9 @@ func xinit() {
fatalf("$GOROOT must be set")
}
goroot = filepath.Clean(b)
+ if modRoot := findModuleRoot(goroot); modRoot != "" {
+ fatalf("found go.mod file in %s: $GOROOT must not be inside a module", modRoot)
+ }
b = os.Getenv("GOROOT_FINAL")
if b == "" {
@@ -1532,7 +1535,7 @@ var cgoEnabled = map[string]bool{
"linux/mipsle": true,
"linux/mips64": true,
"linux/mips64le": true,
- "linux/riscv64": true,
+ "linux/riscv64": false, // Issue 36641
"linux/s390x": true,
"linux/sparc64": true,
"android/386": true,
@@ -1560,7 +1563,6 @@ var cgoEnabled = map[string]bool{
// List of platforms which are supported but not complete yet. These get
// filtered out of cgoEnabled for 'dist list'. See golang.org/issue/28944
var incomplete = map[string]bool{
- "linux/riscv64": true,
"linux/sparc64": true,
}
@@ -1590,6 +1592,20 @@ func checkCC() {
}
}
+func findModuleRoot(dir string) (root string) {
+ for {
+ if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
+ return dir
+ }
+ d := filepath.Dir(dir)
+ if d == dir {
+ break
+ }
+ dir = d
+ }
+ return ""
+}
+
func defaulttarg() string {
// xgetwd might return a path with symlinks fully resolved, and if
// there happens to be symlinks in goroot, then the hasprefix test
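
The findModuleRoot helper added above is what lets xinit reject a $GOROOT that sits inside a module. For experimenting with the same upward go.mod walk outside cmd/dist, a self-contained sketch (the package clause and the printing are mine):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findModuleRoot walks upward from dir until it finds a go.mod file
// or reaches the filesystem root.
func findModuleRoot(dir string) string {
	for {
		if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
			return dir
		}
		parent := filepath.Dir(dir)
		if parent == dir { // hit the root without finding go.mod
			return ""
		}
		dir = parent
	}
}

func main() {
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	fmt.Println(findModuleRoot(wd)) // empty when no go.mod is found
}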
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 12baccbc4f..118800e8da 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -45,10 +45,11 @@ var bootstrapDirs = []string{
"cmd/compile/internal/mips",
"cmd/compile/internal/mips64",
"cmd/compile/internal/ppc64",
- "cmd/compile/internal/types",
+ "cmd/compile/internal/riscv64",
"cmd/compile/internal/s390x",
"cmd/compile/internal/ssa",
"cmd/compile/internal/syntax",
+ "cmd/compile/internal/types",
"cmd/compile/internal/x86",
"cmd/compile/internal/wasm",
"cmd/internal/bio",
diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go
index 3e54915122..b8a8c5f2e6 100644
--- a/src/cmd/dist/main.go
+++ b/src/cmd/dist/main.go
@@ -122,6 +122,8 @@ func main() {
if elfIsLittleEndian(os.Args[0]) {
gohostarch = "mipsle"
}
+ case strings.Contains(out, "riscv64"):
+ gohostarch = "riscv64"
case strings.Contains(out, "s390x"):
gohostarch = "s390x"
case gohostos == "darwin":
diff --git a/src/cmd/doc/main.go b/src/cmd/doc/main.go
index 86259e5f1f..0499c40369 100644
--- a/src/cmd/doc/main.go
+++ b/src/cmd/doc/main.go
@@ -65,9 +65,10 @@ func usage() {
fmt.Fprintf(os.Stderr, "Usage of [go] doc:\n")
fmt.Fprintf(os.Stderr, "\tgo doc\n")
fmt.Fprintf(os.Stderr, "\tgo doc <pkg>\n")
- fmt.Fprintf(os.Stderr, "\tgo doc <sym>[.<method>]\n")
- fmt.Fprintf(os.Stderr, "\tgo doc [<pkg>].<sym>[.<method>]\n")
- fmt.Fprintf(os.Stderr, "\tgo doc <pkg> <sym>[.<method>]\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc <sym>[.<methodOrField>]\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc [<pkg>.]<sym>[.<methodOrField>]\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc [<pkg>.][<sym>.]<methodOrField>\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc <pkg> <sym>[.<methodOrField>]\n")
fmt.Fprintf(os.Stderr, "For more information run\n")
fmt.Fprintf(os.Stderr, "\tgo help doc\n\n")
fmt.Fprintf(os.Stderr, "Flags:\n")
diff --git a/src/cmd/go.mod b/src/cmd/go.mod
index 55ef2395c5..fddae5b84d 100644
--- a/src/cmd/go.mod
+++ b/src/cmd/go.mod
@@ -6,8 +6,8 @@ require (
github.com/google/pprof v0.0.0-20191105193234-27840fff0d09
github.com/ianlancetaylor/demangle v0.0.0-20180524225900-fc6590592b44 // indirect
golang.org/x/arch v0.0.0-20190815191158-8a70ba74b3a1
- golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
- golang.org/x/mod v0.1.1-0.20191126161957-788aebd06792
+ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d
+ golang.org/x/mod v0.2.0
golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 // indirect
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e
)
diff --git a/src/cmd/go.sum b/src/cmd/go.sum
index 040f25b617..186222f1cf 100644
--- a/src/cmd/go.sum
+++ b/src/cmd/go.sum
@@ -7,8 +7,10 @@ golang.org/x/arch v0.0.0-20190815191158-8a70ba74b3a1/go.mod h1:flIaEI6LNU6xOCD5P
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/mod v0.1.1-0.20191126161957-788aebd06792 h1:04Uqz7R2BD7irAGgQtrKNW5tLa50RgSW71y4ofoaivk=
-golang.org/x/mod v0.1.1-0.20191126161957-788aebd06792/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d h1:9FCpayM9Egr1baVnV1SX0H87m+XB0B8S0hAMi99X/3U=
+golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index 013f7b3cfe..971a756b37 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -907,7 +907,7 @@
// Main bool // is this the main module?
// Indirect bool // is this module only an indirect dependency of main module?
// Dir string // directory holding files for this module, if any
-// GoMod string // path to go.mod file for this module, if any
+// GoMod string // path to go.mod file used when loading this module, if any
// GoVersion string // go version used in module
// Error *ModuleError // error loading module
// }
@@ -916,6 +916,9 @@
// Err string // the error itself
// }
//
+// The file GoMod refers to may be outside the module directory if the
+// module is in the module cache or if the -modfile flag is used.
+//
// The default output is to print the module path and then
// information about the version and replacement if any.
// For example, 'go list -m all' might print:
@@ -1020,7 +1023,9 @@
// execution. The "go mod download" command is useful mainly for pre-filling
// the local cache or to compute the answers for a Go module proxy.
//
-// By default, download reports errors to standard error but is otherwise silent.
+// By default, download writes nothing to standard output. It may print progress
+// messages and errors to standard error.
+//
// The -json flag causes download to print a sequence of JSON objects
// to standard output, describing each downloaded module (or failure),
// corresponding to this Go struct:
@@ -1075,12 +1080,17 @@
// add and drop an exclusion for the given module path and version.
// Note that -exclude=path@version is a no-op if that exclusion already exists.
//
-// The -replace=old[@v]=new[@v] and -dropreplace=old[@v] flags
-// add and drop a replacement of the given module path and version pair.
-// If the @v in old@v is omitted, the replacement applies to all versions
-// with the old module path. If the @v in new@v is omitted, the new path
-// should be a local module root directory, not a module path.
-// Note that -replace overrides any existing replacements for old[@v].
+// The -replace=old[@v]=new[@v] flag adds a replacement of the given
+// module path and version pair. If the @v in old@v is omitted, a
+// replacement without a version on the left side is added, which applies
+// to all versions of the old module path. If the @v in new@v is omitted,
+// the new path should be a local module root directory, not a module
+// path. Note that -replace overrides any redundant replacements for old[@v],
+// so omitting @v will drop existing replacements for specific versions.
+//
+// The -dropreplace=old[@v] flag drops a replacement of the given
+// module path and version pair. If the @v is omitted, a replacement without
+// a version on the left side is dropped.
//
// The -require, -droprequire, -exclude, -dropexclude, -replace,
// and -dropreplace editing flags may be repeated, and the changes
@@ -1721,8 +1731,10 @@
// GOHOSTOS
// The operating system (GOOS) of the Go toolchain binaries.
// GOMOD
-// The absolute path to the go.mod of the main module,
-// or the empty string if not using modules.
+// The absolute path to the go.mod of the main module.
+// If module-aware mode is enabled, but there is no go.mod, GOMOD will be
+// os.DevNull ("/dev/null" on Unix-like systems, "NUL" on Windows).
+// If module-aware mode is disabled, GOMOD will be the empty string.
// GOTOOLDIR
// The directory where the go tools (compile, cover, doc, etc...) are installed.
//
@@ -2339,14 +2351,15 @@
//
// Module support
//
-// Go 1.13 includes support for Go modules. Module-aware mode is active by default
-// whenever a go.mod file is found in, or in a parent of, the current directory.
+// The go command includes support for Go modules. Module-aware mode is active
+// by default whenever a go.mod file is found in the current directory or in
+// any parent directory.
//
// The quickest way to take advantage of module support is to check out your
// repository, create a go.mod file (described in the next section) there, and run
// go commands from within that file tree.
//
-// For more fine-grained control, Go 1.13 continues to respect
+// For more fine-grained control, the go command continues to respect
// a temporary environment variable, GO111MODULE, which can be set to one
// of three string values: off, on, or auto (the default).
// If GO111MODULE=on, then the go command requires the use of modules,
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index e45e258846..8ca34d49ca 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -638,7 +638,7 @@ func (tg *testgoData) grepStderrNot(match, msg string) {
}
// grepBothNot looks for a regular expression in the test run's
-// standard output or stand error and fails, logging msg, if it is
+// standard output or standard error and fails, logging msg, if it is
// found.
func (tg *testgoData) grepBothNot(match, msg string) {
tg.t.Helper()
@@ -913,6 +913,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
+ tg.parallel()
// Copy the runtime packages into a temporary GOROOT
// so that we can change files.
@@ -1026,28 +1027,6 @@ func TestInternalPackagesOutsideGOROOTAreRespected(t *testing.T) {
tg.grepBoth(`testinternal2(\/|\\)p\.go\:3\:8\: use of internal package .*internal/w not allowed`, "wrote error message for testdata/testinternal2")
}
-func TestRunInternal(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- dir := filepath.Join(tg.pwd(), "testdata")
- tg.setenv("GOPATH", dir)
- tg.run("run", filepath.Join(dir, "src/run/good.go"))
- tg.runFail("run", filepath.Join(dir, "src/run/bad.go"))
- tg.grepStderr(`testdata(\/|\\)src(\/|\\)run(\/|\\)bad\.go\:3\:8\: use of internal package run/subdir/internal/private not allowed`, "unexpected error for run/bad.go")
-}
-
-func TestRunPkg(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- dir := filepath.Join(tg.pwd(), "testdata")
- tg.setenv("GOPATH", dir)
- tg.run("run", "hello")
- tg.grepStderr("hello, world", "did not find hello, world")
- tg.cd(filepath.Join(dir, "src/hello"))
- tg.run("run", ".")
- tg.grepStderr("hello, world", "did not find hello, world")
-}
-
func TestInternalPackageErrorsAreHandled(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -1062,56 +1041,6 @@ func TestInternalCache(t *testing.T) {
tg.grepStderr("internal", "did not fail to build p")
}
-func TestImportCommandMatch(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcom"))
- tg.run("build", "./testdata/importcom/works.go")
-}
-
-func TestImportCommentMismatch(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcom"))
- tg.runFail("build", "./testdata/importcom/wrongplace.go")
- tg.grepStderr(`wrongplace expects import "my/x"`, "go build did not mention incorrect import")
-}
-
-func TestImportCommentSyntaxError(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcom"))
- tg.runFail("build", "./testdata/importcom/bad.go")
- tg.grepStderr("cannot parse import comment", "go build did not mention syntax error")
-}
-
-func TestImportCommentConflict(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcom"))
- tg.runFail("build", "./testdata/importcom/conflict.go")
- tg.grepStderr("found import comments", "go build did not mention comment conflict")
-}
-
-func TestImportCycle(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.parallel()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcycle"))
- tg.runFail("build", "selfimport")
-
- count := tg.grepCountBoth("import cycle not allowed")
- if count == 0 {
- t.Fatal("go build did not mention cyclical import")
- }
- if count > 1 {
- t.Fatal("go build mentioned import cycle more than once")
- }
-
- // Don't hang forever.
- tg.run("list", "-e", "-json", "selfimport")
-}
-
// cmd/go: custom import path checking should not apply to Go packages without import comment.
func TestIssue10952(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
@@ -1217,24 +1146,6 @@ func TestAccidentalGitCheckout(t *testing.T) {
}
}
-func TestErrorMessageForSyntaxErrorInTestGoFileSaysFAIL(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("test", "syntaxerror")
- tg.grepStderr("x_test.go:", "did not diagnose error")
- tg.grepStdout("FAIL", "go test did not say FAIL")
-}
-
-func TestWildcardsDoNotLookInUselessDirectories(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("list", "...")
- tg.grepBoth("badpkg", "go list ... failure does not mention badpkg")
- tg.run("list", "m...")
-}
-
func TestRelativeImportsGoTest(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -1672,6 +1583,7 @@ func TestDefaultGOPATHGet(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
+ tg.parallel()
tg.setenv("GOPATH", "")
tg.tempDir("home")
tg.setenv(homeEnvName(), tg.path("home"))
@@ -1696,6 +1608,7 @@ func TestDefaultGOPATHGet(t *testing.T) {
func TestDefaultGOPATHPrintedSearchList(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
+ tg.parallel()
tg.setenv("GOPATH", "")
tg.tempDir("home")
tg.setenv(homeEnvName(), tg.path("home"))
@@ -1818,16 +1731,6 @@ func TestGoTestMutexprofileDashOControlsBinaryLocation(t *testing.T) {
tg.wantExecutable("myerrors.test"+exeSuffix, "go test -mutexprofile -o myerrors.test did not create myerrors.test")
}
-func TestGoBuildNonMain(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- // TODO: tg.parallel()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("build", "-buildmode=exe", "-o", "not_main"+exeSuffix, "not_main")
- tg.grepStderr("-buildmode=exe requires exactly one main package", "go build with -o and -buildmode=exe on a non-main package should throw an error")
- tg.mustNotExist("not_main" + exeSuffix)
-}
-
func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
@@ -2191,33 +2094,6 @@ func TestCoverageNoStatements(t *testing.T) {
tg.grepStdout("[no statements]", "expected [no statements] for pkg4")
}
-func TestCoverageImportMainLoop(t *testing.T) {
- skipIfGccgo(t, "gccgo has no cover tool")
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("test", "importmain/test")
- tg.grepStderr("not an importable package", "did not detect import main")
- tg.runFail("test", "-cover", "importmain/test")
- tg.grepStderr("not an importable package", "did not detect import main")
-}
-
-func TestCoveragePattern(t *testing.T) {
- skipIfGccgo(t, "gccgo has no cover tool")
- tooSlow(t)
- tg := testgo(t)
- defer tg.cleanup()
- tg.parallel()
- tg.makeTempdir()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
-
- // If coverpkg=sleepy... expands by package loading
- // (as opposed to pattern matching on deps)
- // then it will try to load sleepybad, which does not compile,
- // and the test command will fail.
- tg.run("test", "-coverprofile="+tg.path("cover.out"), "-coverpkg=sleepy...", "-run=^$", "sleepy1")
-}
-
func TestCoverageErrorLine(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
@@ -2290,20 +2166,6 @@ func TestCoverageDashC(t *testing.T) {
tg.wantExecutable(tg.path("coverdep"), "go -test -c -coverprofile did not create executable")
}
-func TestPluginNonMain(t *testing.T) {
- wd, err := os.Getwd()
- if err != nil {
- t.Fatal(err)
- }
-
- pkg := filepath.Join(wd, "testdata", "testdep", "p2")
-
- tg := testgo(t)
- defer tg.cleanup()
-
- tg.runFail("build", "-buildmode=plugin", pkg)
-}
-
func TestTestEmpty(t *testing.T) {
if !canRace {
t.Skip("no race detector")
@@ -2388,39 +2250,6 @@ func main() {
tg.grepStderrNot(`os.Stat .* no such file or directory`, "unexpected stat of archive file")
}
-func TestCoverageWithCgo(t *testing.T) {
- skipIfGccgo(t, "gccgo has no cover tool")
- tooSlow(t)
- if !canCgo {
- t.Skip("skipping because cgo not enabled")
- }
-
- for _, dir := range []string{"cgocover", "cgocover2", "cgocover3", "cgocover4"} {
- t.Run(dir, func(t *testing.T) {
- tg := testgo(t)
- tg.parallel()
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.run("test", "-short", "-cover", dir)
- data := tg.getStdout() + tg.getStderr()
- checkCoverage(tg, data)
- })
- }
-}
-
-func TestCgoAsmError(t *testing.T) {
- if !canCgo {
- t.Skip("skipping because cgo not enabled")
- }
-
- tg := testgo(t)
- tg.parallel()
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("build", "cgoasm")
- tg.grepBoth("package using cgo has Go assembly file", "did not detect Go assembly file")
-}
-
func TestCgoDependsOnSyscall(t *testing.T) {
if testing.Short() {
t.Skip("skipping test that removes $GOROOT/pkg/*_race in short mode")
@@ -2434,6 +2263,8 @@ func TestCgoDependsOnSyscall(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
+ tg.parallel()
+
files, err := filepath.Glob(filepath.Join(runtime.GOROOT(), "pkg", "*_race"))
tg.must(err)
for _, file := range files {
@@ -2647,14 +2478,6 @@ func TestListTemplateContextFunction(t *testing.T) {
}
}
-// cmd/go: "go test" should fail if package does not build
-func TestIssue7108(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("test", "notest")
-}
-
func TestGoBuildTestOnly(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -2676,17 +2499,6 @@ func TestGoBuildTestOnly(t *testing.T) {
tg.run("install", "./testonly...")
}
-func TestGoTestDetectsTestOnlyImportCycles(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("test", "-c", "testcycle/p3")
- tg.grepStderr("import cycle not allowed in test", "go test testcycle/p3 produced unexpected error")
-
- tg.runFail("test", "-c", "testcycle/q1")
- tg.grepStderr("import cycle not allowed in test", "go test testcycle/q1 produced unexpected error")
-}
-
func TestGoTestFooTestWorks(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -2714,29 +2526,6 @@ func TestGoTestMainAsNormalTest(t *testing.T) {
tg.grepBoth(okPattern, "go test did not say ok")
}
-func TestGoTestMainTwice(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping in short mode")
- }
- tg := testgo(t)
- defer tg.cleanup()
- tg.makeTempdir()
- tg.setenv("GOCACHE", tg.tempdir)
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.run("test", "-v", "multimain")
- if strings.Count(tg.getStdout(), "notwithstanding") != 2 {
- t.Fatal("tests did not run twice")
- }
-}
-
-func TestGoTestFlagsAfterPackage(t *testing.T) {
- tooSlow(t)
- tg := testgo(t)
- defer tg.cleanup()
- tg.run("test", "testdata/flag_test.go", "-v", "-args", "-v=7") // Two distinct -v flags.
- tg.run("test", "-v", "testdata/flag_test.go", "-args", "-v=7") // Two distinct -v flags.
-}
-
func TestGoTestXtestonlyWorks(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -2828,20 +2617,6 @@ func TestGoGenerateXTestPkgName(t *testing.T) {
}
}
-func TestGoGenerateBadImports(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("skipping because windows has no echo command")
- }
-
- // This package has an invalid import causing an import cycle,
- // but go generate is supposed to still run.
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.run("generate", "gencycle")
- tg.grepStdout("hello world", "go generate gencycle did not run generator")
-}
-
func TestGoGetCustomDomainWildcard(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
@@ -3267,43 +3042,6 @@ func TestGoTestRaceInstallCgo(t *testing.T) {
}
}
-func TestGoTestRaceFailures(t *testing.T) {
- tooSlow(t)
-
- if !canRace {
- t.Skip("skipping because race detector not supported")
- }
-
- tg := testgo(t)
- tg.parallel()
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
-
- tg.run("test", "testrace")
-
- tg.runFail("test", "-race", "testrace")
- tg.grepStdout("FAIL: TestRace", "TestRace did not fail")
- tg.grepBothNot("PASS", "something passed")
-
- tg.runFail("test", "-race", "testrace", "-run", "XXX", "-bench", ".")
- tg.grepStdout("FAIL: BenchmarkRace", "BenchmarkRace did not fail")
- tg.grepBothNot("PASS", "something passed")
-}
-
-func TestGoTestImportErrorStack(t *testing.T) {
- const out = `package testdep/p1 (test)
- imports testdep/p2
- imports testdep/p3: build constraints exclude all Go files `
-
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("test", "testdep/p1")
- if !strings.Contains(tg.stderr.String(), out) {
- t.Fatalf("did not give full import stack:\n\n%s", tg.stderr.String())
- }
-}
-
func TestGoGetUpdate(t *testing.T) {
// golang.org/issue/9224.
// The recursive updating was trying to walk to
@@ -3626,27 +3364,6 @@ func TestGoGetUpdateAllDoesNotTryToLoadDuplicates(t *testing.T) {
tg.grepStderrNot("duplicate loads of", "did not remove old packages from cache")
}
-// Issue 17119 more duplicate load errors
-func TestIssue17119(t *testing.T) {
- testenv.MustHaveExternalNetwork(t)
-
- tg := testgo(t)
- defer tg.cleanup()
- tg.parallel()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("build", "dupload")
- tg.grepBothNot("duplicate load|internal error", "internal error")
-}
-
-func TestFatalInBenchmarkCauseNonZeroExitStatus(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- // TODO: tg.parallel()
- tg.runFail("test", "-run", "^$", "-bench", ".", "./testdata/src/benchfatal")
- tg.grepBothNot("^ok", "test passed unexpectedly")
- tg.grepBoth("FAIL.*benchfatal", "test did not run everything")
-}
-
func TestBinaryOnlyPackages(t *testing.T) {
tooSlow(t)
@@ -3812,16 +3529,6 @@ func TestMatchesNoTests(t *testing.T) {
tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}
-func TestMatchesNoTestsDoesNotOverrideBuildFailure(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.parallel()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("test", "-run", "ThisWillNotMatch", "syntaxerror")
- tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
- tg.grepBoth("FAIL", "go test did not say FAIL")
-}
-
func TestMatchesNoBenchmarksIsOK(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -3849,18 +3556,6 @@ func TestMatchesOnlyBenchmarkIsOK(t *testing.T) {
tg.grepBoth(okPattern, "go test did not say ok")
}
-func TestBenchmarkLabels(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- // TODO: tg.parallel()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.run("test", "-run", "^$", "-bench", ".", "bench")
- tg.grepStdout(`(?m)^goos: `+runtime.GOOS, "go test did not print goos")
- tg.grepStdout(`(?m)^goarch: `+runtime.GOARCH, "go test did not print goarch")
- tg.grepStdout(`(?m)^pkg: bench`, "go test did not say pkg: bench")
- tg.grepBothNot(`(?s)pkg:.*pkg:`, "go test said pkg multiple times")
-}
-
func TestBenchmarkLabelsOutsideGOPATH(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -4260,25 +3955,6 @@ func TestCgoFlagContainsSpace(t *testing.T) {
tg.grepStderrNot(`"-L[^"]+c flags".*"-L[^"]+c flags"`, "found too many quoted ld flags")
}
-// Issue #20435.
-func TestGoTestRaceCoverModeFailures(t *testing.T) {
- tooSlow(t)
- if !canRace {
- t.Skip("skipping because race detector not supported")
- }
-
- tg := testgo(t)
- tg.parallel()
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
-
- tg.run("test", "testrace")
-
- tg.runFail("test", "-race", "-covermode=set", "testrace")
- tg.grepStderr(`-covermode must be "atomic", not "set", when -race is enabled`, "-race -covermode=set was allowed")
- tg.grepBothNot("PASS", "something passed")
-}
-
// Issue 9737: verify that GOARM and GO386 affect the computed build ID.
func TestBuildIDContainsArchModeEnv(t *testing.T) {
if testing.Short() {
@@ -4318,60 +3994,6 @@ func main() {}`)
}))
}
-func TestTestRegexps(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.run("test", "-cpu=1", "-run=X/Y", "-bench=X/Y", "-count=2", "-v", "testregexp")
- var lines []string
- for _, line := range strings.SplitAfter(tg.getStdout(), "\n") {
- if strings.Contains(line, "=== RUN") || strings.Contains(line, "--- BENCH") || strings.Contains(line, "LOG") {
- lines = append(lines, line)
- }
- }
-
- // Important parts:
- // TestX is run, twice
- // TestX/Y is run, twice
- // TestXX is run, twice
- // TestZ is not run
- // BenchmarkX is run but only with N=1, once
- // BenchmarkXX is run but only with N=1, once
- // BenchmarkX/Y is run in full, twice
- want := `=== RUN TestX
- TestX: x_test.go:6: LOG: X running
-=== RUN TestX/Y
- TestX/Y: x_test.go:8: LOG: Y running
-=== RUN TestXX
- TestXX: z_test.go:10: LOG: XX running
-=== RUN TestX
- TestX: x_test.go:6: LOG: X running
-=== RUN TestX/Y
- TestX/Y: x_test.go:8: LOG: Y running
-=== RUN TestXX
- TestXX: z_test.go:10: LOG: XX running
- BenchmarkX: x_test.go:13: LOG: X running N=1
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=1
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=100
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=10000
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=1000000
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=100000000
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=1000000000
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=1
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=100
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=10000
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=1000000
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=100000000
- BenchmarkX/Y: x_test.go:15: LOG: Y running N=1000000000
- BenchmarkXX: z_test.go:18: LOG: XX running N=1
-`
-
- have := strings.Join(lines, "")
- if have != want {
- t.Errorf("reduced output:<<<\n%s>>> want:<<<\n%s>>>", have, want)
- }
-}
-
func TestListTests(t *testing.T) {
tooSlow(t)
var tg *testgoData
@@ -4407,6 +4029,7 @@ func TestBuildmodePIE(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
+ tg.parallel()
tg.tempFile("main.go", `package main; func main() { print("hello") }`)
src := tg.path("main.go")
@@ -4570,6 +4193,7 @@ func TestUpxCompression(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
+ tg.parallel()
tg.tempFile("main.go", `package main; import "fmt"; func main() { fmt.Print("hello upx") }`)
src := tg.path("main.go")
@@ -4963,14 +4587,6 @@ func TestInstallDeps(t *testing.T) {
tg.mustExist(p1)
}
-func TestGoTestMinusN(t *testing.T) {
- // Intent here is to verify that 'go test -n' works without crashing.
- // This reuses flag_test.go, but really any test would do.
- tg := testgo(t)
- defer tg.cleanup()
- tg.run("test", "testdata/flag_test.go", "-n", "-args", "-v=7")
-}
-
func TestGoTestJSON(t *testing.T) {
skipIfGccgo(t, "gccgo does not have standard packages")
tooSlow(t)
@@ -5108,6 +4724,7 @@ func init() {}
func TestBadCommandLines(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
+ tg.parallel()
tg.tempFile("src/x/x.go", "package x\n")
tg.setenv("GOPATH", tg.path("."))
@@ -5328,6 +4945,7 @@ func TestCgoCache(t *testing.T) {
func TestFilepathUnderCwdFormat(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
+ tg.parallel()
tg.run("test", "-x", "-cover", "log")
tg.grepStderrNot(`\.log\.cover\.go`, "-x output should contain correctly formatted filepath under cwd")
}
@@ -5432,16 +5050,6 @@ func TestCDAndGOPATHAreDifferent(t *testing.T) {
}
}
-// Issue 26242.
-func TestGoTestWithoutTests(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.parallel()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.run("test", "testnorun")
- tg.grepStdout(`testnorun\t\[no test files\]`, "do not want test to run")
-}
-
// Issue 25579.
func TestGoBuildDashODevNull(t *testing.T) {
tooSlow(t)
diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go
index 5f4bf4e6c8..69e17482b4 100644
--- a/src/cmd/go/internal/clean/clean.go
+++ b/src/cmd/go/internal/clean/clean.go
@@ -178,7 +178,9 @@ func runClean(cmd *base.Command, args []string) {
}
}
if err != nil {
- base.Errorf("go clean -testcache: %v", err)
+ if _, statErr := os.Stat(dir); !os.IsNotExist(statErr) {
+ base.Errorf("go clean -testcache: %v", err)
+ }
}
}
}
diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go
index d73d1146f0..2e4d6388cf 100644
--- a/src/cmd/go/internal/get/vcs.go
+++ b/src/cmd/go/internal/get/vcs.go
@@ -21,6 +21,7 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
+ "cmd/go/internal/load"
"cmd/go/internal/web"
)
@@ -661,7 +662,7 @@ func RepoRootForImportPath(importPath string, mod ModuleMode, security web.Secur
if err == errUnknownSite {
rr, err = repoRootForImportDynamic(importPath, mod, security)
if err != nil {
- err = fmt.Errorf("unrecognized import path %q: %v", importPath, err)
+ err = load.ImportErrorf(importPath, "unrecognized import path %q: %v", importPath, err)
}
}
if err != nil {
@@ -676,7 +677,7 @@ func RepoRootForImportPath(importPath string, mod ModuleMode, security web.Secur
if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") {
// Do not allow wildcards in the repo root.
rr = nil
- err = fmt.Errorf("cannot expand ... in %q", importPath)
+ err = load.ImportErrorf(importPath, "cannot expand ... in %q", importPath)
}
return rr, err
}
@@ -700,7 +701,7 @@ func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths
m := srv.regexp.FindStringSubmatch(importPath)
if m == nil {
if srv.prefix != "" {
- return nil, fmt.Errorf("invalid %s import path %q", srv.prefix, importPath)
+ return nil, load.ImportErrorf(importPath, "invalid %s import path %q", srv.prefix, importPath)
}
continue
}
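
All three call sites above switch from fmt.Errorf to load.ImportErrorf, so resolution failures reach the caller tagged with the import path they belong to. The real type lives in cmd/go/internal/load; the following is only an illustration of the shape such a constructor can take, not its actual definition:

package load

import "fmt"

// ImportError is a hypothetical rendering of an error that
// remembers which import path it concerns.
type ImportError struct {
	ImportPath string
	Err        error
}

func (e *ImportError) Error() string { return e.Err.Error() }
func (e *ImportError) Unwrap() error { return e.Err }

// ImportErrorf mirrors fmt.Errorf while attaching the import path.
func ImportErrorf(path, format string, args ...interface{}) error {
	return &ImportError{ImportPath: path, Err: fmt.Errorf(format, args...)}
}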
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
index ac16312aaf..6a843f459a 100644
--- a/src/cmd/go/internal/help/helpdoc.go
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -621,8 +621,10 @@ Additional information available from 'go env' but not read from the environment
GOHOSTOS
The operating system (GOOS) of the Go toolchain binaries.
GOMOD
- The absolute path to the go.mod of the main module,
- or the empty string if not using modules.
+ The absolute path to the go.mod of the main module.
+ If module-aware mode is enabled, but there is no go.mod, GOMOD will be
+ os.DevNull ("/dev/null" on Unix-like systems, "NUL" on Windows).
+ If module-aware mode is disabled, GOMOD will be the empty string.
GOTOOLDIR
The directory where the go tools (compile, cover, doc, etc...) are installed.
`,
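
The three GOMOD states spelled out above can be told apart programmatically. A small sketch, assuming the value is read via `go env GOMOD`:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("go", "env", "GOMOD").Output()
	if err != nil {
		panic(err)
	}
	switch gomod := strings.TrimSpace(string(out)); gomod {
	case "":
		fmt.Println("module-aware mode is disabled")
	case os.DevNull: // "/dev/null" on Unix-like systems, "NUL" on Windows
		fmt.Println("module-aware mode is enabled, but there is no go.mod")
	default:
		fmt.Println("main module go.mod:", gomod)
	}
}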
diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go
index b393c67ddb..8d979e276f 100644
--- a/src/cmd/go/internal/list/list.go
+++ b/src/cmd/go/internal/list/list.go
@@ -211,7 +211,7 @@ applied to a Go struct, but now a Module struct:
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
- GoMod string // path to go.mod file for this module, if any
+ GoMod string // path to go.mod file used when loading this module, if any
GoVersion string // go version used in module
Error *ModuleError // error loading module
}
@@ -220,6 +220,9 @@ applied to a Go struct, but now a Module struct:
Err string // the error itself
}
+The file GoMod refers to may be outside the module directory if the
+module is in the module cache or if the -modfile flag is used.
+
The default output is to print the module path and then
information about the version and replacement if any.
For example, 'go list -m all' might print:
@@ -387,15 +390,24 @@ func runList(cmd *base.Command, args []string) {
modload.InitMod() // Parses go.mod and sets cfg.BuildMod.
if cfg.BuildMod == "vendor" {
+ const actionDisabledFormat = "go list -m: can't %s using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)"
+
+ if *listVersions {
+ base.Fatalf(actionDisabledFormat, "determine available versions")
+ }
+ if *listU {
+ base.Fatalf(actionDisabledFormat, "determine available upgrades")
+ }
+
for _, arg := range args {
// In vendor mode, the module graph is incomplete: it contains only the
// explicit module dependencies and the modules that supply packages in
// the import graph. Reject queries that imply more information than that.
if arg == "all" {
- base.Fatalf("go list -m: can't compute 'all' using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)")
+ base.Fatalf(actionDisabledFormat, "compute 'all'")
}
if strings.Contains(arg, "...") {
- base.Fatalf("go list -m: can't match module patterns using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)")
+ base.Fatalf(actionDisabledFormat, "match module patterns")
}
}
}
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index 0d63187e06..369a79b716 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -11,6 +11,7 @@ import (
"errors"
"fmt"
"go/build"
+ "go/scanner"
"go/token"
"io/ioutil"
"os"
@@ -1519,17 +1520,30 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
p.Internal.LocalPrefix = dirToImportPath(p.Dir)
}
+ // setError sets p.Error if it hasn't already been set. We may proceed
+ // after encountering some errors so that 'go list -e' has more complete
+ // output. If there's more than one error, we should report the first.
+ setError := func(err error) {
+ if p.Error == nil {
+ p.Error = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: err,
+ }
+ }
+ }
+
if err != nil {
if _, ok := err.(*build.NoGoError); ok {
err = &NoGoError{Package: p}
}
p.Incomplete = true
- err = base.ExpandScanner(err)
- p.Error = &PackageError{
- ImportStack: stk.Copy(),
- Err: err,
+
+ setError(base.ExpandScanner(err))
+ if _, isScanErr := err.(scanner.ErrorList); !isScanErr {
+ return
}
- return
+ // Fall through if there was an error parsing a file. 'go list -e' should
+ // still report imports and other metadata.
}
useBindir := p.Name == "main"
@@ -1545,7 +1559,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
if InstallTargetDir(p) == StalePath {
newPath := strings.Replace(p.ImportPath, "code.google.com/p/go.", "golang.org/x/", 1)
e := ImportErrorf(p.ImportPath, "the %v command has moved; use %v instead.", p.ImportPath, newPath)
- p.Error = &PackageError{Err: e}
+ setError(e)
return
}
elem := p.DefaultExecName()
@@ -1658,10 +1672,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
inputs := p.AllFiles()
f1, f2 := str.FoldDup(inputs)
if f1 != "" {
- p.Error = &PackageError{
- ImportStack: stk.Copy(),
- Err: fmt.Errorf("case-insensitive file name collision: %q and %q", f1, f2),
- }
+ setError(fmt.Errorf("case-insensitive file name collision: %q and %q", f1, f2))
return
}
@@ -1674,25 +1685,16 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
// so we shouldn't see any _cgo_ files anyway, but just be safe.
for _, file := range inputs {
if !SafeArg(file) || strings.HasPrefix(file, "_cgo_") {
- p.Error = &PackageError{
- ImportStack: stk.Copy(),
- Err: fmt.Errorf("invalid input file name %q", file),
- }
+ setError(fmt.Errorf("invalid input file name %q", file))
return
}
}
if name := pathpkg.Base(p.ImportPath); !SafeArg(name) {
- p.Error = &PackageError{
- ImportStack: stk.Copy(),
- Err: fmt.Errorf("invalid input directory name %q", name),
- }
+ setError(fmt.Errorf("invalid input directory name %q", name))
return
}
if !SafeArg(p.ImportPath) {
- p.Error = &PackageError{
- ImportStack: stk.Copy(),
- Err: ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath),
- }
+ setError(ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath))
return
}
@@ -1737,13 +1739,6 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
// code; see issue #16050).
}
- setError := func(err error) {
- p.Error = &PackageError{
- ImportStack: stk.Copy(),
- Err: err,
- }
- }
-
// The gc toolchain only permits C source files with cgo or SWIG.
if len(p.CFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() && cfg.BuildContext.Compiler == "gc" {
setError(fmt.Errorf("C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CFiles, " ")))
diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go
index 5db0e46c64..7d5294dcd0 100644
--- a/src/cmd/go/internal/modcmd/download.go
+++ b/src/cmd/go/internal/modcmd/download.go
@@ -30,7 +30,9 @@ The go command will automatically download modules as needed during ordinary
execution. The "go mod download" command is useful mainly for pre-filling
the local cache or to compute the answers for a Go module proxy.
-By default, download reports errors to standard error but is otherwise silent.
+By default, download writes nothing to standard output. It may print progress
+messages and errors to standard error.
+
The -json flag causes download to print a sequence of JSON objects
to standard output, describing each downloaded module (or failure),
corresponding to this Go struct:
diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go
index ae8966bab1..dbbfb96e42 100644
--- a/src/cmd/go/internal/modcmd/edit.go
+++ b/src/cmd/go/internal/modcmd/edit.go
@@ -55,12 +55,17 @@ The -exclude=path@version and -dropexclude=path@version flags
add and drop an exclusion for the given module path and version.
Note that -exclude=path@version is a no-op if that exclusion already exists.
-The -replace=old[@v]=new[@v] and -dropreplace=old[@v] flags
-add and drop a replacement of the given module path and version pair.
-If the @v in old@v is omitted, the replacement applies to all versions
-with the old module path. If the @v in new@v is omitted, the new path
-should be a local module root directory, not a module path.
-Note that -replace overrides any existing replacements for old[@v].
+The -replace=old[@v]=new[@v] flag adds a replacement of the given
+module path and version pair. If the @v in old@v is omitted, a
+replacement without a version on the left side is added, which applies
+to all versions of the old module path. If the @v in new@v is omitted,
+the new path should be a local module root directory, not a module
+path. Note that -replace overrides any redundant replacements for old[@v],
+so omitting @v will drop existing replacements for specific versions.
+
+The -dropreplace=old[@v] flag drops a replacement of the given
+module path and version pair. If the @v is omitted, a replacement without
+a version on the left side is dropped.
The -require, -droprequire, -exclude, -dropexclude, -replace,
and -dropreplace editing flags may be repeated, and the changes
@@ -164,7 +169,7 @@ func runEdit(cmd *base.Command, args []string) {
}
if *editModule != "" {
- if err := module.CheckPath(*editModule); err != nil {
+ if err := module.CheckImportPath(*editModule); err != nil {
base.Fatalf("go mod: invalid -module: %v", err)
}
}
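The -replace/-dropreplace semantics documented above are also available programmatically via golang.org/x/mod/modfile. A hedged sketch (the module path and directory here are made up):

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	f, err := modfile.Parse("go.mod", []byte("module m\n\ngo 1.13\n"), nil)
	if err != nil {
		log.Fatal(err)
	}
	// Roughly 'go mod edit -replace=old.example/a@v1.0.0=./local'
	// (an empty new version means a local directory replacement):
	if err := f.AddReplace("old.example/a", "v1.0.0", "./local", ""); err != nil {
		log.Fatal(err)
	}
	// Roughly 'go mod edit -dropreplace=old.example/a@v1.0.0':
	if err := f.DropReplace("old.example/a", "v1.0.0"); err != nil {
		log.Fatal(err)
	}
	out, err := f.Format()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}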
@@ -242,7 +247,7 @@ func parsePathVersion(flag, arg string) (path, version string) {
base.Fatalf("go mod: -%s=%s: need path@version", flag, arg)
}
path, version = strings.TrimSpace(arg[:i]), strings.TrimSpace(arg[i+1:])
- if err := module.CheckPath(path); err != nil {
+ if err := module.CheckImportPath(path); err != nil {
base.Fatalf("go mod: -%s=%s: invalid path: %v", flag, arg, err)
}
@@ -264,7 +269,7 @@ func parsePath(flag, arg string) (path string) {
base.Fatalf("go mod: -%s=%s: need just path, not path@version", flag, arg)
}
path = arg
- if err := module.CheckPath(path); err != nil {
+ if err := module.CheckImportPath(path); err != nil {
base.Fatalf("go mod: -%s=%s: invalid path: %v", flag, arg, err)
}
return path
@@ -278,7 +283,7 @@ func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version
} else {
path, version = strings.TrimSpace(arg[:i]), strings.TrimSpace(arg[i+1:])
}
- if err := module.CheckPath(path); err != nil {
+ if err := module.CheckImportPath(path); err != nil {
if !allowDirPath || !modfile.IsDirectoryPath(path) {
return path, version, fmt.Errorf("invalid %s path: %v", adj, err)
}
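CheckImportPath is strictly looser than CheckPath, which is what lets 'go mod edit' accept non-go-gettable paths such as 'local-only'. A quick probe of the two validators, assuming the golang.org/x/mod/module API as published:

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// CheckPath requires a go-gettable module path (a dot in the first
	// path element); CheckImportPath does not.
	for _, p := range []string{"local-only", "example.com/m"} {
		fmt.Printf("%-14s CheckPath=%v CheckImportPath=%v\n",
			p, module.CheckPath(p), module.CheckImportPath(p))
	}
}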
diff --git a/src/cmd/go/internal/modfetch/cache.go b/src/cmd/go/internal/modfetch/cache.go
index 104fce86dd..947192bd83 100644
--- a/src/cmd/go/internal/modfetch/cache.go
+++ b/src/cmd/go/internal/modfetch/cache.go
@@ -13,7 +13,6 @@ import (
"os"
"path/filepath"
"strings"
- "time"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
@@ -28,8 +27,6 @@ import (
var PkgMod string // $GOPATH/pkg/mod; set by package modload
-const logFindingDelay = 1 * time.Second
-
func cacheDir(path string) (string, error) {
if PkgMod == "" {
return "", fmt.Errorf("internal error: modfetch.PkgMod not set")
@@ -140,11 +137,6 @@ func (r *cachingRepo) Versions(prefix string) ([]string, error) {
err error
}
c := r.cache.Do("versions:"+prefix, func() interface{} {
- logTimer := time.AfterFunc(logFindingDelay, func() {
- fmt.Fprintf(os.Stderr, "go: finding versions for %s\n", r.path)
- })
- defer logTimer.Stop()
-
list, err := r.r.Versions(prefix)
return cached{list, err}
}).(cached)
@@ -167,11 +159,6 @@ func (r *cachingRepo) Stat(rev string) (*RevInfo, error) {
return cachedInfo{info, nil}
}
- logTimer := time.AfterFunc(logFindingDelay, func() {
- fmt.Fprintf(os.Stderr, "go: finding %s %s\n", r.path, rev)
- })
- defer logTimer.Stop()
-
info, err = r.r.Stat(rev)
if err == nil {
// If we resolved, say, 1234abcde to v0.0.0-20180604122334-1234abcdef78,
@@ -199,11 +186,6 @@ func (r *cachingRepo) Stat(rev string) (*RevInfo, error) {
func (r *cachingRepo) Latest() (*RevInfo, error) {
c := r.cache.Do("latest:", func() interface{} {
- logTimer := time.AfterFunc(logFindingDelay, func() {
- fmt.Fprintf(os.Stderr, "go: finding %s latest\n", r.path)
- })
- defer logTimer.Stop()
-
info, err := r.r.Latest()
// Save info for likely future Stat call.
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index e329cbc58e..f08df512f0 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -682,8 +682,11 @@ func (r *gitRepo) RecentTag(rev, prefix, major string) (tag string, err error) {
semtag := line[len(prefix):]
// Consider only tags that are valid and complete (not just major.minor prefixes).
- if c := semver.Canonical(semtag); c != "" && strings.HasPrefix(semtag, c) && (major == "" || semver.Major(c) == major) {
- highest = semver.Max(highest, semtag)
+ // NOTE: Do not replace the call to semver.Compare with semver.Max.
+ // We want to return the actual tag, not a canonicalized version of it,
+ // and semver.Max currently canonicalizes (see golang.org/issue/32700).
+ if c := semver.Canonical(semtag); c != "" && strings.HasPrefix(semtag, c) && (major == "" || semver.Major(c) == major) && semver.Compare(semtag, highest) > 0 {
+ highest = semtag
}
}
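The same canonicalization pitfall is avoided again in modload/import.go further down. A self-contained sketch of why semver.Compare is used instead of semver.Max when the original tag string must be preserved:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	tags := []string{"v1.9.0", "v2.0.0+incompatible"}
	highest := ""
	for _, t := range tags {
		// semver.Max(highest, t) would canonicalize, dropping build
		// metadata such as "+incompatible"; comparing explicitly keeps
		// the original tag string. (An invalid version, including the
		// initial "", compares less than any valid one.)
		if semver.Compare(t, highest) > 0 {
			highest = t
		}
	}
	fmt.Println(highest) // v2.0.0+incompatible, not v2.0.0
}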
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go
index 849e8c7ca1..d1d24a40c9 100644
--- a/src/cmd/go/internal/modfetch/coderepo.go
+++ b/src/cmd/go/internal/modfetch/coderepo.go
@@ -191,22 +191,6 @@ func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]st
return list, nil
}
- // We assume that if the latest release of any major version has a go.mod
- // file, all subsequent major versions will also have go.mod files (and thus
- // be ineligible for use as +incompatible versions).
- // If we're wrong about a major version, users will still be able to 'go get'
- // specific higher versions explicitly — they just won't affect 'latest' or
- // appear in 'go list'.
- //
- // Conversely, we assume that if the latest release of any major version lacks
- // a go.mod file, all versions also lack go.mod files. If we're wrong, we may
- // include a +incompatible version that isn't really valid, but most
- // operations won't try to use that version anyway.
- //
- // These optimizations bring
- // 'go list -versions -m github.com/openshift/origin' down from 1m58s to 0m37s.
- // That's still not great, but a substantial improvement.
-
versionHasGoMod := func(v string) (bool, error) {
_, err := r.code.ReadFile(v, "go.mod", codehost.MaxGoMod)
if err == nil {
@@ -241,32 +225,41 @@ func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]st
}
}
- var lastMajor string
+ var (
+ lastMajor string
+ lastMajorHasGoMod bool
+ )
for i, v := range incompatible {
major := semver.Major(v)
- if major == lastMajor {
- list = append(list, v+"+incompatible")
- continue
- }
- rem := incompatible[i:]
- j := sort.Search(len(rem), func(j int) bool {
- return semver.Major(rem[j]) != major
- })
- latestAtMajor := rem[j-1]
+ if major != lastMajor {
+ rem := incompatible[i:]
+ j := sort.Search(len(rem), func(j int) bool {
+ return semver.Major(rem[j]) != major
+ })
+ latestAtMajor := rem[j-1]
- ok, err := versionHasGoMod(latestAtMajor)
- if err != nil {
- return nil, err
- }
- if ok {
- // This major version has a go.mod file, so it is not allowed as
- // +incompatible. Subsequent major versions are likely to also have
- // go.mod files, so stop here.
- break
+ var err error
+ lastMajor = major
+ lastMajorHasGoMod, err = versionHasGoMod(latestAtMajor)
+ if err != nil {
+ return nil, err
+ }
}
- lastMajor = major
+ if lastMajorHasGoMod {
+ // The latest release of this major version has a go.mod file, so it is
+ // not allowed as +incompatible. It would be confusing to include some
+ // minor versions of this major version as +incompatible but require
+ // semantic import versioning for others, so drop all +incompatible
+ // versions for this major version.
+ //
+ // If we're wrong about a minor version in the middle, users will still be
+ // able to 'go get' specific tags for that version explicitly — they just
+ // won't appear in 'go list' or as the results for queries with inequality
+ // bounds.
+ continue
+ }
list = append(list, v+"+incompatible")
}
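The rewritten loop probes versionHasGoMod only when the major version changes. A compressed variant of its sort.Search run detection, standalone and with a made-up version list:

package main

import (
	"fmt"
	"sort"

	"golang.org/x/mod/semver"
)

func main() {
	// Sorted by semver, so versions with the same major are contiguous.
	incompatible := []string{"v2.0.0", "v2.1.0", "v3.0.0", "v3.0.1", "v4.0.0"}
	for i := 0; i < len(incompatible); {
		major := semver.Major(incompatible[i])
		rem := incompatible[i:]
		// Find the end of the run of versions sharing this major version.
		j := sort.Search(len(rem), func(j int) bool {
			return semver.Major(rem[j]) != major
		})
		fmt.Printf("%s: latest is %s\n", major, rem[j-1])
		i += j
	}
}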
@@ -359,7 +352,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
Path: r.modPath,
Err: &module.InvalidVersionError{
Version: info2.Version,
- Err: notExistError(err.Error()),
+ Err: notExistError{err: err},
},
}
}
@@ -708,7 +701,7 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e
return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.pathPrefix, file1, rev, err1)
}
mpath1 := modfile.ModulePath(gomod1)
- found1 := err1 == nil && isMajor(mpath1, r.pathMajor)
+ found1 := err1 == nil && (isMajor(mpath1, r.pathMajor) || r.canReplaceMismatchedVersionDueToBug(mpath1))
var file2 string
if r.pathMajor != "" && r.codeRoot != r.modPath && !strings.HasPrefix(r.pathMajor, ".") {
@@ -817,6 +810,17 @@ func isMajor(mpath, pathMajor string) bool {
return pathMajor[1:] == mpathMajor[1:]
}
+// canReplaceMismatchedVersionDueToBug reports whether versions of r
+// could replace versions of mpath with otherwise-mismatched major versions
+// due to a historical bug in the Go command (golang.org/issue/34254).
+func (r *codeRepo) canReplaceMismatchedVersionDueToBug(mpath string) bool {
+ // The bug caused us to erroneously accept unversioned paths as replacements
+ // for versioned gopkg.in paths.
+ unversioned := r.pathMajor == ""
+ replacingGopkgIn := strings.HasPrefix(mpath, "gopkg.in/")
+ return unversioned && replacingGopkgIn
+}
+
func (r *codeRepo) GoMod(version string) (data []byte, err error) {
if version != module.CanonicalVersion(version) {
return nil, fmt.Errorf("version %s is not canonical", version)
diff --git a/src/cmd/go/internal/modfetch/repo.go b/src/cmd/go/internal/modfetch/repo.go
index 4273da0317..f03bdd8d03 100644
--- a/src/cmd/go/internal/modfetch/repo.go
+++ b/src/cmd/go/internal/modfetch/repo.go
@@ -250,9 +250,9 @@ func (lookupDisabledError) Error() string {
var errLookupDisabled error = lookupDisabledError{}
var (
- errProxyOff = notExistError("module lookup disabled by GOPROXY=off")
- errNoproxy error = notExistError("disabled by GOPRIVATE/GONOPROXY")
- errUseProxy error = notExistError("path does not match GOPRIVATE/GONOPROXY")
+ errProxyOff = notExistErrorf("module lookup disabled by GOPROXY=off")
+ errNoproxy error = notExistErrorf("disabled by GOPRIVATE/GONOPROXY")
+ errUseProxy error = notExistErrorf("path does not match GOPRIVATE/GONOPROXY")
)
func lookupDirect(path string) (Repo, error) {
@@ -264,7 +264,7 @@ func lookupDirect(path string) (Repo, error) {
rr, err := get.RepoRootForImportPath(path, get.PreferMod, security)
if err != nil {
// We don't know where to find code for a module with this path.
- return nil, notExistError(err.Error())
+ return nil, notExistError{err: err}
}
if rr.VCS == "mod" {
@@ -408,11 +408,22 @@ func (l *loggingRepo) Zip(dst io.Writer, version string) error {
}
// A notExistError is like os.ErrNotExist, but with a custom message
-type notExistError string
+type notExistError struct {
+ err error
+}
+
+func notExistErrorf(format string, args ...interface{}) error {
+ return notExistError{fmt.Errorf(format, args...)}
+}
func (e notExistError) Error() string {
- return string(e)
+ return e.err.Error()
}
+
func (notExistError) Is(target error) bool {
return target == os.ErrNotExist
}
+
+func (e notExistError) Unwrap() error {
+ return e.err
+}
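With the Is and Unwrap methods above, callers can keep using errors.Is while also inspecting the wrapped cause. A self-contained illustration of the pattern with a stand-in type:

package main

import (
	"errors"
	"fmt"
	"os"
)

// notFoundError mirrors the notExistError pattern above: it wraps an
// underlying error but reports equivalence to os.ErrNotExist.
type notFoundError struct{ err error }

func (e notFoundError) Error() string { return e.err.Error() }

func (notFoundError) Is(target error) bool { return target == os.ErrNotExist }

func (e notFoundError) Unwrap() error { return e.err }

func main() {
	err := notFoundError{err: fmt.Errorf("module lookup disabled by GOPROXY=off")}
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true, via the custom Is method
	fmt.Println(errors.Unwrap(err))             // the wrapped cause
}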
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
index 5a281a9304..6fa47d7400 100644
--- a/src/cmd/go/internal/modload/build.go
+++ b/src/cmd/go/internal/modload/build.go
@@ -112,7 +112,7 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic {
}
if HasModRoot() {
info.Dir = ModRoot()
- info.GoMod = filepath.Join(info.Dir, "go.mod")
+ info.GoMod = ModFilePath()
if modFile.Go != nil {
info.GoVersion = modFile.Go.Version
}
diff --git a/src/cmd/go/internal/modload/help.go b/src/cmd/go/internal/modload/help.go
index b47f3dedb3..66c1f70db7 100644
--- a/src/cmd/go/internal/modload/help.go
+++ b/src/cmd/go/internal/modload/help.go
@@ -21,14 +21,15 @@ which source files are used in a given build.
Module support
-Go 1.13 includes support for Go modules. Module-aware mode is active by default
-whenever a go.mod file is found in, or in a parent of, the current directory.
+The go command includes support for Go modules. Module-aware mode is active
+by default whenever a go.mod file is found in the current directory or in
+any parent directory.
The quickest way to take advantage of module support is to check out your
repository, create a go.mod file (described in the next section) there, and run
go commands from within that file tree.
-For more fine-grained control, Go 1.13 continues to respect
+For more fine-grained control, the go command continues to respect
a temporary environment variable, GO111MODULE, which can be set to one
of three string values: off, on, or auto (the default).
If GO111MODULE=on, then the go command requires the use of modules,
diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go
index a01ef62d55..5906d648b4 100644
--- a/src/cmd/go/internal/modload/import.go
+++ b/src/cmd/go/internal/modload/import.go
@@ -20,7 +20,6 @@ import (
"cmd/go/internal/modfetch"
"cmd/go/internal/par"
"cmd/go/internal/search"
- "cmd/go/internal/str"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
@@ -40,7 +39,7 @@ var _ load.ImportPathError = (*ImportMissingError)(nil)
func (e *ImportMissingError) Error() string {
if e.Module.Path == "" {
- if str.HasPathPrefix(e.Path, "cmd") {
+ if search.IsStandardImportPath(e.Path) {
return fmt.Sprintf("package %s is not in GOROOT (%s)", e.Path, filepath.Join(cfg.GOROOT, "src", e.Path))
}
if e.QueryErr != nil {
@@ -118,8 +117,8 @@ func Import(path string) (m module.Version, dir string, err error) {
}
// Is the package in the standard library?
- if search.IsStandardImportPath(path) &&
- goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
+ pathIsStd := search.IsStandardImportPath(path)
+ if pathIsStd && goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
if targetInGorootSrc {
if dir, ok := dirInModule(path, targetPrefix, ModRoot(), true); ok {
return Target, dir, nil
@@ -128,9 +127,6 @@ func Import(path string) (m module.Version, dir string, err error) {
dir := filepath.Join(cfg.GOROOT, "src", path)
return module.Version{}, dir, nil
}
- if str.HasPathPrefix(path, "cmd") {
- return module.Version{}, "", &ImportMissingError{Path: path}
- }
// -mod=vendor is special.
// Everything must be in the main module or the main module's vendor directory.
@@ -184,7 +180,14 @@ func Import(path string) (m module.Version, dir string, err error) {
// Look up module containing the package, for addition to the build list.
// Goal is to determine the module, download it to dir, and return m, dir, ErrMissing.
if cfg.BuildMod == "readonly" {
- return module.Version{}, "", fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod)
+ var queryErr error
+ if !pathIsStd {
+ if cfg.BuildModReason == "" {
+ queryErr = fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod)
+ } else {
+ queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason)
+ }
+ return module.Version{}, "", &ImportMissingError{Path: path, QueryErr: queryErr}
}
if modRoot == "" && !allowMissingModuleImports {
return module.Version{}, "", &ImportMissingError{
@@ -200,7 +203,12 @@ func Import(path string) (m module.Version, dir string, err error) {
latest := map[string]string{} // path -> version
for _, r := range modFile.Replace {
if maybeInModule(path, r.Old.Path) {
- latest[r.Old.Path] = semver.Max(r.Old.Version, latest[r.Old.Path])
+ // Don't use semver.Max here; need to preserve +incompatible suffix.
+ v := latest[r.Old.Path]
+ if semver.Compare(r.Old.Version, v) > 0 {
+ v = r.Old.Version
+ }
+ latest[r.Old.Path] = v
}
}
@@ -250,6 +258,19 @@ func Import(path string) (m module.Version, dir string, err error) {
}
}
+ if pathIsStd {
+ // This package isn't in the standard library, isn't in any module already
+ // in the build list, and isn't in any other module that the user has
+ // shimmed in via a "replace" directive.
+ // Moreover, the import path is reserved for the standard library, so
+ // QueryPackage cannot possibly find a module containing this package.
+ //
+ // Instead of trying QueryPackage, report an ImportMissingError immediately.
+ return module.Version{}, "", &ImportMissingError{Path: path}
+ }
+
+ fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path)
+
candidates, err := QueryPackage(path, "latest", Allowed)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
index 2df7bd04b7..58e2141f65 100644
--- a/src/cmd/go/internal/modload/load.go
+++ b/src/cmd/go/internal/modload/load.go
@@ -1320,6 +1320,21 @@ func fetch(mod module.Version) (dir string, isLocal bool, err error) {
if !filepath.IsAbs(dir) {
dir = filepath.Join(ModRoot(), dir)
}
+ // Ensure that the replacement directory actually exists:
+ // dirInModule does not report errors for missing modules,
+ // so if we don't report the error now, later failures will be
+ // very mysterious.
+ if _, err := os.Stat(dir); err != nil {
+ if os.IsNotExist(err) {
+ // Semantically the module version itself “exists” — we just don't
+ // have its source code. Remove the equivalence to os.ErrNotExist,
+ // and make the message more concise while we're at it.
+ err = fmt.Errorf("replacement directory %s does not exist", r.Path)
+ } else {
+ err = fmt.Errorf("replacement directory %s: %w", r.Path, err)
+ }
+ return dir, true, module.VersionError(mod, err)
+ }
return dir, true, nil
}
mod = r
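The comment above is deliberate about dropping the os.ErrNotExist equivalence: rebuilding the message without %w breaks it, while %w would preserve it. A sketch of the difference (the missing path is hypothetical):

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, statErr := os.Stat("replacement-dir-that-does-not-exist") // hypothetical path
	// Rebuilding the message without %w drops the os.ErrNotExist equivalence:
	// the module version itself exists; only its replacement directory is missing.
	plain := fmt.Errorf("replacement directory %s does not exist", "./x")
	// Wrapping with %w would have preserved it:
	wrapped := fmt.Errorf("replacement directory %s: %w", "./x", statErr)
	fmt.Println(errors.Is(plain, os.ErrNotExist))   // false
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}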
diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go
index 53278b9100..031e45938a 100644
--- a/src/cmd/go/internal/modload/query.go
+++ b/src/cmd/go/internal/modload/query.go
@@ -79,7 +79,7 @@ func queryProxy(proxy, path, query, current string, allowed func(module.Version)
if current != "" && !semver.IsValid(current) {
return nil, fmt.Errorf("invalid previous version %q", current)
}
- if cfg.BuildMod != "" && cfg.BuildMod != "mod" {
+ if cfg.BuildMod == "vendor" {
return nil, errQueryDisabled
}
if allowed == nil {
diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go
index 9c91c05e5f..15470e2685 100644
--- a/src/cmd/go/internal/modload/query_test.go
+++ b/src/cmd/go/internal/modload/query_test.go
@@ -64,7 +64,7 @@ var queryTests = []struct {
git add go.mod
git commit -m v1 go.mod
git tag start
- for i in v0.0.0-pre1 v0.0.0 v0.0.1 v0.0.2 v0.0.3 v0.1.0 v0.1.1 v0.1.2 v0.3.0 v1.0.0 v1.1.0 v1.9.0 v1.9.9 v1.9.10-pre1 v1.9.10-pre2+metadata; do
+ for i in v0.0.0-pre1 v0.0.0 v0.0.1 v0.0.2 v0.0.3 v0.1.0 v0.1.1 v0.1.2 v0.3.0 v1.0.0 v1.1.0 v1.9.0 v1.9.9 v1.9.10-pre1 v1.9.10-pre2+metadata unversioned; do
echo before $i >status
git add status
git commit -m "before $i" status
@@ -107,6 +107,7 @@ var queryTests = []struct {
{path: queryRepo, query: "v0.2", err: `no matching versions for query "v0.2"`},
{path: queryRepo, query: "v0.0", vers: "v0.0.3"},
{path: queryRepo, query: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"},
+ {path: queryRepo, query: "ed5ffdaa", vers: "v1.9.10-pre2.0.20191220134614-ed5ffdaa1f5e"},
 // golang.org/issue/29262: The major version for a module without a suffix
// should be based on the most recent tag (v1 as appropriate, not v0
@@ -162,10 +163,14 @@ var queryTests = []struct {
{path: queryRepoV2, query: "v2.6.0-pre1", vers: "v2.6.0-pre1"},
{path: queryRepoV2, query: "latest", vers: "v2.5.5"},
- // e0cf3de987e6 is the latest commit on the master branch, and it's actually
- // v1.19.10-pre1, not anything resembling v3: attempting to query it as such
- // should fail.
+ // Commit e0cf3de987e6 is actually v1.19.10-pre1, not anything resembling v3,
+ // and it has a go.mod file with a non-v3 module path. Attempting to query it
+ // as the v3 module should fail.
{path: queryRepoV3, query: "e0cf3de987e6", err: `vcs-test.golang.org/git/querytest.git/v3@v3.0.0-20180704024501-e0cf3de987e6: invalid version: go.mod has non-.../v3 module path "vcs-test.golang.org/git/querytest.git" (and .../v3/go.mod does not exist) at revision e0cf3de987e6`},
+
+ // The querytest repo does not have any commits tagged with major version 3,
+ // and the latest commit in the repo has a go.mod file specifying a non-v3 path.
+ // That should prevent us from resolving any version for the /v3 path.
{path: queryRepoV3, query: "latest", err: `no matching versions for query "latest"`},
{path: emptyRepo, query: "latest", vers: "v0.0.0-20180704023549-7bb914627242"},
diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go
index 79dc5eb2a0..e214b1532b 100644
--- a/src/cmd/go/internal/test/testflag.go
+++ b/src/cmd/go/internal/test/testflag.go
@@ -88,7 +88,8 @@ func init() {
// go test fmt -custom-flag-for-fmt-test
// go test -x math
func testFlags(usage func(), args []string) (packageNames, passToTest []string) {
- args = str.StringList(cmdflag.FindGOFLAGS(testFlagDefn), args)
+ goflags := cmdflag.FindGOFLAGS(testFlagDefn)
+ args = str.StringList(goflags, args)
inPkg := false
var explicitArgs []string
for i := 0; i < len(args); i++ {
@@ -127,6 +128,9 @@ func testFlags(usage func(), args []string) (packageNames, passToTest []string)
passToTest = append(passToTest, args[i])
continue
}
+ if i < len(goflags) {
+ f.Present = false // Not actually present on the command line.
+ }
if f.Value != nil {
if err := f.Value.Set(value); err != nil {
base.Fatalf("invalid flag argument for -%s: %v", f.Name, err)
diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go
index 327b761c3c..660a739fbb 100644
--- a/src/cmd/go/internal/vet/vet.go
+++ b/src/cmd/go/internal/vet/vet.go
@@ -51,6 +51,7 @@ func runVet(cmd *base.Command, args []string) {
work.BuildInit()
work.VetFlags = vetFlags
+ work.VetExplicit = true
if vetTool != "" {
var err error
work.VetTool, err = filepath.Abs(vetTool)
diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go
index 7179f73cfc..e3de48bbff 100644
--- a/src/cmd/go/internal/vet/vetflag.go
+++ b/src/cmd/go/internal/vet/vetflag.go
@@ -126,7 +126,8 @@ func vetFlags(usage func(), args []string) (passToVet, packageNames []string) {
})
// Process args.
- args = str.StringList(cmdflag.FindGOFLAGS(vetFlagDefn), args)
+ goflags := cmdflag.FindGOFLAGS(vetFlagDefn)
+ args = str.StringList(goflags, args)
for i := 0; i < len(args); i++ {
if !strings.HasPrefix(args[i], "-") {
return args[:i], args[i:]
@@ -139,6 +140,9 @@ func vetFlags(usage func(), args []string) (passToVet, packageNames []string) {
base.SetExitStatus(2)
base.Exit()
}
+ if i < len(goflags) {
+ f.Present = false // Not actually present on the command line.
+ }
if f.Value != nil {
if err := f.Value.Set(value); err != nil {
base.Fatalf("invalid flag argument for -%s: %v", f.Name, err)
diff --git a/src/cmd/go/internal/web/file_test.go b/src/cmd/go/internal/web/file_test.go
index e31ad71d4d..6339469045 100644
--- a/src/cmd/go/internal/web/file_test.go
+++ b/src/cmd/go/internal/web/file_test.go
@@ -19,6 +19,8 @@ func TestGetFileURL(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ defer os.Remove(f.Name())
+
if _, err := f.WriteString(content); err != nil {
t.Error(err)
}
diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go
index 391306a8d9..e3cb87fbb9 100644
--- a/src/cmd/go/internal/work/action.go
+++ b/src/cmd/go/internal/work/action.go
@@ -291,7 +291,7 @@ func (b *Builder) Init() {
}
if err := CheckGOOSARCHPair(cfg.Goos, cfg.Goarch); err != nil {
- fmt.Fprintf(os.Stderr, "cmd/go: %v", err)
+ fmt.Fprintf(os.Stderr, "cmd/go: %v\n", err)
base.SetExitStatus(2)
base.Exit()
}
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
index d0f07dec43..1bba3a5329 100644
--- a/src/cmd/go/internal/work/exec.go
+++ b/src/cmd/go/internal/work/exec.go
@@ -1036,7 +1036,7 @@ func (b *Builder) vet(a *Action) error {
// There's too much unsafe.Pointer code
// that vet doesn't like in low-level packages
// like runtime, sync, and reflect.
- vetFlags = append(vetFlags, string("-unsafeptr=false"))
+ vetFlags = []string{"-unsafeptr=false"}
}
// Note: We could decide that vet should compute export data for
@@ -1774,6 +1774,11 @@ func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string
}
if b.WorkDir != "" {
cmd = strings.ReplaceAll(cmd, b.WorkDir, "$WORK")
+ escaped := strconv.Quote(b.WorkDir)
+ escaped = escaped[1 : len(escaped)-1] // strip quote characters
+ if escaped != b.WorkDir {
+ cmd = strings.ReplaceAll(cmd, escaped, "$WORK")
+ }
}
return cmd
}
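The extra replacement above matters on Windows, where WorkDir contains backslashes that appear doubled inside quoted arguments. A standalone sketch of the two-pass substitution (the paths are made up):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	workDir := `C:\tmp\go-build`
	cmd := `compile -trimpath "C:\\tmp\\go-build" C:\tmp\go-build\main.go`

	// First replace the raw path, then its quote-escaped spelling.
	cmd = strings.ReplaceAll(cmd, workDir, "$WORK")
	escaped := strconv.Quote(workDir)
	escaped = escaped[1 : len(escaped)-1] // strip the surrounding quotes
	if escaped != workDir {
		cmd = strings.ReplaceAll(cmd, escaped, "$WORK")
	}
	fmt.Println(cmd) // compile -trimpath "$WORK" $WORK\main.go
}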
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index 57024694cf..7d17c0c01e 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -227,8 +227,8 @@ func (a *Action) trimpath() string {
// For "go build -trimpath", rewrite package source directory
// to a file system-independent path (just the import path).
if cfg.BuildTrimpath {
- if m := a.Package.Module; m != nil {
- rewrite += ";" + m.Dir + "=>" + m.Path + "@" + m.Version
+ if m := a.Package.Module; m != nil && m.Version != "" {
+ rewrite += ";" + a.Package.Dir + "=>" + m.Path + "@" + m.Version + strings.TrimPrefix(a.Package.ImportPath, m.Path)
} else {
rewrite += ";" + a.Package.Dir + "=>" + a.Package.ImportPath
}
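For a package below the module root, the new rewrite maps the package directory to path@version plus the package's path relative to the module, rather than mapping the module root as a whole. A worked sketch with hypothetical inputs:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical inputs; the real values come from a.Package and its Module.
	modPath, modVersion := "example.com/m", "v1.2.3"
	pkgDir := "/home/user/go/pkg/mod/example.com/m@v1.2.3/internal/sub"
	pkgImportPath := "example.com/m/internal/sub"

	rewrite := pkgDir + "=>" + modPath + "@" + modVersion +
		strings.TrimPrefix(pkgImportPath, modPath)
	fmt.Println(rewrite)
	// /home/user/go/pkg/mod/example.com/m@v1.2.3/internal/sub=>example.com/m@v1.2.3/internal/sub
}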
diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go
index d02630155b..0ce1664c16 100644
--- a/src/cmd/go/internal/work/security.go
+++ b/src/cmd/go/internal/work/security.go
@@ -52,7 +52,7 @@ var validCompilerFlags = []*lazyregexp.Regexp{
re(`-W([^@,]+)`), // -Wall but not -Wa,-foo.
re(`-Wa,-mbig-obj`),
re(`-Wp,-D([A-Za-z_].*)`),
- re(`-Wp, -U([A-Za-z_]*)`),
+ re(`-Wp,-U([A-Za-z_]*)`),
re(`-ansi`),
re(`-f(no-)?asynchronous-unwind-tables`),
re(`-f(no-)?blocks`),
diff --git a/src/cmd/go/internal/work/security_test.go b/src/cmd/go/internal/work/security_test.go
index 3a02db1d04..6b85c40b13 100644
--- a/src/cmd/go/internal/work/security_test.go
+++ b/src/cmd/go/internal/work/security_test.go
@@ -22,6 +22,8 @@ var goodCompilerFlags = [][]string{
{"-Osmall"},
{"-W"},
{"-Wall"},
+ {"-Wp,-Dfoo=bar"},
+ {"-Wp,-Ufoo"},
{"-fobjc-arc"},
{"-fno-objc-arc"},
{"-fomit-frame-pointer"},
diff --git a/src/cmd/go/testdata/badmod/go.mod b/src/cmd/go/testdata/badmod/go.mod
deleted file mode 100644
index f7f6423870..0000000000
--- a/src/cmd/go/testdata/badmod/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module m
diff --git a/src/cmd/go/testdata/badmod/x.go b/src/cmd/go/testdata/badmod/x.go
deleted file mode 100644
index 579fb086ee..0000000000
--- a/src/cmd/go/testdata/badmod/x.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package x
-
-import _ "appengine"
-import _ "nonexistent.rsc.io" // domain does not exist
diff --git a/src/cmd/go/testdata/importcom/bad.go b/src/cmd/go/testdata/importcom/bad.go
deleted file mode 100644
index e104c2e992..0000000000
--- a/src/cmd/go/testdata/importcom/bad.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package p
-
-import "bad"
diff --git a/src/cmd/go/testdata/importcom/conflict.go b/src/cmd/go/testdata/importcom/conflict.go
deleted file mode 100644
index 995556c511..0000000000
--- a/src/cmd/go/testdata/importcom/conflict.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package p
-
-import "conflict"
diff --git a/src/cmd/go/testdata/importcom/src/bad/bad.go b/src/cmd/go/testdata/importcom/src/bad/bad.go
deleted file mode 100644
index bc51fd3fde..0000000000
--- a/src/cmd/go/testdata/importcom/src/bad/bad.go
+++ /dev/null
@@ -1 +0,0 @@
-package bad // import
diff --git a/src/cmd/go/testdata/importcom/src/conflict/a.go b/src/cmd/go/testdata/importcom/src/conflict/a.go
deleted file mode 100644
index 2d67703511..0000000000
--- a/src/cmd/go/testdata/importcom/src/conflict/a.go
+++ /dev/null
@@ -1 +0,0 @@
-package conflict // import "a"
diff --git a/src/cmd/go/testdata/importcom/src/conflict/b.go b/src/cmd/go/testdata/importcom/src/conflict/b.go
deleted file mode 100644
index 8fcfb3c8bd..0000000000
--- a/src/cmd/go/testdata/importcom/src/conflict/b.go
+++ /dev/null
@@ -1 +0,0 @@
-package conflict /* import "b" */
diff --git a/src/cmd/go/testdata/importcom/src/works/x/x.go b/src/cmd/go/testdata/importcom/src/works/x/x.go
deleted file mode 100644
index 044c6eca80..0000000000
--- a/src/cmd/go/testdata/importcom/src/works/x/x.go
+++ /dev/null
@@ -1 +0,0 @@
-package x // import "works/x"
diff --git a/src/cmd/go/testdata/importcom/src/works/x/x1.go b/src/cmd/go/testdata/importcom/src/works/x/x1.go
deleted file mode 100644
index 2449b29df5..0000000000
--- a/src/cmd/go/testdata/importcom/src/works/x/x1.go
+++ /dev/null
@@ -1 +0,0 @@
-package x // important! not an import comment
diff --git a/src/cmd/go/testdata/importcom/src/wrongplace/x.go b/src/cmd/go/testdata/importcom/src/wrongplace/x.go
deleted file mode 100644
index b89849da78..0000000000
--- a/src/cmd/go/testdata/importcom/src/wrongplace/x.go
+++ /dev/null
@@ -1 +0,0 @@
-package x // import "my/x"
diff --git a/src/cmd/go/testdata/importcom/works.go b/src/cmd/go/testdata/importcom/works.go
deleted file mode 100644
index 31b55d08a3..0000000000
--- a/src/cmd/go/testdata/importcom/works.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package p
-
-import _ "works/x"
diff --git a/src/cmd/go/testdata/importcom/wrongplace.go b/src/cmd/go/testdata/importcom/wrongplace.go
deleted file mode 100644
index e2535e01ae..0000000000
--- a/src/cmd/go/testdata/importcom/wrongplace.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package p
-
-import "wrongplace"
diff --git a/src/cmd/go/testdata/importcycle/src/selfimport/selfimport.go b/src/cmd/go/testdata/importcycle/src/selfimport/selfimport.go
deleted file mode 100644
index dc63c4b9f2..0000000000
--- a/src/cmd/go/testdata/importcycle/src/selfimport/selfimport.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package selfimport
-
-import "selfimport"
diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README
index 2782a09707..7dba6b394c 100644
--- a/src/cmd/go/testdata/script/README
+++ b/src/cmd/go/testdata/script/README
@@ -40,7 +40,7 @@ Scripts also have access to these other environment variables:
goversion=<current Go version; for example, 1.12>
:=<OS-specific path list separator>
-The scripts supporting files are unpacked relative to $GOPATH/src (aka $WORK/gopath/src)
+The scripts' supporting files are unpacked relative to $GOPATH/src (aka $WORK/gopath/src)
and then the script begins execution in that directory as well. Thus the example above runs
in $WORK/gopath/src with GOPATH=$WORK/gopath and $WORK/gopath/src/hello.go
containing the listed contents.
diff --git a/src/cmd/go/testdata/script/build_exe.txt b/src/cmd/go/testdata/script/build_exe.txt
new file mode 100644
index 0000000000..fd13259fcc
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_exe.txt
@@ -0,0 +1,21 @@
+# go build with -o and -buildmode=exe on a non-main package should report an error
+
+! go build -buildmode=exe -o out$GOEXE not_main
+stderr '-buildmode=exe requires exactly one main package'
+! exists out$GOEXE
+! go build -buildmode=exe -o out$GOEXE main_one main_two
+stderr '-buildmode=exe requires exactly one main package'
+! exists out$GOEXE
+
+-- not_main/not_main.go --
+package not_main
+
+func F() {}
+-- main_one/main_one.go --
+package main
+
+func main() {}
+-- main_two/main_two.go --
+package main
+
+func main() {} \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/build_import_comment.txt b/src/cmd/go/testdata/script/build_import_comment.txt
new file mode 100644
index 0000000000..0ab643914d
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_import_comment.txt
@@ -0,0 +1,47 @@
+# TODO: add a go.mod file and test with GO111MODULE explicitly on and off.
+# We only report the 'expects import' error when modules are disabled.
+# Do we report comment parse errors or conflicts in module mode? We shouldn't.
+
+# Import comment matches
+go build -n works.go
+
+# Import comment mismatch
+! go build -n wrongplace.go
+stderr 'wrongplace expects import "my/x"'
+
+# Import comment syntax error
+! go build -n bad.go
+stderr 'cannot parse import comment'
+
+# Import comment conflict
+! go build -n conflict.go
+stderr 'found import comments'
+
+-- bad.go --
+package p
+
+import "bad"
+-- conflict.go --
+package p
+
+import "conflict"
+-- works.go --
+package p
+
+import _ "works/x"
+-- wrongplace.go --
+package p
+
+import "wrongplace"
+-- bad/bad.go --
+package bad // import
+-- conflict/a.go --
+package conflict // import "a"
+-- conflict/b.go --
+package conflict /* import "b" */
+-- works/x/x.go --
+package x // import "works/x"
+-- works/x/x1.go --
+package x // important! not an import comment
+-- wrongplace/x.go --
+package x // import "my/x"
diff --git a/src/cmd/go/testdata/script/build_import_cycle.txt b/src/cmd/go/testdata/script/build_import_cycle.txt
new file mode 100644
index 0000000000..0154305c27
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_import_cycle.txt
@@ -0,0 +1,10 @@
+! go build selfimport
+stderr -count=1 'import cycle not allowed'
+
+go list -e -f '{{.Error}}' selfimport # Don't hang forever
+stdout -count=1 'import cycle not allowed'
+
+-- selfimport/selfimport.go --
+package selfimport
+
+import "selfimport"
diff --git a/src/cmd/go/testdata/script/build_plugin_non_main.txt b/src/cmd/go/testdata/script/build_plugin_non_main.txt
new file mode 100644
index 0000000000..996d87d961
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_plugin_non_main.txt
@@ -0,0 +1,17 @@
+# Plugins are only supported on linux,cgo and darwin,cgo.
+[!linux] [!darwin] skip
+[!cgo] skip
+
+go build -n testdep/p2
+! go build -buildmode=plugin testdep/p2
+stderr '-buildmode=plugin requires exactly one main package'
+
+-- testdep/p1/p1.go --
+package p1
+-- testdep/p1/p1_test.go --
+package p1
+
+import _ "testdep/p2"
+-- testdep/p2/p2.go --
+package p2
+
diff --git a/src/cmd/go/testdata/script/cgo_asm_error.txt b/src/cmd/go/testdata/script/cgo_asm_error.txt
new file mode 100644
index 0000000000..e656106940
--- /dev/null
+++ b/src/cmd/go/testdata/script/cgo_asm_error.txt
@@ -0,0 +1,21 @@
+[!cgo] skip
+
+# Test that a cgo package can't contain a Go assembly file.
+
+# Ensure the build fails and reports that the package has a Go assembly file.
+! go build cgoasm
+stderr 'package using cgo has Go assembly file'
+
+-- cgoasm/p.go --
+package p
+
+/*
+// hi
+*/
+import "C"
+
+func F() {}
+-- cgoasm/p.s --
+TEXT asm(SB),$0
+ RET
+
diff --git a/src/cmd/go/testdata/script/clean_testcache.txt b/src/cmd/go/testdata/script/clean_testcache.txt
index dd7846462e..b3f32fe696 100644
--- a/src/cmd/go/testdata/script/clean_testcache.txt
+++ b/src/cmd/go/testdata/script/clean_testcache.txt
@@ -9,6 +9,13 @@ go clean -testcache
go test x_test.go
! stdout 'cached'
+# golang.org/issue/29100: 'go clean -testcache' should succeed
+# if the cache directory doesn't exist at all.
+# It should not write a testexpire.txt file, since there are no
+# test results that need to be invalidated in the first place.
+env GOCACHE=$WORK/nonexistent
+go clean -testcache
+! exists $WORK/nonexistent
-- x/x_test.go --
package x_test
@@ -16,4 +23,4 @@ import (
"testing"
)
func TestMain(t *testing.T) {
-} \ No newline at end of file
+}
diff --git a/src/cmd/go/testdata/script/cover_cgo.txt b/src/cmd/go/testdata/script/cover_cgo.txt
new file mode 100644
index 0000000000..8016358eef
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_cgo.txt
@@ -0,0 +1,37 @@
+[!cgo] skip
+[gccgo] skip # gccgo has no cover tool
+
+# Test coverage on cgo code.
+
+go test -short -cover cgocover
+stdout 'coverage:.*[1-9][0-9.]+%'
+! stderr '[^0-9]0\.0%'
+
+-- cgocover/p.go --
+package p
+
+/*
+void
+f(void)
+{
+}
+*/
+import "C"
+
+var b bool
+
+func F() {
+ if b {
+ for {
+ }
+ }
+ C.f()
+}
+-- cgocover/p_test.go --
+package p
+
+import "testing"
+
+func TestF(t *testing.T) {
+ F()
+}
diff --git a/src/cmd/go/testdata/script/cover_cgo_extra_file.txt b/src/cmd/go/testdata/script/cover_cgo_extra_file.txt
new file mode 100644
index 0000000000..65376f8e6e
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_cgo_extra_file.txt
@@ -0,0 +1,43 @@
+[!cgo] skip
+[gccgo] skip # gccgo has no cover tool
+
+# Test coverage on cgo code. This test case includes an
+# extra empty non-cgo file in the package being checked.
+
+go test -short -cover cgocover4
+stdout 'coverage:.*[1-9][0-9.]+%'
+! stderr '[^0-9]0\.0%'
+
+-- cgocover4/notcgo.go --
+package p
+-- cgocover4/p.go --
+package p
+
+/*
+void
+f(void)
+{
+}
+*/
+import "C"
+
+var b bool
+
+func F() {
+ if b {
+ for {
+ }
+ }
+ C.f()
+}
+-- cgocover4/x_test.go --
+package p_test
+
+import (
+ . "cgocover4"
+ "testing"
+)
+
+func TestF(t *testing.T) {
+ F()
+} \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/cover_cgo_extra_test.txt b/src/cmd/go/testdata/script/cover_cgo_extra_test.txt
new file mode 100644
index 0000000000..1c4257846f
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_cgo_extra_test.txt
@@ -0,0 +1,44 @@
+[!cgo] skip
+[gccgo] skip # gccgo has no cover tool
+
+# Test coverage on cgo code. This test case has an external
+# test that tests the code and an in-package test file with
+# no test cases.
+
+go test -short -cover cgocover3
+stdout 'coverage:.*[1-9][0-9.]+%'
+! stderr '[^0-9]0\.0%'
+
+-- cgocover3/p.go --
+package p
+
+/*
+void
+f(void)
+{
+}
+*/
+import "C"
+
+var b bool
+
+func F() {
+ if b {
+ for {
+ }
+ }
+ C.f()
+}
+-- cgocover3/p_test.go --
+package p
+-- cgocover3/x_test.go --
+package p_test
+
+import (
+ . "cgocover3"
+ "testing"
+)
+
+func TestF(t *testing.T) {
+ F()
+} \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/cover_cgo_xtest.txt b/src/cmd/go/testdata/script/cover_cgo_xtest.txt
new file mode 100644
index 0000000000..40e2a8d6bd
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_cgo_xtest.txt
@@ -0,0 +1,40 @@
+[!cgo] skip
+[gccgo] skip # gccgo has no cover tool
+
+# Test cgo coverage with an external test.
+
+go test -short -cover cgocover2
+stdout 'coverage:.*[1-9][0-9.]+%'
+! stderr '[^0-9]0\.0%'
+
+-- cgocover2/p.go --
+package p
+
+/*
+void
+f(void)
+{
+}
+*/
+import "C"
+
+var b bool
+
+func F() {
+ if b {
+ for {
+ }
+ }
+ C.f()
+}
+-- cgocover2/x_test.go --
+package p_test
+
+import (
+ . "cgocover2"
+ "testing"
+)
+
+func TestF(t *testing.T) {
+ F()
+} \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/cover_import_main_loop.txt b/src/cmd/go/testdata/script/cover_import_main_loop.txt
new file mode 100644
index 0000000000..83eef0c8a8
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_import_main_loop.txt
@@ -0,0 +1,22 @@
+[gccgo] skip # gccgo has no cover tool
+
+! go test -n importmain/test
+stderr 'not an importable package' # check that import main was detected
+! go test -n -cover importmain/test
+stderr 'not an importable package' # check that import main was detected
+
+-- importmain/ismain/main.go --
+package main
+
+import _ "importmain/test"
+
+func main() {}
+-- importmain/test/test.go --
+package test
+-- importmain/test/test_test.go --
+package test_test
+
+import "testing"
+import _ "importmain/ismain"
+
+func TestCase(t *testing.T) {}
diff --git a/src/cmd/go/testdata/script/cover_pattern.txt b/src/cmd/go/testdata/script/cover_pattern.txt
new file mode 100644
index 0000000000..0b7f2d70a2
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_pattern.txt
@@ -0,0 +1,37 @@
+[gccgo] skip
+
+# If coverpkg=sleepy... expands by package loading
+# (as opposed to pattern matching on deps)
+# then it will try to load sleepybad, which does not compile,
+# and the test command will fail.
+! go list sleepy...
+go test -c -n -coverprofile=$TMPDIR/cover.out -coverpkg=sleepy... -run=^$ sleepy1
+
+-- sleepy1/p_test.go --
+package p
+
+import (
+ "testing"
+ "time"
+)
+
+func Test1(t *testing.T) {
+ time.Sleep(200 * time.Millisecond)
+}
+-- sleepy2/p_test.go --
+package p
+
+import (
+ "testing"
+ "time"
+)
+
+func Test1(t *testing.T) {
+ time.Sleep(200 * time.Millisecond)
+}
+-- sleepybad/p.go --
+package p
+
+import ^
+
+var _ = io.DoesNotExist
diff --git a/src/cmd/go/testdata/script/doc.txt b/src/cmd/go/testdata/script/doc.txt
new file mode 100644
index 0000000000..3ff1aab093
--- /dev/null
+++ b/src/cmd/go/testdata/script/doc.txt
@@ -0,0 +1,75 @@
+# go doc --help
+! go doc --help
+stderr 'go doc'
+stderr 'go doc <pkg>'
+stderr 'go doc <sym>\[\.<methodOrField>\]'
+stderr 'go doc \[<pkg>\.\]<sym>\[\.<methodOrField>\]'
+stderr 'go doc \[<pkg>\.\]\[<sym>\.\]<methodOrField>'
+stderr 'go doc <pkg> <sym>\[\.<methodOrField>\]'
+
+# go help doc
+go help doc
+stdout 'go doc'
+stdout 'go doc <pkg>'
+stdout 'go doc <sym>\[\.<methodOrField>\]'
+stdout 'go doc \[<pkg>\.\]<sym>\[\.<methodOrField>\]'
+stdout 'go doc \[<pkg>\.\]\[<sym>\.\]<methodOrField>'
+stdout 'go doc <pkg> <sym>\[\.<methodOrField>\]'
+
+# go doc <pkg>
+go doc p/v2
+stdout .
+
+# go doc <pkg> <sym>
+go doc p/v2 Symbol
+stdout .
+
+# go doc <pkg> <sym> <method>
+! go doc p/v2 Symbol Method
+stderr .
+
+# go doc <pkg>.<sym>
+go doc p/v2.Symbol
+stdout .
+
+# go doc <pkg>.<sym>.<method>
+go doc p/v2.Symbol.Method
+stdout .
+
+# go doc <sym>
+go doc Symbol
+stdout .
+
+# go doc <sym> <method>
+! go doc Symbol Method
+stderr .
+
+# go doc <sym>.<method>
+go doc Symbol.Method
+stdout .
+
+# go doc <pkg>.<method>
+go doc p/v2.Method
+stdout .
+
+# go doc <pkg> <method>
+go doc p/v2 Method
+stdout .
+
+# go doc <method>
+go doc Method
+stdout .
+
+-- go.mod --
+module p/v2
+
+go 1.13
+
+-- p.go --
+package p
+
+type Symbol struct{}
+
+func (Symbol) Method() error {
+ return nil
+}
diff --git a/src/cmd/go/testdata/script/generate_bad_imports.txt b/src/cmd/go/testdata/script/generate_bad_imports.txt
new file mode 100644
index 0000000000..59a2f5786b
--- /dev/null
+++ b/src/cmd/go/testdata/script/generate_bad_imports.txt
@@ -0,0 +1,11 @@
+[windows] skip # skip because windows has no echo command
+
+go generate gencycle
+stdout 'hello world' # check go generate gencycle ran the generator
+
+-- gencycle/gencycle.go --
+//go:generate echo hello world
+
+package gencycle
+
+import _ "gencycle"
diff --git a/src/cmd/go/testdata/script/goflags.txt b/src/cmd/go/testdata/script/goflags.txt
index fac6d80720..686d1138b8 100644
--- a/src/cmd/go/testdata/script/goflags.txt
+++ b/src/cmd/go/testdata/script/goflags.txt
@@ -49,3 +49,11 @@ stderr '^go: invalid boolean value \"asdf\" for flag -e \(from (\$GOFLAGS|%GOFLA
go env
stdout GOFLAGS
+# Flags listed in GOFLAGS should be safe to duplicate on the command line.
+env GOFLAGS=-tags=magic
+go list -tags=magic
+go test -tags=magic -c -o $devnull
+go vet -tags=magic
+
+-- foo_test.go --
+package foo
diff --git a/src/cmd/go/testdata/script/gopath_vendor_dup_err.txt b/src/cmd/go/testdata/script/gopath_vendor_dup_err.txt
new file mode 100644
index 0000000000..5096195c70
--- /dev/null
+++ b/src/cmd/go/testdata/script/gopath_vendor_dup_err.txt
@@ -0,0 +1,24 @@
+[!net] skip
+
+# Issue 17119: Test more duplicate load errors.
+! go build dupload
+! stderr 'duplicate load|internal error'
+stderr 'dupload/vendor/p must be imported as p'
+
+-- dupload/dupload.go --
+package main
+
+import (
+ _ "dupload/p2"
+ _ "p"
+)
+
+func main() {}
+-- dupload/p/p.go --
+package p
+-- dupload/p2/p2.go --
+package p2
+
+import _ "dupload/vendor/p"
+-- dupload/vendor/p/p.go --
+package p
diff --git a/src/cmd/go/testdata/script/link_syso_issue33139.txt b/src/cmd/go/testdata/script/link_syso_issue33139.txt
index c2ca27acbf..03169bf5e9 100644
--- a/src/cmd/go/testdata/script/link_syso_issue33139.txt
+++ b/src/cmd/go/testdata/script/link_syso_issue33139.txt
@@ -8,6 +8,10 @@
# See: https://github.com/golang/go/issues/8912
[linux] [ppc64] skip
+# External linking is not supported on linux/riscv64.
+# See: https://github.com/golang/go/issues/36739
+[linux] [riscv64] skip
+
# External linking is not supported on darwin/386 (10.14+).
# See: https://github.com/golang/go/issues/31751
[darwin] [386] skip
diff --git a/src/cmd/go/testdata/script/list_constraints.txt b/src/cmd/go/testdata/script/list_constraints.txt
new file mode 100644
index 0000000000..7115c365f0
--- /dev/null
+++ b/src/cmd/go/testdata/script/list_constraints.txt
@@ -0,0 +1,86 @@
+# Check that files and their imports are not included in 'go list' output
+# when they are excluded by build constraints.
+
+# Linux and cgo files should be included when building in that configuration.
+env GOOS=linux
+env GOARCH=amd64
+env CGO_ENABLED=1
+go list -f '{{range .GoFiles}}{{.}} {{end}}'
+stdout '^cgotag.go empty.go suffix_linux.go tag.go $'
+go list -f '{{range .CgoFiles}}{{.}} {{end}}'
+stdout '^cgoimport.go $'
+go list -f '{{range .Imports}}{{.}} {{end}}'
+stdout '^C cgoimport cgotag suffix tag $'
+
+# Disabling cgo should exclude cgo files and their imports.
+env CGO_ENABLED=0
+go list -f '{{range .GoFiles}}{{.}} {{end}}'
+stdout 'empty.go suffix_linux.go tag.go'
+go list -f '{{range .CgoFiles}}{{.}} {{end}}'
+! stdout .
+go list -f '{{range .Imports}}{{.}} {{end}}'
+stdout '^suffix tag $'
+
+# Changing OS should exclude linux sources.
+env GOOS=darwin
+go list -f '{{range .GoFiles}}{{.}} {{end}}'
+stdout '^empty.go $'
+go list -f '{{range .Imports}}{{.}} {{end}}'
+stdout '^$'
+
+# Enabling a tag should include files that require it.
+go list -tags=extra -f '{{range .GoFiles}}{{.}} {{end}}'
+stdout '^empty.go extra.go $'
+go list -tags=extra -f '{{range .Imports}}{{.}} {{end}}'
+stdout '^extra $'
+
+# Packages that require a tag should not be listed unless the tag is on.
+! go list ./tagonly
+go list -tags=extra ./tagonly
+stdout m/tagonly
+
+-- go.mod --
+module m
+
+go 1.13
+
+-- empty.go --
+package p
+
+-- extra.go --
+// +build extra
+
+package p
+
+import _ "extra"
+
+-- suffix_linux.go --
+package p
+
+import _ "suffix"
+
+-- tag.go --
+// +build linux
+
+package p
+
+import _ "tag"
+
+-- cgotag.go --
+// +build cgo
+
+package p
+
+import _ "cgotag"
+
+-- cgoimport.go --
+package p
+
+import "C"
+
+import _ "cgoimport"
+
+-- tagonly/tagonly.go --
+// +build extra
+
+package tagonly
diff --git a/src/cmd/go/testdata/script/list_parse_err.txt b/src/cmd/go/testdata/script/list_parse_err.txt
new file mode 100644
index 0000000000..5aacaa88fa
--- /dev/null
+++ b/src/cmd/go/testdata/script/list_parse_err.txt
@@ -0,0 +1,17 @@
+# 'go list' should report imports, even if some files have parse errors
+# before the import block.
+go list -e -f '{{range .Imports}}{{.}} {{end}}'
+stdout '^fmt '
+
+-- go.mod --
+module m
+
+go 1.13
+
+-- a.go --
+package a
+
+import "fmt"
+
+-- b.go --
+// no package statement
diff --git a/src/cmd/go/testdata/script/list_tags.txt b/src/cmd/go/testdata/script/list_tags.txt
deleted file mode 100644
index 49069bd213..0000000000
--- a/src/cmd/go/testdata/script/list_tags.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-env GO111MODULE=off
-
-# go list supports -tags
-go list -tags=thetag ./my...
-stdout mypkg
-
--- mypkg/x.go --
-// +build thetag
-
-package mypkg
diff --git a/src/cmd/go/testdata/script/list_wildcard_skip_nonmatching.txt b/src/cmd/go/testdata/script/list_wildcard_skip_nonmatching.txt
new file mode 100644
index 0000000000..74ca315a72
--- /dev/null
+++ b/src/cmd/go/testdata/script/list_wildcard_skip_nonmatching.txt
@@ -0,0 +1,13 @@
+# Test that wildcards don't look in useless directories.
+
+# First make sure that badpkg causes 'go list ...' to fail.
+! go list ...
+stderr badpkg
+
+# Check that the list of 'm...' succeeds. That implies badpkg was skipped.
+go list m...
+
+-- m/x.go --
+package m
+-- badpkg/x.go --
+pkg badpkg \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/mod_bad_domain.txt b/src/cmd/go/testdata/script/mod_bad_domain.txt
index c9fd044cdc..ec0d474382 100644
--- a/src/cmd/go/testdata/script/mod_bad_domain.txt
+++ b/src/cmd/go/testdata/script/mod_bad_domain.txt
@@ -2,10 +2,16 @@ env GO111MODULE=on
# explicit get should report errors about bad names
! go get appengine
-stderr 'malformed module path "appengine": missing dot in first path element'
+stderr '^go get appengine: package appengine is not in GOROOT \(.*\)$'
! go get x/y.z
stderr 'malformed module path "x/y.z": missing dot in first path element'
+# 'go list -m' should report errors about module names, never GOROOT.
+! go list -m -versions appengine
+stderr 'malformed module path "appengine": missing dot in first path element'
+! go list -m -versions x/y.z
+stderr 'malformed module path "x/y.z": missing dot in first path element'
+
# build should report all unsatisfied imports,
# but should be more definitive about non-module import paths
! go build ./useappengine
diff --git a/src/cmd/go/testdata/script/mod_build_info_err.txt b/src/cmd/go/testdata/script/mod_build_info_err.txt
index 5ceb154a48..87a099b219 100644
--- a/src/cmd/go/testdata/script/mod_build_info_err.txt
+++ b/src/cmd/go/testdata/script/mod_build_info_err.txt
@@ -2,7 +2,7 @@
# Verifies golang.org/issue/34393.
go list -e -deps -f '{{with .Error}}{{.Pos}}: {{.Err}}{{end}}' ./main
-stdout 'bad[/\\]bad.go:3:8: malformed module path "string": missing dot in first path element'
+stdout 'bad[/\\]bad.go:3:8: malformed module path "🐧.example.com/string": invalid char ''🐧'''
-- go.mod --
module m
@@ -19,4 +19,4 @@ func main() {}
-- bad/bad.go --
package bad
-import _ "string"
+import _ "🐧.example.com/string"
diff --git a/src/cmd/go/testdata/script/mod_edit.txt b/src/cmd/go/testdata/script/mod_edit.txt
index 42007b13d0..898d8524ac 100644
--- a/src/cmd/go/testdata/script/mod_edit.txt
+++ b/src/cmd/go/testdata/script/mod_edit.txt
@@ -52,6 +52,12 @@ go mod init a.a/b/c
go mod edit -module x.x/y/z
cmpenv go.mod go.mod.edit
+# golang.org/issue/30513: don't require go-gettable module paths.
+cd $WORK/local
+go mod init foo
+go mod edit -module local-only -require=other-local@v1.0.0 -replace other-local@v1.0.0=./other
+cmpenv go.mod go.mod.edit
+
-- x.go --
package x
@@ -159,6 +165,14 @@ exclude x.1 v1.2.0
replace x.1 => y.1/v2 v2.3.6
require x.3 v1.99.0
+-- $WORK/local/go.mod.edit --
+module local-only
+
+go $goversion
+
+require other-local v1.0.0
+
+replace other-local v1.0.0 => ./other
-- $WORK/go.mod.badfmt --
module x.x/y/z
diff --git a/src/cmd/go/testdata/script/mod_get_test.txt b/src/cmd/go/testdata/script/mod_get_test.txt
index f921168ad4..3680ca273d 100644
--- a/src/cmd/go/testdata/script/mod_get_test.txt
+++ b/src/cmd/go/testdata/script/mod_get_test.txt
@@ -33,7 +33,7 @@ grep 'rsc.io/quote v1.5.1$' go.mod
# 'go get all' should consider test dependencies with or without -t.
cp go.mod.empty go.mod
-go get all
+go get -d all
grep 'rsc.io/quote v1.5.2$' go.mod
-- go.mod.empty --
diff --git a/src/cmd/go/testdata/script/mod_goroot_errors.txt b/src/cmd/go/testdata/script/mod_goroot_errors.txt
new file mode 100644
index 0000000000..255844408a
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_goroot_errors.txt
@@ -0,0 +1,53 @@
+env GO111MODULE=on
+
+# Regression test for https://golang.org/issue/34769.
+# Missing standard-library imports should refer to GOROOT rather than
+# complaining about a malformed module path.
+# This is especially important when GOROOT is set incorrectly,
+# since such an error will occur for every package in std.
+
+# Building a nonexistent std package directly should fail usefully.
+
+! go build -mod=readonly nonexist
+! stderr 'import lookup disabled'
+! stderr 'missing dot'
+stderr '^can''t load package: package nonexist is not in GOROOT \('$GOROOT'[/\\]src[/\\]nonexist\)$'
+
+! go build nonexist
+! stderr 'import lookup disabled'
+! stderr 'missing dot'
+stderr '^can''t load package: package nonexist is not in GOROOT \('$GOROOT'[/\\]src[/\\]nonexist\)$'
+
+# Building a nonexistent std package indirectly should also fail usefully.
+
+! go build -mod=readonly ./importnonexist
+! stderr 'import lookup disabled'
+! stderr 'missing dot'
+stderr '^importnonexist[/\\]x.go:2:8: package nonexist is not in GOROOT \('$GOROOT'[/\\]src[/\\]nonexist\)$'
+
+! go build ./importnonexist
+! stderr 'import lookup disabled'
+! stderr 'missing dot'
+stderr '^importnonexist[/\\]x.go:2:8: package nonexist is not in GOROOT \('$GOROOT'[/\\]src[/\\]nonexist\)$'
+
+# Building an *actual* std package should fail if GOROOT is set to something bogus.
+
+[!short] go build ./importjson # Prove that it works when GOROOT is valid.
+
+env GOROOT=$WORK/not-a-valid-goroot
+! go build ./importjson
+! stderr 'import lookup disabled'
+! stderr 'missing dot'
+stderr 'importjson[/\\]x.go:2:8: package encoding/json is not in GOROOT \('$WORK'[/\\]not-a-valid-goroot[/\\]src[/\\]encoding[/\\]json\)$'
+
+-- go.mod --
+module example.com
+go 1.14
+-- importnonexist/x.go --
+package importnonexist
+import _ "nonexist"
+-- importjson/x.go --
+package importjson
+import _ "encoding/json"
+-- $WORK/not-a-valid-goroot/README --
+This directory is not a valid GOROOT.
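The script above pins down a classification rule: an import path whose first element contains no dot cannot be a module path, so a failed lookup is reported against GOROOT rather than as a malformed module path. A minimal sketch of that rule (illustrative only, not the actual loader code):

	package main

	import (
		"fmt"
		"os"
		"path/filepath"
		"strings"
	)

	// looksLikeStd reports whether path resembles a standard-library
	// import: its first element contains no dot.
	func looksLikeStd(path string) bool {
		elem := path
		if i := strings.IndexByte(path, '/'); i >= 0 {
			elem = path[:i]
		}
		return !strings.Contains(elem, ".")
	}

	func main() {
		path := "nonexist"
		if looksLikeStd(path) {
			dir := filepath.Join(os.Getenv("GOROOT"), "src", filepath.FromSlash(path))
			if _, err := os.Stat(dir); err != nil {
				fmt.Printf("package %s is not in GOROOT (%s)\n", path, dir)
			}
		}
	}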
diff --git a/src/cmd/go/testdata/script/mod_list_e_readonly.txt b/src/cmd/go/testdata/script/mod_list_e_readonly.txt
new file mode 100644
index 0000000000..4969434e52
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_list_e_readonly.txt
@@ -0,0 +1,15 @@
+# 'go list -mod=readonly -e' should attribute errors
+# to individual missing packages.
+# Verifies golang.org/issue/34829.
+go list -mod=readonly -e -deps -f '{{if .Error}}{{.ImportPath}}: {{.Error}}{{end}}' .
+stdout 'example.com/missing: use.go:3:8: cannot find module providing package example.com/missing: import lookup disabled by -mod=readonly'
+
+-- go.mod --
+module example.com/m
+
+go 1.14
+
+-- use.go --
+package use
+
+import _ "example.com/missing"
diff --git a/src/cmd/go/testdata/script/mod_load_badchain.txt b/src/cmd/go/testdata/script/mod_load_badchain.txt
index 2c532f1fda..67d9a1584f 100644
--- a/src/cmd/go/testdata/script/mod_load_badchain.txt
+++ b/src/cmd/go/testdata/script/mod_load_badchain.txt
@@ -75,12 +75,14 @@ go: example.com/badchain/a@v1.1.0 requires
module declares its path as: badchain.example.com/c
but was required as: example.com/badchain/c
-- list-missing-expected --
+go: finding module for package example.com/badchain/c
go: found example.com/badchain/c in example.com/badchain/c v1.1.0
go: m/use imports
example.com/badchain/c: example.com/badchain/c@v1.1.0: parsing go.mod:
module declares its path as: badchain.example.com/c
but was required as: example.com/badchain/c
-- list-missing-test-expected --
+go: finding module for package example.com/badchain/c
go: found example.com/badchain/c in example.com/badchain/c v1.1.0
go: m/testuse tested by
m/testuse.test imports
diff --git a/src/cmd/go/testdata/script/mod_readonly.txt b/src/cmd/go/testdata/script/mod_readonly.txt
index 1c89b49f51..77fc735d57 100644
--- a/src/cmd/go/testdata/script/mod_readonly.txt
+++ b/src/cmd/go/testdata/script/mod_readonly.txt
@@ -6,13 +6,14 @@ env GOFLAGS=-mod=readonly
go mod edit -fmt
cp go.mod go.mod.empty
! go list all
-stderr 'import lookup disabled by -mod=readonly'
+stderr '^can''t load package: x.go:2:8: cannot find module providing package rsc\.io/quote: import lookup disabled by -mod=readonly'
cmp go.mod go.mod.empty
# -mod=readonly should be set implicitly if the go.mod file is read-only
chmod 0400 go.mod
env GOFLAGS=
! go list all
+stderr '^can''t load package: x.go:2:8: cannot find module providing package rsc\.io/quote: import lookup disabled by -mod=readonly\n\t\(go.mod file is read-only\.\)$'
chmod 0600 go.mod
env GOFLAGS=-mod=readonly
@@ -33,6 +34,11 @@ go list all
go clean -modcache
go list all
+# -mod=readonly must not cause 'go list -m' to fail.
+# (golang.org/issue/36478)
+go list -m all
+! stderr 'cannot query module'
+
# -mod=readonly should reject inconsistent go.mod files
# (ones that would be rewritten).
go mod edit -require rsc.io/sampler@v1.2.0
diff --git a/src/cmd/go/testdata/script/mod_replace_gopkgin.txt b/src/cmd/go/testdata/script/mod_replace_gopkgin.txt
index 6608fb1b80..28c1196284 100644
--- a/src/cmd/go/testdata/script/mod_replace_gopkgin.txt
+++ b/src/cmd/go/testdata/script/mod_replace_gopkgin.txt
@@ -15,10 +15,28 @@ env GOSUMDB=off
# Replacing gopkg.in/[…].vN with a repository with a root go.mod file
# specifying […].vN and a compatible version should succeed, even if
# the replacement path is not a gopkg.in path.
-cd dot-to-dot
-go list gopkg.in/src-d/go-git.v4
+cd 4-to-4
+go list -m gopkg.in/src-d/go-git.v4
--- dot-to-dot/go.mod --
+# Previous versions of the "go" command accepted v0 and v1 pseudo-versions
+# as replacements for gopkg.in/[…].v4.
+# As a special case, we continue to accept those.
+
+cd ../4-to-0
+go list -m gopkg.in/src-d/go-git.v4
+
+cd ../4-to-1
+go list -m gopkg.in/src-d/go-git.v4
+
+cd ../4-to-incompatible
+go list -m gopkg.in/src-d/go-git.v4
+
+# A mismatched gopkg.in path should not be able to replace a different major version.
+cd ../3-to-gomod-4
+! go list -m gopkg.in/src-d/go-git.v3
+stderr '^go: gopkg\.in/src-d/go-git\.v3@v3.0.0-20190801152248-0d1a009cbb60: invalid version: go\.mod has non-\.\.\.\.v3 module path "gopkg\.in/src-d/go-git\.v4" at revision 0d1a009cbb60$'
+
+-- 4-to-4/go.mod --
module golang.org/issue/34254
go 1.13
@@ -26,3 +44,36 @@ go 1.13
require gopkg.in/src-d/go-git.v4 v4.13.1
replace gopkg.in/src-d/go-git.v4 v4.13.1 => github.com/src-d/go-git/v4 v4.13.1
+-- 4-to-1/go.mod --
+module golang.org/issue/34254
+
+go 1.13
+
+require gopkg.in/src-d/go-git.v4 v4.13.1
+
+replace gopkg.in/src-d/go-git.v4 v4.13.1 => github.com/src-d/go-git v1.0.1-0.20190801152248-0d1a009cbb60
+-- 4-to-0/go.mod --
+module golang.org/issue/34254
+
+go 1.13
+
+require gopkg.in/src-d/go-git.v4 v4.13.1
+
+replace gopkg.in/src-d/go-git.v4 v4.13.1 => github.com/src-d/go-git v0.0.0-20190801152248-0d1a009cbb60
+-- 4-to-incompatible/go.mod --
+module golang.org/issue/34254
+
+go 1.13
+
+require gopkg.in/src-d/go-git.v4 v4.13.1
+
+replace gopkg.in/src-d/go-git.v4 v4.13.1 => github.com/src-d/go-git v4.6.0+incompatible
+-- 3-to-gomod-4/go.mod --
+module golang.org/issue/34254
+go 1.13
+
+require gopkg.in/src-d/go-git.v3 v3.2.0
+
+// This replacement has a go.mod file declaring its path to be
+// gopkg.in/src-d/go-git.v4, so it cannot be used as a replacement for v3.
+replace gopkg.in/src-d/go-git.v3 v3.2.0 => gopkg.in/src-d/go-git.v3 v3.0.0-20190801152248-0d1a009cbb60
diff --git a/src/cmd/go/testdata/script/mod_replace_import.txt b/src/cmd/go/testdata/script/mod_replace_import.txt
index 941ef61d35..fd5b04a498 100644
--- a/src/cmd/go/testdata/script/mod_replace_import.txt
+++ b/src/cmd/go/testdata/script/mod_replace_import.txt
@@ -28,7 +28,8 @@ stdout 'example.com/v v1.12.0 => ./v12'
cd fail
! go list all
stdout 'localhost.fail'
-stderr '^can.t load package: m.go:3:8: module w@latest found \(v0.0.0-00010101000000-000000000000, replaced by ../w\), but does not contain package w$'
+stderr '^can''t load package: m.go:4:2: module w@latest found \(v0.0.0-00010101000000-000000000000, replaced by ../w\), but does not contain package w$'
+stderr '^can''t load package: m.go:5:2: nonexist@v0.1.0: replacement directory ../nonexist does not exist$'
-- go.mod --
module example.com/m
@@ -54,6 +55,10 @@ replace (
example.com/v => ./v
)
+replace (
+ example.com/i v2.0.0+incompatible => ./i2
+)
+
-- m.go --
package main
import (
@@ -61,6 +66,7 @@ import (
_ "example.com/x/v3"
_ "example.com/y/z/w"
_ "example.com/v"
+ _ "example.com/i"
)
func main() {}
@@ -115,10 +121,18 @@ module v.localhost
-- v/v.go --
package v
+-- i2/go.mod --
+module example.com/i
+-- i2/i.go --
+package i
+
-- fail/m.go --
package main
-import _ "w"
+import (
+ _ "w"
+ _ "nonexist"
+)
func main() {}
@@ -127,3 +141,4 @@ module localhost.fail
replace w => ../w
+replace nonexist v0.1.0 => ../nonexist
diff --git a/src/cmd/go/testdata/script/mod_tidy_error.txt b/src/cmd/go/testdata/script/mod_tidy_error.txt
index 9bb8528cb0..b6c24ceaf7 100644
--- a/src/cmd/go/testdata/script/mod_tidy_error.txt
+++ b/src/cmd/go/testdata/script/mod_tidy_error.txt
@@ -4,12 +4,12 @@ env GO111MODULE=on
# 'go mod tidy' and 'go mod vendor' should not hide loading errors.
! go mod tidy
-stderr '^issue27063 imports\n\tnonexist: malformed module path "nonexist": missing dot in first path element'
+stderr '^issue27063 imports\n\tnonexist: package nonexist is not in GOROOT \(.*\)'
stderr '^issue27063 imports\n\tnonexist.example.com: cannot find module providing package nonexist.example.com'
stderr '^issue27063 imports\n\tissue27063/other imports\n\tother.example.com/nonexist: cannot find module providing package other.example.com/nonexist'
! go mod vendor
-stderr '^issue27063 imports\n\tnonexist: malformed module path "nonexist": missing dot in first path element'
+stderr '^issue27063 imports\n\tnonexist: package nonexist is not in GOROOT \(.*\)'
stderr '^issue27063 imports\n\tnonexist.example.com: cannot find module providing package nonexist.example.com'
stderr '^issue27063 imports\n\tissue27063/other imports\n\tother.example.com/nonexist: cannot find module providing package other.example.com/nonexist'
diff --git a/src/cmd/go/testdata/script/mod_vendor.txt b/src/cmd/go/testdata/script/mod_vendor.txt
index bb3e634b3a..2622916f61 100644
--- a/src/cmd/go/testdata/script/mod_vendor.txt
+++ b/src/cmd/go/testdata/script/mod_vendor.txt
@@ -38,6 +38,12 @@ stdout 'src[\\/]vendor[\\/]x'
go list -mod=vendor -f '{{.Version}} {{.Dir}}' -m x
stdout '^v1.0.0 $'
+# -mod=vendor should cause 'go list' flags that look up versions to fail.
+! go list -mod=vendor -versions -m x
+stderr '^go list -m: can''t determine available versions using the vendor directory\n\t\(Use -mod=mod or -mod=readonly to bypass.\)$'
+! go list -mod=vendor -u -m x
+stderr '^go list -m: can''t determine available upgrades using the vendor directory\n\t\(Use -mod=mod or -mod=readonly to bypass.\)$'
+
# 'go list -mod=vendor -m' on a transitive dependency that does not
# provide vendored packages should give a helpful error rather than
# 'not a known dependency'.
diff --git a/src/cmd/go/testdata/script/mod_vendor_trimpath.txt b/src/cmd/go/testdata/script/mod_vendor_trimpath.txt
new file mode 100644
index 0000000000..5451aa773c
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_vendor_trimpath.txt
@@ -0,0 +1,45 @@
+# Check that when -trimpath and -mod=vendor are used together,
+# paths in vendored packages are properly trimmed.
+# Verifies golang.org/issue/36566.
+
+[short] skip
+
+# Only the main module has a root directory in vendor mode.
+go mod vendor
+go list -f {{.Module.Dir}} example.com/main
+stdout $PWD
+go list -f {{.Module.Dir}} example.com/stack
+! stdout .
+
+# The program prints a file name from a vendored package.
+# Without -trimpath, the name should include the vendor directory.
+go run main.go
+stdout vendor
+
+# With -trimpath, everything before the package path should be trimmed.
+# As with -mod=mod, the version should appear as part of the module path.
+go run -mod=vendor -trimpath main.go
+stdout '^example.com/stack@v1.0.0/stack.go$'
+
+# With pristinely vendored source code, a trimmed binary built from vendored
+# code should have the same behavior as one built from the module cache.
+go run -mod=mod -trimpath main.go
+stdout '^example.com/stack@v1.0.0/stack.go$'
+
+-- go.mod --
+module example.com/main
+
+require example.com/stack v1.0.0
+
+-- main.go --
+package main
+
+import (
+ "fmt"
+
+ "example.com/stack"
+)
+
+func main() {
+ fmt.Println(stack.TopFile())
+}
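The example.com/stack module is served by the test proxy, so its source does not appear in the txtar above. A plausible sketch of TopFile (an assumption about the helper, not its actual source): it reports its own file path via runtime.Caller, which is exactly the string that -trimpath rewrites into module@version form.

	package stack

	import "runtime"

	// TopFile returns the path of this source file as recorded at
	// build time; with -trimpath it takes the form
	// example.com/stack@v1.0.0/stack.go rather than an absolute path.
	func TopFile() string {
		_, file, _, _ := runtime.Caller(0)
		return file
	}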
diff --git a/src/cmd/go/testdata/script/modfile_flag.txt b/src/cmd/go/testdata/script/modfile_flag.txt
index 1409be9599..f05bf03fbf 100644
--- a/src/cmd/go/testdata/script/modfile_flag.txt
+++ b/src/cmd/go/testdata/script/modfile_flag.txt
@@ -11,6 +11,15 @@ cp go.sum go.sum.orig
go mod init example.com/m
grep example.com/m go.alt.mod
+# 'go env GOMOD' should print the path to the real file.
+# 'go env' does not recognize the '-modfile' flag.
+go env GOMOD
+stdout '^\$WORK[/\\]gopath[/\\]src[/\\]go.mod$'
+
+# 'go list -m' should print the effective go.mod file as GoMod though.
+go list -m -f '{{.GoMod}}'
+stdout '^go.alt.mod$'
+
# go mod edit should operate on the alternate file
go mod edit -require rsc.io/quote@v1.5.2
grep rsc.io/quote go.alt.mod
diff --git a/src/cmd/go/testdata/script/run_hello_pkg.txt b/src/cmd/go/testdata/script/run_hello_pkg.txt
new file mode 100644
index 0000000000..03fba13c77
--- /dev/null
+++ b/src/cmd/go/testdata/script/run_hello_pkg.txt
@@ -0,0 +1,14 @@
+cd $GOPATH
+go run hello
+stderr 'hello, world'
+
+cd src/hello
+go run .
+stderr 'hello, world'
+
+-- hello/hello.go --
+package main
+
+func main() {
+ println("hello, world")
+}
diff --git a/src/cmd/go/testdata/script/mod_run_internal.txt b/src/cmd/go/testdata/script/run_internal.txt
index 653ad282be..d02185017b 100644
--- a/src/cmd/go/testdata/script/mod_run_internal.txt
+++ b/src/cmd/go/testdata/script/run_internal.txt
@@ -1,3 +1,20 @@
+env GO111MODULE=off
+
+go list -e -f '{{.Incomplete}}' m/runbad1.go
+stdout true
+! go run m/runbad1.go
+stderr 'use of internal package m/x/internal not allowed'
+
+go list -e -f '{{.Incomplete}}' m/runbad2.go
+stdout true
+! go run m/runbad2.go
+stderr 'use of internal package m/x/internal/y not allowed'
+
+go list -e -f '{{.Incomplete}}' m/runok.go
+stdout false
+go run m/runok.go
+
+cd m
env GO111MODULE=on
go list -e -f '{{.Incomplete}}' runbad1.go
@@ -14,32 +31,33 @@ go list -e -f '{{.Incomplete}}' runok.go
stdout false
go run runok.go
--- go.mod --
+
+-- m/go.mod --
module m
--- x/internal/internal.go --
+-- m/x/internal/internal.go --
package internal
--- x/internal/y/y.go --
+-- m/x/internal/y/y.go --
package y
--- internal/internal.go --
+-- m/internal/internal.go --
package internal
--- internal/z/z.go --
+-- m/internal/z/z.go --
package z
--- runbad1.go --
+-- m/runbad1.go --
package main
import _ "m/x/internal"
func main() {}
--- runbad2.go --
+-- m/runbad2.go --
package main
import _ "m/x/internal/y"
func main() {}
--- runok.go --
+-- m/runok.go --
package main
import _ "m/internal"
import _ "m/internal/z"
diff --git a/src/cmd/go/testdata/script/test_bad_example.txt b/src/cmd/go/testdata/script/test_bad_example.txt
new file mode 100644
index 0000000000..1d147b663f
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_bad_example.txt
@@ -0,0 +1,13 @@
+# Tests that invalid examples are ignored.
+# Verifies golang.org/issue/35284
+go test x_test.go
+
+-- x_test.go --
+package x
+
+import "fmt"
+
+func ExampleThisShouldNotHaveAParameter(thisShouldntExist int) {
+ fmt.Println("X")
+ // Output:
+} \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/test_badtest.txt b/src/cmd/go/testdata/script/test_badtest.txt
index f5db6941a0..e79fc511b3 100644
--- a/src/cmd/go/testdata/script/test_badtest.txt
+++ b/src/cmd/go/testdata/script/test_badtest.txt
@@ -1,11 +1,21 @@
env GO111MODULE=off
-! go test badtest/...
+! go test badtest/badexec
! stdout ^ok
stdout ^FAIL\tbadtest/badexec
+
+! go test badtest/badsyntax
+! stdout ^ok
stdout ^FAIL\tbadtest/badsyntax
+
+! go test badtest/badvar
+! stdout ^ok
stdout ^FAIL\tbadtest/badvar
+! go test notest
+! stdout ^ok
+stderr '^notest.hello.go:6:1: syntax error: non-declaration statement outside function body' # Exercise issue #7108
+
-- badtest/badexec/x_test.go --
package badexec
@@ -30,3 +40,10 @@ package badvar_test
func f() {
_ = notdefined
}
+-- notest/hello.go --
+package notest
+
+func hello() {
+ println("hello world")
+}
+Hello world \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/test_benchmark_fatal.txt b/src/cmd/go/testdata/script/test_benchmark_fatal.txt
new file mode 100644
index 0000000000..1e20c4eb61
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_benchmark_fatal.txt
@@ -0,0 +1,15 @@
+# Test that calling t.Fatal in a benchmark causes a non-zero exit status.
+
+! go test -run '^$' -bench . benchfatal
+! stdout ^ok
+! stderr ^ok
+stdout FAIL.*benchfatal
+
+-- benchfatal/x_test.go --
+package benchfatal
+
+import "testing"
+
+func BenchmarkThatCallsFatal(b *testing.B) {
+ b.Fatal("called by benchmark")
+}
diff --git a/src/cmd/go/testdata/script/test_benchmark_labels.txt b/src/cmd/go/testdata/script/test_benchmark_labels.txt
new file mode 100644
index 0000000000..affab6b806
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_benchmark_labels.txt
@@ -0,0 +1,19 @@
+# Tests that go test -bench prints out goos, goarch, and pkg.
+
+# Check for goos, goarch, and pkg.
+go test -run ^$ -bench . bench
+stdout '^goos: '$GOOS
+stdout '^goarch: '$GOARCH
+stdout '^pkg: bench'
+
+# Check that go test does not print pkg multiple times.
+! stdout 'pkg:.*pkg: '
+! stderr 'pkg:.*pkg:'
+
+-- bench/x_test.go --
+package bench
+
+import "testing"
+
+func Benchmark(b *testing.B) {
+}
diff --git a/src/cmd/go/testdata/flag_test.go b/src/cmd/go/testdata/script/test_flag.txt
index ddf613d870..bbcad1c59c 100644
--- a/src/cmd/go/testdata/flag_test.go
+++ b/src/cmd/go/testdata/script/test_flag.txt
@@ -1,3 +1,9 @@
+[short] skip
+
+go test flag_test.go -v -args -v=7 # Two distinct -v flags
+go test -v flag_test.go -args -v=7 # Two distinct -v flags
+
+-- flag_test.go --
package flag_test
import (
diff --git a/src/cmd/go/testdata/script/test_import_error_stack.txt b/src/cmd/go/testdata/script/test_import_error_stack.txt
new file mode 100644
index 0000000000..3b796053f7
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_import_error_stack.txt
@@ -0,0 +1,17 @@
+! go test testdep/p1
+stderr 'package testdep/p1 \(test\)\n\timports testdep/p2\n\timports testdep/p3: build constraints exclude all Go files ' # check for full import stack
+
+-- testdep/p1/p1.go --
+package p1
+-- testdep/p1/p1_test.go --
+package p1
+
+import _ "testdep/p2"
+-- testdep/p2/p2.go --
+package p2
+
+import _ "testdep/p3"
+-- testdep/p3/p3.go --
+// +build ignore
+
+package ignored
diff --git a/src/cmd/go/testdata/src/multimain/multimain_test.go b/src/cmd/go/testdata/script/test_main_twice.txt
index 007a86a5da..1e68dabec0 100644
--- a/src/cmd/go/testdata/src/multimain/multimain_test.go
+++ b/src/cmd/go/testdata/script/test_main_twice.txt
@@ -1,3 +1,10 @@
+[short] skip
+
+env GOCACHE=$WORK/tmp
+go test -v multimain
+stdout -count=2 notwithstanding # check tests ran twice
+
+-- multimain/multimain_test.go --
package multimain_test
import "testing"
diff --git a/src/cmd/go/testdata/script/test_match_no_tests_build_failure.txt b/src/cmd/go/testdata/script/test_match_no_tests_build_failure.txt
new file mode 100644
index 0000000000..92cb690dcc
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_match_no_tests_build_failure.txt
@@ -0,0 +1,15 @@
+# Test that when there's a build failure and a -run flag that doesn't match,
+# the error for not matching tests does not override the error for
+# the build failure.
+
+! go test -run ThisWillNotMatch syntaxerror
+! stderr '(?m)^ok.*\[no tests to run\]'
+stdout 'FAIL'
+
+-- syntaxerror/x.go --
+package p
+-- syntaxerror/x_test.go --
+package p
+
+func f() (x.y, z int) {
+}
diff --git a/src/cmd/go/testdata/script/test_minus_n.txt b/src/cmd/go/testdata/script/test_minus_n.txt
new file mode 100644
index 0000000000..9900dbca0b
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_minus_n.txt
@@ -0,0 +1,14 @@
+# The intent here is to verify that 'go test -n' works without crashing.
+# Any test will do.
+
+go test -n x_test.go
+
+-- x_test.go --
+package x_test
+
+import (
+ "testing"
+)
+
+func TestEmpty(t *testing.T) {
+}
diff --git a/src/cmd/go/testdata/script/test_no_tests.txt b/src/cmd/go/testdata/script/test_no_tests.txt
new file mode 100644
index 0000000000..d75bcff934
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_no_tests.txt
@@ -0,0 +1,11 @@
+# Tests issue #26242
+
+go test testnorun
+stdout 'testnorun\t\[no test files\]'
+
+-- testnorun/p.go --
+package p
+
+func init() {
+ panic("go test must not link and run test binaries without tests")
+}
diff --git a/src/cmd/go/testdata/src/testrace/race_test.go b/src/cmd/go/testdata/script/test_race.txt
index 7ec0c6d17a..5d15189e19 100644
--- a/src/cmd/go/testdata/src/testrace/race_test.go
+++ b/src/cmd/go/testdata/script/test_race.txt
@@ -1,3 +1,19 @@
+[short] skip
+[!race] skip
+
+go test testrace
+
+! go test -race testrace
+stdout 'FAIL: TestRace'
+! stdout 'PASS'
+! stderr 'PASS'
+
+! go test -race testrace -run XXX -bench .
+stdout 'FAIL: BenchmarkRace'
+! stdout 'PASS'
+! stderr 'PASS'
+
+-- testrace/race_test.go --
package testrace
import "testing"
diff --git a/src/cmd/go/testdata/script/test_race_cover_mode_issue20435.txt b/src/cmd/go/testdata/script/test_race_cover_mode_issue20435.txt
new file mode 100644
index 0000000000..bff9502ac7
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_race_cover_mode_issue20435.txt
@@ -0,0 +1,44 @@
+[short] skip
+[!race] skip
+
+# Make sure test is functional.
+go test testrace
+
+# Now, check that -race -covermode=set is not allowed.
+! go test -race -covermode=set testrace
+stderr '-covermode must be "atomic", not "set", when -race is enabled'
+! stdout PASS
+! stderr PASS
+
+-- testrace/race_test.go --
+package testrace
+
+import "testing"
+
+func TestRace(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ c := make(chan int)
+ x := 1
+ go func() {
+ x = 2
+ c <- 1
+ }()
+ x = 3
+ <-c
+ _ = x
+ }
+}
+
+func BenchmarkRace(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ c := make(chan int)
+ x := 1
+ go func() {
+ x = 2
+ c <- 1
+ }()
+ x = 3
+ <-c
+ _ = x
+ }
+}
diff --git a/src/cmd/go/testdata/script/test_regexps.txt b/src/cmd/go/testdata/script/test_regexps.txt
new file mode 100644
index 0000000000..39dedbf06f
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_regexps.txt
@@ -0,0 +1,75 @@
+go test -cpu=1 -run=X/Y -bench=X/Y -count=2 -v testregexp
+
+# Test the following:
+
+# TestX is run, twice
+stdout -count=2 '^=== RUN TestX$'
+stdout -count=2 '^ TestX: x_test.go:6: LOG: X running$'
+
+# TestX/Y is run, twice
+stdout -count=2 '^=== RUN TestX/Y$'
+stdout -count=2 '^ TestX/Y: x_test.go:8: LOG: Y running$'
+
+# TestXX is run, twice
+stdout -count=2 '^=== RUN TestXX$'
+stdout -count=2 '^ TestXX: z_test.go:10: LOG: XX running'
+
+# TestZ is not run
+! stdout '^=== RUN TestZ$'
+
+# BenchmarkX is run with N=1 once, only to discover what sub-benchmarks it has,
+# and should not print a final summary line.
+stdout -count=1 '^\s+BenchmarkX: x_test.go:13: LOG: X running N=1$'
+! stdout '^\s+BenchmarkX: x_test.go:13: LOG: X running N=\d\d+'
+! stdout 'BenchmarkX\s+\d+'
+
+# Same for BenchmarkXX.
+stdout -count=1 '^\s+BenchmarkXX: z_test.go:18: LOG: XX running N=1$'
+! stdout '^\s+BenchmarkXX: z_test.go:18: LOG: XX running N=\d\d+'
+! stdout 'BenchmarkXX\s+\d+'
+
+# BenchmarkX/Y is run in full twice due to -count=2.
+# "Run in full" means that it runs for approximately the default benchtime,
+# but may cap out at N=1e9.
+# We don't actually care what the final iteration count is, but it should be
+# a large number, and the last iteration count prints right before the results.
+stdout -count=2 '^\s+BenchmarkX/Y: x_test.go:15: LOG: Y running N=[1-9]\d{4,}\nBenchmarkX/Y\s+\d+'
+
+-- testregexp/x_test.go --
+package x
+
+import "testing"
+
+func TestX(t *testing.T) {
+ t.Logf("LOG: X running")
+ t.Run("Y", func(t *testing.T) {
+ t.Logf("LOG: Y running")
+ })
+}
+
+func BenchmarkX(b *testing.B) {
+ b.Logf("LOG: X running N=%d", b.N)
+ b.Run("Y", func(b *testing.B) {
+ b.Logf("LOG: Y running N=%d", b.N)
+ })
+}
+-- testregexp/z_test.go --
+package x
+
+import "testing"
+
+func TestZ(t *testing.T) {
+ t.Logf("LOG: Z running")
+}
+
+func TestXX(t *testing.T) {
+ t.Logf("LOG: XX running")
+}
+
+func BenchmarkZ(b *testing.B) {
+ b.Logf("LOG: Z running N=%d", b.N)
+}
+
+func BenchmarkXX(b *testing.B) {
+ b.Logf("LOG: XX running N=%d", b.N)
+}
diff --git a/src/cmd/go/testdata/script/test_syntax_error_says_fail.txt b/src/cmd/go/testdata/script/test_syntax_error_says_fail.txt
new file mode 100644
index 0000000000..29fa805b43
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_syntax_error_says_fail.txt
@@ -0,0 +1,14 @@
+# Test that the error message for a syntax error in a test go file
+# says FAIL.
+
+! go test syntaxerror
+stderr 'x_test.go:' # check that the error is diagnosed
+stdout 'FAIL' # check that go test says FAIL
+
+-- syntaxerror/x.go --
+package p
+-- syntaxerror/x_test.go --
+package p
+
+func f() (x.y, z int) {
+}
diff --git a/src/cmd/go/testdata/script/vet_flags.txt b/src/cmd/go/testdata/script/vet_flags.txt
index d84c8a6472..6aa1413fa4 100644
--- a/src/cmd/go/testdata/script/vet_flags.txt
+++ b/src/cmd/go/testdata/script/vet_flags.txt
@@ -1,8 +1,34 @@
-env GO111MODULE=off
+env GO111MODULE=on
-# Issue 35837. Verify that "go vet -<analyzer> <std package>" works if 'pwd' is not $GOROOT/src
-# we utilize the package runtime/testdata/testprog as the issue is specific to vetting standard package
-
-go vet -n -unreachable=false runtime/testdata/testprog
+# Regression test for issue 35837: "go vet -<analyzer> <std package>"
+# did not apply the requested analyzer.
+go vet -n -unreachable=false encoding/binary
stderr '-unreachable=false'
-stderr '-unsafeptr=false'
+! stderr '-unsafeptr=false'
+
+[short] stop
+env GOCACHE=$WORK/gocache
+env GOTMPDIR=$WORK/tmp
+go env GOTMPDIR
+stdout '/tmp'
+
+# "go test" on a user package should by default enable an explicit whitelist of analyzers.
+go test -x -run=none .
+stderr '[/\\]vet'$GOEXE'["]? .* -errorsas .* ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg'
+
+# "go test" on a standard package should by default disable an explicit blacklist.
+go test -x -run=none encoding/binary
+stderr '[/\\]vet'$GOEXE'["]? -unsafeptr=false ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg'
+
+# Both should allow users to override via the -vet flag.
+go test -x -vet=unreachable -run=none .
+stderr '[/\\]vet'$GOEXE'["]? -unreachable ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg'
+go test -x -vet=unreachable -run=none encoding/binary
+stderr '[/\\]vet'$GOEXE'["]? -unreachable ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg'
+
+-- go.mod --
+module example.com/x
+-- x.go --
+package x
+-- x_test.go --
+package x
diff --git a/src/cmd/go/testdata/src/badc/x.c b/src/cmd/go/testdata/src/badc/x.c
deleted file mode 100644
index f6cbf6924d..0000000000
--- a/src/cmd/go/testdata/src/badc/x.c
+++ /dev/null
@@ -1 +0,0 @@
-// C code!
diff --git a/src/cmd/go/testdata/src/badc/x.go b/src/cmd/go/testdata/src/badc/x.go
deleted file mode 100644
index bfa1de28bd..0000000000
--- a/src/cmd/go/testdata/src/badc/x.go
+++ /dev/null
@@ -1 +0,0 @@
-package badc
diff --git a/src/cmd/go/testdata/src/badpkg/x.go b/src/cmd/go/testdata/src/badpkg/x.go
deleted file mode 100644
index dda35e8ed3..0000000000
--- a/src/cmd/go/testdata/src/badpkg/x.go
+++ /dev/null
@@ -1 +0,0 @@
-pkg badpkg
diff --git a/src/cmd/go/testdata/src/bench/x_test.go b/src/cmd/go/testdata/src/bench/x_test.go
deleted file mode 100644
index 32cabf8a7b..0000000000
--- a/src/cmd/go/testdata/src/bench/x_test.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package bench
-
-import "testing"
-
-func Benchmark(b *testing.B) {
-}
diff --git a/src/cmd/go/testdata/src/benchfatal/x_test.go b/src/cmd/go/testdata/src/benchfatal/x_test.go
deleted file mode 100644
index 8d3a5deced..0000000000
--- a/src/cmd/go/testdata/src/benchfatal/x_test.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package benchfatal
-
-import "testing"
-
-func BenchmarkThatCallsFatal(b *testing.B) {
- b.Fatal("called by benchmark")
-}
diff --git a/src/cmd/go/testdata/src/cgoasm/p.go b/src/cmd/go/testdata/src/cgoasm/p.go
deleted file mode 100644
index 148b47f6a5..0000000000
--- a/src/cmd/go/testdata/src/cgoasm/p.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package p
-
-/*
-// hi
-*/
-import "C"
-
-func F() {}
diff --git a/src/cmd/go/testdata/src/cgoasm/p.s b/src/cmd/go/testdata/src/cgoasm/p.s
deleted file mode 100644
index aaade03a43..0000000000
--- a/src/cmd/go/testdata/src/cgoasm/p.s
+++ /dev/null
@@ -1,2 +0,0 @@
-TEXT asm(SB),$0
- RET
diff --git a/src/cmd/go/testdata/src/cgocover/p.go b/src/cmd/go/testdata/src/cgocover/p.go
deleted file mode 100644
index a6a3891cd4..0000000000
--- a/src/cmd/go/testdata/src/cgocover/p.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package p
-
-/*
-void
-f(void)
-{
-}
-*/
-import "C"
-
-var b bool
-
-func F() {
- if b {
- for {
- }
- }
- C.f()
-}
diff --git a/src/cmd/go/testdata/src/cgocover/p_test.go b/src/cmd/go/testdata/src/cgocover/p_test.go
deleted file mode 100644
index a8f057e358..0000000000
--- a/src/cmd/go/testdata/src/cgocover/p_test.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package p
-
-import "testing"
-
-func TestF(t *testing.T) {
- F()
-}
diff --git a/src/cmd/go/testdata/src/cgocover2/p.go b/src/cmd/go/testdata/src/cgocover2/p.go
deleted file mode 100644
index a6a3891cd4..0000000000
--- a/src/cmd/go/testdata/src/cgocover2/p.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package p
-
-/*
-void
-f(void)
-{
-}
-*/
-import "C"
-
-var b bool
-
-func F() {
- if b {
- for {
- }
- }
- C.f()
-}
diff --git a/src/cmd/go/testdata/src/cgocover2/x_test.go b/src/cmd/go/testdata/src/cgocover2/x_test.go
deleted file mode 100644
index f4790d2367..0000000000
--- a/src/cmd/go/testdata/src/cgocover2/x_test.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package p_test
-
-import (
- . "cgocover2"
- "testing"
-)
-
-func TestF(t *testing.T) {
- F()
-}
diff --git a/src/cmd/go/testdata/src/cgocover3/p.go b/src/cmd/go/testdata/src/cgocover3/p.go
deleted file mode 100644
index a6a3891cd4..0000000000
--- a/src/cmd/go/testdata/src/cgocover3/p.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package p
-
-/*
-void
-f(void)
-{
-}
-*/
-import "C"
-
-var b bool
-
-func F() {
- if b {
- for {
- }
- }
- C.f()
-}
diff --git a/src/cmd/go/testdata/src/cgocover3/p_test.go b/src/cmd/go/testdata/src/cgocover3/p_test.go
deleted file mode 100644
index c89cd18d0f..0000000000
--- a/src/cmd/go/testdata/src/cgocover3/p_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package p
diff --git a/src/cmd/go/testdata/src/cgocover3/x_test.go b/src/cmd/go/testdata/src/cgocover3/x_test.go
deleted file mode 100644
index 97d0e0f098..0000000000
--- a/src/cmd/go/testdata/src/cgocover3/x_test.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package p_test
-
-import (
- . "cgocover3"
- "testing"
-)
-
-func TestF(t *testing.T) {
- F()
-}
diff --git a/src/cmd/go/testdata/src/cgocover4/notcgo.go b/src/cmd/go/testdata/src/cgocover4/notcgo.go
deleted file mode 100644
index c89cd18d0f..0000000000
--- a/src/cmd/go/testdata/src/cgocover4/notcgo.go
+++ /dev/null
@@ -1 +0,0 @@
-package p
diff --git a/src/cmd/go/testdata/src/cgocover4/p.go b/src/cmd/go/testdata/src/cgocover4/p.go
deleted file mode 100644
index a6a3891cd4..0000000000
--- a/src/cmd/go/testdata/src/cgocover4/p.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package p
-
-/*
-void
-f(void)
-{
-}
-*/
-import "C"
-
-var b bool
-
-func F() {
- if b {
- for {
- }
- }
- C.f()
-}
diff --git a/src/cmd/go/testdata/src/cgocover4/x_test.go b/src/cmd/go/testdata/src/cgocover4/x_test.go
deleted file mode 100644
index fd9bae743c..0000000000
--- a/src/cmd/go/testdata/src/cgocover4/x_test.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package p_test
-
-import (
- . "cgocover4"
- "testing"
-)
-
-func TestF(t *testing.T) {
- F()
-}
diff --git a/src/cmd/go/testdata/src/dupload/dupload.go b/src/cmd/go/testdata/src/dupload/dupload.go
deleted file mode 100644
index 2f078525b9..0000000000
--- a/src/cmd/go/testdata/src/dupload/dupload.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package main
-
-import (
- _ "dupload/p2"
- _ "p"
-)
-
-func main() {}
diff --git a/src/cmd/go/testdata/src/dupload/p/p.go b/src/cmd/go/testdata/src/dupload/p/p.go
deleted file mode 100644
index c89cd18d0f..0000000000
--- a/src/cmd/go/testdata/src/dupload/p/p.go
+++ /dev/null
@@ -1 +0,0 @@
-package p
diff --git a/src/cmd/go/testdata/src/dupload/p2/p2.go b/src/cmd/go/testdata/src/dupload/p2/p2.go
deleted file mode 100644
index 8a80979b4e..0000000000
--- a/src/cmd/go/testdata/src/dupload/p2/p2.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package p2
-
-import _ "dupload/vendor/p"
diff --git a/src/cmd/go/testdata/src/dupload/vendor/p/p.go b/src/cmd/go/testdata/src/dupload/vendor/p/p.go
deleted file mode 100644
index c89cd18d0f..0000000000
--- a/src/cmd/go/testdata/src/dupload/vendor/p/p.go
+++ /dev/null
@@ -1 +0,0 @@
-package p
diff --git a/src/cmd/go/testdata/src/gencycle/gencycle.go b/src/cmd/go/testdata/src/gencycle/gencycle.go
deleted file mode 100644
index 600afd93e9..0000000000
--- a/src/cmd/go/testdata/src/gencycle/gencycle.go
+++ /dev/null
@@ -1,5 +0,0 @@
-//go:generate echo hello world
-
-package gencycle
-
-import _ "gencycle"
diff --git a/src/cmd/go/testdata/src/importmain/ismain/main.go b/src/cmd/go/testdata/src/importmain/ismain/main.go
deleted file mode 100644
index bf019076dd..0000000000
--- a/src/cmd/go/testdata/src/importmain/ismain/main.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package main
-
-import _ "importmain/test"
-
-func main() {}
diff --git a/src/cmd/go/testdata/src/importmain/test/test.go b/src/cmd/go/testdata/src/importmain/test/test.go
deleted file mode 100644
index 56e5404079..0000000000
--- a/src/cmd/go/testdata/src/importmain/test/test.go
+++ /dev/null
@@ -1 +0,0 @@
-package test
diff --git a/src/cmd/go/testdata/src/importmain/test/test_test.go b/src/cmd/go/testdata/src/importmain/test/test_test.go
deleted file mode 100644
index 2268a8267e..0000000000
--- a/src/cmd/go/testdata/src/importmain/test/test_test.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package test_test
-
-import "testing"
-import _ "importmain/ismain"
-
-func TestCase(t *testing.T) {}
diff --git a/src/cmd/go/testdata/src/not_main/not_main.go b/src/cmd/go/testdata/src/not_main/not_main.go
deleted file mode 100644
index 75a397c6cb..0000000000
--- a/src/cmd/go/testdata/src/not_main/not_main.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package not_main
-
-func F() {}
diff --git a/src/cmd/go/testdata/src/notest/hello.go b/src/cmd/go/testdata/src/notest/hello.go
deleted file mode 100644
index 7c42c32fb0..0000000000
--- a/src/cmd/go/testdata/src/notest/hello.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package notest
-
-func hello() {
- println("hello world")
-}
-Hello world
diff --git a/src/cmd/go/testdata/src/run/bad.go b/src/cmd/go/testdata/src/run/bad.go
deleted file mode 100644
index c1cc3ac6c8..0000000000
--- a/src/cmd/go/testdata/src/run/bad.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package main
-
-import _ "run/subdir/internal/private"
-
-func main() {}
diff --git a/src/cmd/go/testdata/src/run/good.go b/src/cmd/go/testdata/src/run/good.go
deleted file mode 100644
index 0b67dceeee..0000000000
--- a/src/cmd/go/testdata/src/run/good.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package main
-
-import _ "run/internal"
-
-func main() {}
diff --git a/src/cmd/go/testdata/src/run/internal/internal.go b/src/cmd/go/testdata/src/run/internal/internal.go
deleted file mode 100644
index 5bf0569ce8..0000000000
--- a/src/cmd/go/testdata/src/run/internal/internal.go
+++ /dev/null
@@ -1 +0,0 @@
-package internal
diff --git a/src/cmd/go/testdata/src/run/subdir/internal/private/private.go b/src/cmd/go/testdata/src/run/subdir/internal/private/private.go
deleted file mode 100644
index 735e4dc819..0000000000
--- a/src/cmd/go/testdata/src/run/subdir/internal/private/private.go
+++ /dev/null
@@ -1 +0,0 @@
-package private
diff --git a/src/cmd/go/testdata/src/sleepy1/p_test.go b/src/cmd/go/testdata/src/sleepy1/p_test.go
deleted file mode 100644
index 333be7d8e4..0000000000
--- a/src/cmd/go/testdata/src/sleepy1/p_test.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package p
-
-import (
- "testing"
- "time"
-)
-
-func Test1(t *testing.T) {
- time.Sleep(200 * time.Millisecond)
-}
diff --git a/src/cmd/go/testdata/src/sleepy2/p_test.go b/src/cmd/go/testdata/src/sleepy2/p_test.go
deleted file mode 100644
index 333be7d8e4..0000000000
--- a/src/cmd/go/testdata/src/sleepy2/p_test.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package p
-
-import (
- "testing"
- "time"
-)
-
-func Test1(t *testing.T) {
- time.Sleep(200 * time.Millisecond)
-}
diff --git a/src/cmd/go/testdata/src/sleepybad/p.go b/src/cmd/go/testdata/src/sleepybad/p.go
deleted file mode 100644
index e05b403e39..0000000000
--- a/src/cmd/go/testdata/src/sleepybad/p.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package p
-
-// missing import
-
-var _ = io.DoesNotExist
diff --git a/src/cmd/go/testdata/src/syntaxerror/x.go b/src/cmd/go/testdata/src/syntaxerror/x.go
deleted file mode 100644
index c89cd18d0f..0000000000
--- a/src/cmd/go/testdata/src/syntaxerror/x.go
+++ /dev/null
@@ -1 +0,0 @@
-package p
diff --git a/src/cmd/go/testdata/src/syntaxerror/x_test.go b/src/cmd/go/testdata/src/syntaxerror/x_test.go
deleted file mode 100644
index 2460743e50..0000000000
--- a/src/cmd/go/testdata/src/syntaxerror/x_test.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package p
-
-func f() (x.y, z int) {
-}
diff --git a/src/cmd/go/testdata/src/testcycle/p1/p1.go b/src/cmd/go/testdata/src/testcycle/p1/p1.go
deleted file mode 100644
index 65ab76d4e1..0000000000
--- a/src/cmd/go/testdata/src/testcycle/p1/p1.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package p1
-
-import _ "testcycle/p2"
-
-func init() {
- println("p1 init")
-}
diff --git a/src/cmd/go/testdata/src/testcycle/p1/p1_test.go b/src/cmd/go/testdata/src/testcycle/p1/p1_test.go
deleted file mode 100644
index 75abb13e6d..0000000000
--- a/src/cmd/go/testdata/src/testcycle/p1/p1_test.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package p1
-
-import "testing"
-
-func Test(t *testing.T) {
-}
diff --git a/src/cmd/go/testdata/src/testcycle/p2/p2.go b/src/cmd/go/testdata/src/testcycle/p2/p2.go
deleted file mode 100644
index 7e26cdf19c..0000000000
--- a/src/cmd/go/testdata/src/testcycle/p2/p2.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package p2
-
-import _ "testcycle/p3"
-
-func init() {
- println("p2 init")
-}
diff --git a/src/cmd/go/testdata/src/testcycle/p3/p3.go b/src/cmd/go/testdata/src/testcycle/p3/p3.go
deleted file mode 100644
index bb0a2f4f65..0000000000
--- a/src/cmd/go/testdata/src/testcycle/p3/p3.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package p3
-
-func init() {
- println("p3 init")
-}
diff --git a/src/cmd/go/testdata/src/testcycle/p3/p3_test.go b/src/cmd/go/testdata/src/testcycle/p3/p3_test.go
deleted file mode 100644
index 9b4b0757f8..0000000000
--- a/src/cmd/go/testdata/src/testcycle/p3/p3_test.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package p3
-
-import (
- "testing"
-
- _ "testcycle/p1"
-)
-
-func Test(t *testing.T) {
-}
diff --git a/src/cmd/go/testdata/src/testcycle/q1/q1.go b/src/cmd/go/testdata/src/testcycle/q1/q1.go
deleted file mode 100644
index 7a471f0cc0..0000000000
--- a/src/cmd/go/testdata/src/testcycle/q1/q1.go
+++ /dev/null
@@ -1 +0,0 @@
-package q1
diff --git a/src/cmd/go/testdata/src/testcycle/q1/q1_test.go b/src/cmd/go/testdata/src/testcycle/q1/q1_test.go
deleted file mode 100644
index ca81bd2bf8..0000000000
--- a/src/cmd/go/testdata/src/testcycle/q1/q1_test.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package q1
-
-import "testing"
-import _ "testcycle/q1"
-
-func Test(t *testing.T) {}
diff --git a/src/cmd/go/testdata/src/testdep/p1/p1.go b/src/cmd/go/testdata/src/testdep/p1/p1.go
deleted file mode 100644
index a457035a43..0000000000
--- a/src/cmd/go/testdata/src/testdep/p1/p1.go
+++ /dev/null
@@ -1 +0,0 @@
-package p1
diff --git a/src/cmd/go/testdata/src/testdep/p1/p1_test.go b/src/cmd/go/testdata/src/testdep/p1/p1_test.go
deleted file mode 100644
index 8be7533442..0000000000
--- a/src/cmd/go/testdata/src/testdep/p1/p1_test.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package p1
-
-import _ "testdep/p2"
diff --git a/src/cmd/go/testdata/src/testdep/p2/p2.go b/src/cmd/go/testdata/src/testdep/p2/p2.go
deleted file mode 100644
index 15ba2eacea..0000000000
--- a/src/cmd/go/testdata/src/testdep/p2/p2.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package p2
-
-import _ "testdep/p3"
diff --git a/src/cmd/go/testdata/src/testdep/p3/p3.go b/src/cmd/go/testdata/src/testdep/p3/p3.go
deleted file mode 100644
index 0219e7fae5..0000000000
--- a/src/cmd/go/testdata/src/testdep/p3/p3.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +build ignore
-
-package ignored
diff --git a/src/cmd/go/testdata/src/testnorun/p.go b/src/cmd/go/testdata/src/testnorun/p.go
deleted file mode 100644
index 71a9a561ef..0000000000
--- a/src/cmd/go/testdata/src/testnorun/p.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package p
-
-func init() {
- panic("go test must not link and run test binaries without tests")
-}
diff --git a/src/cmd/go/testdata/src/testregexp/x_test.go b/src/cmd/go/testdata/src/testregexp/x_test.go
deleted file mode 100644
index 7573e79e16..0000000000
--- a/src/cmd/go/testdata/src/testregexp/x_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package x
-
-import "testing"
-
-func TestX(t *testing.T) {
- t.Logf("LOG: X running")
- t.Run("Y", func(t *testing.T) {
- t.Logf("LOG: Y running")
- })
-}
-
-func BenchmarkX(b *testing.B) {
- b.Logf("LOG: X running N=%d", b.N)
- b.Run("Y", func(b *testing.B) {
- b.Logf("LOG: Y running N=%d", b.N)
- })
-}
diff --git a/src/cmd/go/testdata/src/testregexp/z_test.go b/src/cmd/go/testdata/src/testregexp/z_test.go
deleted file mode 100644
index 4fd1979154..0000000000
--- a/src/cmd/go/testdata/src/testregexp/z_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package x
-
-import "testing"
-
-func TestZ(t *testing.T) {
- t.Logf("LOG: Z running")
-}
-
-func TestXX(t *testing.T) {
- t.Logf("LOG: XX running")
-}
-
-func BenchmarkZ(b *testing.B) {
- b.Logf("LOG: Z running N=%d", b.N)
-}
-
-func BenchmarkXX(b *testing.B) {
- b.Logf("LOG: XX running N=%d", b.N)
-}
diff --git a/src/cmd/internal/obj/riscv/asm_test.go b/src/cmd/internal/obj/riscv/asm_test.go
new file mode 100644
index 0000000000..849a87b706
--- /dev/null
+++ b/src/cmd/internal/obj/riscv/asm_test.go
@@ -0,0 +1,133 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv
+
+import (
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+// TestLarge generates a very large file to verify that a large
+// program builds successfully, and in particular that too-far
+// conditional branches are fixed.
+func TestLarge(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skip in short mode")
+ }
+ testenv.MustHaveGoBuild(t)
+
+ dir, err := ioutil.TempDir("", "testlarge")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ // Generate a very large function.
+ buf := bytes.NewBuffer(make([]byte, 0, 7000000))
+ gen(buf)
+
+ tmpfile := filepath.Join(dir, "x.s")
+ err = ioutil.WriteFile(tmpfile, buf.Bytes(), 0644)
+ if err != nil {
+ t.Fatalf("can't write output: %v\n", err)
+ }
+
+ // Build generated file.
+ cmd := exec.Command(testenv.GoToolPath(t), "tool", "asm", "-o", filepath.Join(dir, "x.o"), tmpfile)
+ cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("Build failed: %v, output: %s", err, out)
+ }
+}
+
+// gen generates a very large program, with a very far conditional branch.
+func gen(buf *bytes.Buffer) {
+ fmt.Fprintln(buf, "TEXT f(SB),0,$0-0")
+ fmt.Fprintln(buf, "BEQ X0, X0, label")
+ for i := 0; i < 1<<19; i++ {
+ fmt.Fprintln(buf, "ADD $0, X0, X0")
+ }
+ fmt.Fprintln(buf, "label:")
+ fmt.Fprintln(buf, "ADD $0, X0, X0")
+}
+
+// Issue 20348.
+func TestNoRet(t *testing.T) {
+ dir, err := ioutil.TempDir("", "testnoret")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ tmpfile := filepath.Join(dir, "x.s")
+ if err := ioutil.WriteFile(tmpfile, []byte("TEXT ·stub(SB),$0-0\nNOP\n"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ cmd := exec.Command(testenv.GoToolPath(t), "tool", "asm", "-o", filepath.Join(dir, "x.o"), tmpfile)
+ cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux")
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Errorf("%v\n%s", err, out)
+ }
+}
+
+func TestImmediateSplitting(t *testing.T) {
+ dir, err := ioutil.TempDir("", "testimmsplit")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ tmpfile := filepath.Join(dir, "x.s")
+ asm := `
+TEXT _stub(SB),$0-0
+ LB 4096(X5), X6
+ LH 4096(X5), X6
+ LW 4096(X5), X6
+ LD 4096(X5), X6
+ LBU 4096(X5), X6
+ LHU 4096(X5), X6
+ LWU 4096(X5), X6
+ SB X6, 4096(X5)
+ SH X6, 4096(X5)
+ SW X6, 4096(X5)
+ SD X6, 4096(X5)
+
+ FLW 4096(X5), F6
+ FLD 4096(X5), F6
+ FSW F6, 4096(X5)
+ FSD F6, 4096(X5)
+
+ MOVB 4096(X5), X6
+ MOVH 4096(X5), X6
+ MOVW 4096(X5), X6
+ MOV 4096(X5), X6
+ MOVBU 4096(X5), X6
+ MOVHU 4096(X5), X6
+ MOVWU 4096(X5), X6
+
+ MOVB X6, 4096(X5)
+ MOVH X6, 4096(X5)
+ MOVW X6, 4096(X5)
+ MOV X6, 4096(X5)
+
+ MOVF 4096(X5), F6
+ MOVD 4096(X5), F6
+ MOVF F6, 4096(X5)
+ MOVD F6, 4096(X5)
+`
+ if err := ioutil.WriteFile(tmpfile, []byte(asm), 0644); err != nil {
+ t.Fatal(err)
+ }
+ cmd := exec.Command(testenv.GoToolPath(t), "tool", "asm", "-o", filepath.Join(dir, "x.o"), tmpfile)
+ cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux")
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Errorf("%v\n%s", err, out)
+ }
+}
diff --git a/src/cmd/internal/obj/riscv/cpu.go b/src/cmd/internal/obj/riscv/cpu.go
index 0f33716676..61a68b91c2 100644
--- a/src/cmd/internal/obj/riscv/cpu.go
+++ b/src/cmd/internal/obj/riscv/cpu.go
@@ -141,6 +141,7 @@ const (
// Go runtime register names.
REG_G = REG_TP // G pointer.
REG_CTXT = REG_S4 // Context for closures.
+ REG_LR = REG_RA // Link register.
REG_TMP = REG_T6 // Reserved for assembler use.
// ABI names for floating point registers.
@@ -182,6 +183,77 @@ const (
REGG = REG_G
)
+// https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md#dwarf-register-numbers
+var RISCV64DWARFRegisters = map[int16]int16{
+ // Integer Registers.
+ REG_X0: 0,
+ REG_X1: 1,
+ REG_X2: 2,
+ REG_X3: 3,
+ REG_X4: 4,
+ REG_X5: 5,
+ REG_X6: 6,
+ REG_X7: 7,
+ REG_X8: 8,
+ REG_X9: 9,
+ REG_X10: 10,
+ REG_X11: 11,
+ REG_X12: 12,
+ REG_X13: 13,
+ REG_X14: 14,
+ REG_X15: 15,
+ REG_X16: 16,
+ REG_X17: 17,
+ REG_X18: 18,
+ REG_X19: 19,
+ REG_X20: 20,
+ REG_X21: 21,
+ REG_X22: 22,
+ REG_X23: 23,
+ REG_X24: 24,
+ REG_X25: 25,
+ REG_X26: 26,
+ REG_X27: 27,
+ REG_X28: 28,
+ REG_X29: 29,
+ REG_X30: 30,
+ REG_X31: 31,
+
+ // Floating-Point Registers.
+ REG_F0: 32,
+ REG_F1: 33,
+ REG_F2: 34,
+ REG_F3: 35,
+ REG_F4: 36,
+ REG_F5: 37,
+ REG_F6: 38,
+ REG_F7: 39,
+ REG_F8: 40,
+ REG_F9: 41,
+ REG_F10: 42,
+ REG_F11: 43,
+ REG_F12: 44,
+ REG_F13: 45,
+ REG_F14: 46,
+ REG_F15: 47,
+ REG_F16: 48,
+ REG_F17: 49,
+ REG_F18: 50,
+ REG_F19: 51,
+ REG_F20: 52,
+ REG_F21: 53,
+ REG_F22: 54,
+ REG_F23: 55,
+ REG_F24: 56,
+ REG_F25: 57,
+ REG_F26: 58,
+ REG_F27: 59,
+ REG_F28: 60,
+ REG_F29: 61,
+ REG_F30: 62,
+ REG_F31: 63,
+}
+
// Prog.Mark flags.
const (
// NEED_PCREL_ITYPE_RELOC is set on AUIPC instructions to indicate that
diff --git a/src/cmd/internal/obj/riscv/list.go b/src/cmd/internal/obj/riscv/list.go
index f5f7ef21e4..de90961e32 100644
--- a/src/cmd/internal/obj/riscv/list.go
+++ b/src/cmd/internal/obj/riscv/list.go
@@ -11,11 +11,11 @@ import (
)
func init() {
- obj.RegisterRegister(obj.RBaseRISCV, REG_END, regName)
+ obj.RegisterRegister(obj.RBaseRISCV, REG_END, RegName)
obj.RegisterOpcode(obj.ABaseRISCV, Anames)
}
-func regName(r int) string {
+func RegName(r int) string {
switch {
case r == 0:
return "NONE"
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index 0325b4d40f..1d2c498110 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -22,15 +22,52 @@ package riscv
import (
"cmd/internal/obj"
+ "cmd/internal/objabi"
"cmd/internal/sys"
"fmt"
)
-// TODO(jsing): Populate.
-var RISCV64DWARFRegisters = map[int16]int16{}
-
func buildop(ctxt *obj.Link) {}
+// jalrToSym replaces p with a set of Progs needed to jump to the Sym in p.
+// lr is the link register to use for the JALR.
+// p must be a CALL, JMP or RET.
+func jalrToSym(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, lr int16) *obj.Prog {
+ if p.As != obj.ACALL && p.As != obj.AJMP && p.As != obj.ARET {
+ ctxt.Diag("unexpected Prog in jalrToSym: %v", p)
+ return p
+ }
+
+ // TODO(jsing): Consider using a single JAL instruction and teaching
+ // the linker to provide trampolines for the case where the destination
+ // offset is too large. This would potentially reduce instructions for
+ // the common case, but would require three instructions to go via the
+ // trampoline.
+
+ to := p.To
+
+ // This offset isn't really encoded with either instruction. It will be
+ // extracted for a relocation later.
+ p.As = AAUIPC
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: to.Offset, Sym: to.Sym}
+ p.Reg = 0
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}
+ p.Mark |= NEED_PCREL_ITYPE_RELOC
+ p = obj.Appendp(p, newprog)
+
+ // Leave Sym only for the CALL reloc in assemble.
+ p.As = AJALR
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = lr
+ p.From.Sym = to.Sym
+ p.Reg = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_TMP
+ lowerJALR(p)
+
+ return p
+}
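jalrToSym gives symbol calls a full ±2GiB PC-relative range by pairing AUIPC with JALR. A schematic of the emitted sequence (offsets are resolved later via relocation; the register rendering here is illustrative):

	// CALL sym(SB) becomes, schematically:
	//	AUIPC $high, TMP     // TMP = PC + (high << 12), via NEED_PCREL_ITYPE_RELOC
	//	JALR  $low(TMP), lr  // jump to TMP+low; return address -> lr
	// where lr is the link register (RA) for CALL and ZERO for JMP
	// and tail-call RET.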
+
// lowerJALR normalizes a JALR instruction.
func lowerJALR(p *obj.Prog) {
if p.As != AJALR {
@@ -54,7 +91,9 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
if p.Reg == 0 {
switch p.As {
case AADDI, ASLTI, ASLTIU, AANDI, AORI, AXORI, ASLLI, ASRLI, ASRAI,
- AADD, AAND, AOR, AXOR, ASLL, ASRL, ASUB, ASRA:
+ AADD, AAND, AOR, AXOR, ASLL, ASRL, ASUB, ASRA,
+ AMUL, AMULH, AMULHU, AMULHSU, AMULW, ADIV, ADIVU, ADIVW, ADIVUW,
+ AREM, AREMU, AREMW, AREMUW:
p.Reg = p.To.Reg
}
}
@@ -106,6 +145,42 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
p.Ctxt.Diag("%v\tmemory required for destination", p)
}
+ case obj.AJMP:
+ // Turn JMP into JAL ZERO or JALR ZERO.
+ // p.From is actually an _output_ for this instruction.
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_ZERO
+
+ switch p.To.Type {
+ case obj.TYPE_BRANCH:
+ p.As = AJAL
+ case obj.TYPE_MEM:
+ switch p.To.Name {
+ case obj.NAME_NONE:
+ p.As = AJALR
+ lowerJALR(p)
+ case obj.NAME_EXTERN:
+ // Handled in preprocess.
+ default:
+ ctxt.Diag("progedit: unsupported name %d for %v", p.To.Name, p)
+ }
+ default:
+ panic(fmt.Sprintf("unhandled type %+v", p.To.Type))
+ }
+
+ case obj.ACALL:
+ switch p.To.Type {
+ case obj.TYPE_MEM:
+ // Handled in preprocess.
+ case obj.TYPE_REG:
+ p.As = AJALR
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_LR
+ lowerJALR(p)
+ default:
+ ctxt.Diag("unknown destination type %+v in CALL: %v", p.To.Type, p)
+ }
+
case AJALR:
lowerJALR(p)
@@ -145,6 +220,27 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
case AFCVTWS, AFCVTLS, AFCVTWUS, AFCVTLUS, AFCVTWD, AFCVTLD, AFCVTWUD, AFCVTLUD:
// Set the rounding mode in funct3 to round to zero.
p.Scond = 1
+
+ case ASEQZ:
+ // SEQZ rs, rd -> SLTIU $1, rs, rd
+ p.As = ASLTIU
+ p.Reg = p.From.Reg
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 1}
+
+ case ASNEZ:
+ // SNEZ rs, rd -> SLTU rs, x0, rd
+ p.As = ASLTU
+ p.Reg = REG_ZERO
+
+ case AFNEGS:
+ // FNEGS rs, rd -> FSGNJNS rs, rs, rd
+ p.As = AFSGNJNS
+ p.Reg = p.From.Reg
+
+ case AFNEGD:
+ // FNEGD rs, rd -> FSGNJND rs, rs, rd
+ p.As = AFSGNJND
+ p.Reg = p.From.Reg
}
}
@@ -390,6 +486,44 @@ func rewriteMOV(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog) {
}
}
+// InvertBranch inverts the condition of a conditional branch.
+func InvertBranch(i obj.As) obj.As {
+ switch i {
+ case ABEQ:
+ return ABNE
+ case ABNE:
+ return ABEQ
+ case ABLT:
+ return ABGE
+ case ABGE:
+ return ABLT
+ case ABLTU:
+ return ABGEU
+ case ABGEU:
+ return ABLTU
+ default:
+ panic("InvertBranch: not a branch")
+ }
+}
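InvertBranch supports branch relaxation: a conditional branch whose target lies outside the ±4KiB reach of a B-type immediate is rewritten as the inverted branch over an unconditional JAL, whose J-type immediate reaches ±1MiB. Schematically (a sketch of the idea; the actual rewrite happens in the fixed-point loop near the end of this file):

	// Before (target out of conditional-branch range):
	//	BEQ  X5, X6, target
	// After:
	//	BNE  X5, X6, skip   // InvertBranch(ABEQ) == ABNE
	//	JAL  ZERO, target   // ±1MiB unconditional jump
	// skip: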
+
+// containsCall reports whether the symbol contains a CALL (or equivalent)
+// instruction. Must be called after progedit.
+func containsCall(sym *obj.LSym) bool {
+ // CALLs are CALL or JAL(R) with link register LR.
+ for p := sym.Func.Text; p != nil; p = p.Link {
+ switch p.As {
+ case obj.ACALL:
+ return true
+ case AJAL, AJALR:
+ if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_LR {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
// setPCs sets the Pc field in all instructions reachable from p.
// It uses pc as the initial value.
func setPCs(p *obj.Prog, pc int64) {
@@ -437,11 +571,20 @@ func stackOffset(a *obj.Addr, stacksize int64) {
}
}
+// preprocess generates prologue and epilogue code, computes PC-relative branch
+// and jump offsets, and resolves pseudo-registers.
+//
+// preprocess is called once per linker symbol.
+//
+// When preprocess finishes, all instructions in the symbol are either
+// concrete, real RISC-V instructions or directive pseudo-ops like TEXT,
+// PCDATA, and FUNCDATA.
func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if cursym.Func.Text == nil || cursym.Func.Text.Link == nil {
return
}
+ // Generate the prologue.
text := cursym.Func.Text
if text.As != obj.ATEXT {
ctxt.Diag("preprocess: found symbol that does not start with TEXT directive")
@@ -463,10 +606,126 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
+ if !containsCall(cursym) {
+ text.From.Sym.Set(obj.AttrLeaf, true)
+ if stacksize == 0 {
+ // A leaf function with no locals has no frame.
+ text.From.Sym.Set(obj.AttrNoFrame, true)
+ }
+ }
+
+ // Save LR unless there is no frame.
+ if !text.From.Sym.NoFrame() {
+ stacksize += ctxt.FixedFrameSize()
+ }
+
cursym.Func.Args = text.To.Val.(int32)
cursym.Func.Locals = int32(stacksize)
- // TODO(jsing): Implement.
+ prologue := text
+
+ if !cursym.Func.Text.From.Sym.NoSplit() {
+ prologue = stacksplit(ctxt, prologue, cursym, newprog, stacksize) // emit split check
+ }
+
+ if stacksize != 0 {
+ prologue = ctxt.StartUnsafePoint(prologue, newprog)
+
+ // Actually save LR.
+ prologue = obj.Appendp(prologue, newprog)
+ prologue.As = AMOV
+ prologue.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR}
+ prologue.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: -stacksize}
+
+ // Insert stack adjustment.
+ prologue = obj.Appendp(prologue, newprog)
+ prologue.As = AADDI
+ prologue.From = obj.Addr{Type: obj.TYPE_CONST, Offset: -stacksize}
+ prologue.Reg = REG_SP
+ prologue.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_SP}
+ prologue.Spadj = int32(stacksize)
+
+ prologue = ctxt.EndUnsafePoint(prologue, newprog, -1)
+ }
+
+ if cursym.Func.Text.From.Sym.Wrapper() {
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOV g_panic(g), X11
+ // BNE X11, ZERO, adjust
+ // end:
+ // NOP
+ // ...rest of function..
+ // adjust:
+ // MOV panic_argp(X11), X12
+ // ADD $(autosize+FIXED_FRAME), SP, X13
+ // BNE X12, X13, end
+ // ADD $FIXED_FRAME, SP, X12
+ // MOV X12, panic_argp(X11)
+ // JMP end
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+
+ ldpanic := obj.Appendp(prologue, newprog)
+
+ ldpanic.As = AMOV
+ ldpanic.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REGG, Offset: 4 * int64(ctxt.Arch.PtrSize)} // G.panic
+ ldpanic.Reg = 0
+ ldpanic.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X11}
+
+ bneadj := obj.Appendp(ldpanic, newprog)
+ bneadj.As = ABNE
+ bneadj.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X11}
+ bneadj.Reg = REG_ZERO
+ bneadj.To.Type = obj.TYPE_BRANCH
+
+ endadj := obj.Appendp(bneadj, newprog)
+ endadj.As = obj.ANOP
+
+ last := endadj
+ for last.Link != nil {
+ last = last.Link
+ }
+
+ getargp := obj.Appendp(last, newprog)
+ getargp.As = AMOV
+ getargp.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_X11, Offset: 0} // Panic.argp
+ getargp.Reg = 0
+ getargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12}
+
+ bneadj.Pcond = getargp
+
+ calcargp := obj.Appendp(getargp, newprog)
+ calcargp.As = AADDI
+ calcargp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: stacksize + ctxt.FixedFrameSize()}
+ calcargp.Reg = REG_SP
+ calcargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X13}
+
+ testargp := obj.Appendp(calcargp, newprog)
+ testargp.As = ABNE
+ testargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12}
+ testargp.Reg = REG_X13
+ testargp.To.Type = obj.TYPE_BRANCH
+ testargp.Pcond = endadj
+
+ adjargp := obj.Appendp(testargp, newprog)
+ adjargp.As = AADDI
+ adjargp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(ctxt.Arch.PtrSize)}
+ adjargp.Reg = REG_SP
+ adjargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12}
+
+ setargp := obj.Appendp(adjargp, newprog)
+ setargp.As = AMOV
+ setargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X12}
+ setargp.Reg = 0
+ setargp.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_X11, Offset: 0} // Panic.argp
+
+ godone := obj.Appendp(setargp, newprog)
+ godone.As = AJAL
+ godone.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO}
+ godone.To.Type = obj.TYPE_BRANCH
+ godone.Pcond = endadj
+ }
// Update stack-based offsets.
for p := cursym.Func.Text; p != nil; p = p.Link {
@@ -474,14 +733,117 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
stackOffset(&p.To, stacksize)
}
- // Additional instruction rewriting. Any rewrites that change the number
- // of instructions must occur here (before jump target resolution).
+ // Additional instruction rewriting.
+ for p := cursym.Func.Text; p != nil; p = p.Link {
+ switch p.As {
+ case obj.AGETCALLERPC:
+ if cursym.Leaf() {
+ // MOV LR, Rd
+ p.As = AMOV
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_LR
+ } else {
+ // MOV (RSP), Rd
+ p.As = AMOV
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ }
+
+ case obj.ACALL:
+ switch p.To.Type {
+ case obj.TYPE_MEM:
+ jalrToSym(ctxt, p, newprog, REG_LR)
+ }
+
+ case obj.AJMP:
+ switch p.To.Type {
+ case obj.TYPE_MEM:
+ switch p.To.Name {
+ case obj.NAME_EXTERN:
+ // JMP to symbol.
+ jalrToSym(ctxt, p, newprog, REG_ZERO)
+ }
+ }
+
+ case obj.ARET:
+ // Replace RET with epilogue.
+ retJMP := p.To.Sym
+
+ if stacksize != 0 {
+ // Restore LR.
+ p.As = AMOV
+ p.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: 0}
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_LR}
+ p = obj.Appendp(p, newprog)
+
+ p.As = AADDI
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: stacksize}
+ p.Reg = REG_SP
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_SP}
+ p.Spadj = int32(-stacksize)
+ p = obj.Appendp(p, newprog)
+ }
+
+ if retJMP != nil {
+ p.As = obj.ARET
+ p.To.Sym = retJMP
+ p = jalrToSym(ctxt, p, newprog, REG_ZERO)
+ } else {
+ p.As = AJALR
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = REG_LR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_ZERO
+ }
+
+ // "Add back" the stack removed in the previous instruction.
+ //
+ // This is to avoid confusing pctospadj, which sums
+ // Spadj from function entry to each PC, and shouldn't
+ // count adjustments from earlier epilogues, since they
+ // won't affect later PCs.
+ p.Spadj = int32(stacksize)
+
+ // Replace FNE[SD] with FEQ[SD] and NOT.
+ case AFNES:
+ if p.To.Type != obj.TYPE_REG {
+ ctxt.Diag("progedit: FNES needs an integer register output")
+ }
+ dst := p.To.Reg
+ p.As = AFEQS
+ p = obj.Appendp(p, newprog)
+
+ p.As = AXORI // [bit] xor 1 = not [bit]
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.Reg = dst
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dst
+
+ case AFNED:
+ if p.To.Type != obj.TYPE_REG {
+ ctxt.Diag("progedit: FNED needs an integer register output")
+ }
+ dst := p.To.Reg
+ p.As = AFEQD
+ p = obj.Appendp(p, newprog)
+
+ p.As = AXORI // [bit] xor 1 = not [bit]
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.Reg = dst
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dst
+ }
+ }
+
+ // Rewrite MOV pseudo-instructions. This cannot be done in
+ // progedit, as SP offsets need to be applied before we split
+ // up some of the Addrs.
for p := cursym.Func.Text; p != nil; p = p.Link {
switch p.As {
case AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD:
- // Rewrite MOV pseudo-instructions. This cannot be done in
- // progedit, as SP offsets need to be applied before we split
- // up some of the Addrs.
rewriteMOV(ctxt, newprog, p)
}
}
@@ -535,11 +897,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// <load> $imm, REG, TO (load $imm+(REG), TO)
// <store> $imm, REG, TO (store $imm+(TO), REG)
- case ALD, ALB, ALH, ALW, ALBU, ALHU, ALWU,
- ASD, ASB, ASH, ASW:
- // LUI $high, TMP
- // ADDI $low, TMP, TMP
- q := *p
+ case ALB, ALH, ALW, ALD, ALBU, ALHU, ALWU, AFLW, AFLD, ASB, ASH, ASW, ASD, AFSW, AFSD:
low, high, err := Split32BitImmediate(p.From.Offset)
if err != nil {
ctxt.Diag("%v: constant %d too large", p, p.From.Offset)
@@ -548,8 +906,9 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
break // no need to split
}
+ q := *p
switch q.As {
- case ALD, ALB, ALH, ALW, ALBU, ALHU, ALWU:
+ case ALB, ALH, ALW, ALD, ALBU, ALHU, ALWU, AFLW, AFLD:
// LUI $high, TMP
// ADD TMP, REG, TMP
// <load> $low, TMP, TO
@@ -571,7 +930,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: low}
p.Reg = REG_TMP
- case ASD, ASB, ASH, ASW:
+ case ASB, ASH, ASW, ASD, AFSW, AFSD:
// LUI $high, TMP
// ADD TMP, TO, TMP
// <store> $low, REG, TMP
@@ -596,9 +955,71 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
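
The load/store splitting above leans on Split32BitImmediate, called earlier in this hunk. A hedged sketch of the arithmetic it performs (the real helper also range-checks its input; split32 is an invented name):

    package sketch

    // split32 breaks imm into (low, high) with imm == high<<12 + low,
    // where low fits the signed 12-bit range of ADDI and of load/store
    // offsets. Sign-extending the low 12 bits keeps the identity exact
    // even when bit 11 of imm is set.
    func split32(imm int64) (low, high int64) {
        low = imm << 52 >> 52    // sign-extended low 12 bits
        high = (imm - low) >> 12 // exact: imm-low is a multiple of 1<<12
        return low, high
    }

For example, split32(0x12fff) yields low = -1 and high = 0x13, and 0x13<<12 - 1 == 0x12fff, which is what the generated LUI/ADD/load sequence rebuilds.
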
- setPCs(cursym.Func.Text, 0)
+ // Compute instruction addresses. Once we do that, we need to check for
+ // overextended jumps and branches. Within each iteration, Pc differences
+ // are always lower bounds (since the program gets monotonically longer,
+ // a fixed point will be reached). No attempt is made to handle functions > 2GiB.
+ for {
+ rescan := false
+ setPCs(cursym.Func.Text, 0)
+
+ for p := cursym.Func.Text; p != nil; p = p.Link {
+ switch p.As {
+ case ABEQ, ABNE, ABLT, ABGE, ABLTU, ABGEU:
+ if p.To.Type != obj.TYPE_BRANCH {
+ panic("assemble: instruction with branch-like opcode lacks destination")
+ }
+ offset := p.Pcond.Pc - p.Pc
+ if offset < -4096 || 4096 <= offset {
+ // Branch is long. Replace it with a jump.
+ jmp := obj.Appendp(p, newprog)
+ jmp.As = AJAL
+ jmp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO}
+ jmp.To = obj.Addr{Type: obj.TYPE_BRANCH}
+ jmp.Pcond = p.Pcond
+
+ p.As = InvertBranch(p.As)
+ p.Pcond = jmp.Link
+
+ // We may have made previous branches too long,
+ // so recheck them.
+ rescan = true
+ }
+ case AJAL:
+ if p.Pcond == nil {
+ panic("intersymbol jumps should be expressed as AUIPC+JALR")
+ }
+ offset := p.Pcond.Pc - p.Pc
+ if offset < -(1<<20) || (1<<20) <= offset {
+ // Replace with 2-instruction sequence. This assumes
+ // that TMP is not live across J instructions, since
+ // it is reserved by SSA.
+ jmp := obj.Appendp(p, newprog)
+ jmp.As = AJALR
+ jmp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0}
+ jmp.To = p.From
+ jmp.Reg = REG_TMP
+
+ // p.From is not generally valid; however, it will be
+ // fixed up in the next loop.
+ p.As = AAUIPC
+ p.From = obj.Addr{Type: obj.TYPE_BRANCH, Sym: p.From.Sym}
+ p.Reg = 0
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}
+
+ rescan = true
+ }
+ }
+ }
+
+ if !rescan {
+ break
+ }
+ }
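
Abstracted away from obj.Prog, the relaxation loop above has this shape (a sketch with invented types; the real code widens branches into an inverted branch plus JAL, and far JALs into AUIPC+JALR):

    package sketch

    // insn is an illustrative stand-in for obj.Prog.
    type insn struct {
        size   int   // bytes occupied at the current encoding
        target *insn // branch/jump target; nil otherwise
        short  bool  // still using the limited-range encoding
        pc     int
    }

    // relax re-measures the program and widens any short-form branch
    // whose target is out of range, until nothing changes. Since the
    // program only ever grows, the loop reaches a fixed point.
    func relax(prog []*insn, shortRange int) {
        for {
            changed := false
            pc := 0
            for _, p := range prog {
                p.pc = pc
                pc += p.size
            }
            for _, p := range prog {
                if p.short && p.target != nil {
                    if off := p.target.pc - p.pc; off < -shortRange || off >= shortRange {
                        p.short = false
                        p.size += 4 // e.g. inverted branch followed by a JAL
                        changed = true
                    }
                }
            }
            if !changed {
                return
            }
        }
    }
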
- // Resolve branch and jump targets.
+ // Now that there are no long branches, resolve branch and jump targets.
+ // At this point, instruction rewriting which changes the number of
+ // instructions will break everything--don't do it!
for p := cursym.Func.Text; p != nil; p = p.Link {
switch p.As {
case AJAL, ABEQ, ABNE, ABLT, ABLTU, ABGE, ABGEU:
@@ -608,6 +1029,16 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
case obj.TYPE_MEM:
panic("unhandled type")
}
+
+ case AAUIPC:
+ if p.From.Type == obj.TYPE_BRANCH {
+ low, high, err := Split32BitImmediate(p.Pcond.Pc - p.Pc)
+ if err != nil {
+ ctxt.Diag("%v: jump displacement %d too large", p, p.Pcond.Pc-p.Pc)
+ }
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high, Sym: cursym}
+ p.Link.From.Offset = low
+ }
}
}
@@ -617,6 +1048,152 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
+func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgAlloc, framesize int64) *obj.Prog {
+ // Leaf function with no frame is effectively NOSPLIT.
+ if framesize == 0 {
+ return p
+ }
+
+ // MOV g_stackguard(g), X10
+ p = obj.Appendp(p, newprog)
+ p.As = AMOV
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REGG
+ p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
+ if cursym.CFunc() {
+ p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_X10
+
+ var to_done, to_more *obj.Prog
+
+ if framesize <= objabi.StackSmall {
+ // small stack: SP < stackguard
+ // BLTU SP, stackguard, done
+ p = obj.Appendp(p, newprog)
+ p.As = ABLTU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_X10
+ p.Reg = REG_SP
+ p.To.Type = obj.TYPE_BRANCH
+ to_done = p
+ } else if framesize <= objabi.StackBig {
+ // large stack: SP-framesize < stackguard-StackSmall
+ // ADD $-(framesize-StackSmall), SP, X11
+ // BLTU X11, stackguard, done
+ p = obj.Appendp(p, newprog)
+ // TODO(sorear): logic inconsistent with comment, but both match all non-x86 arches
+ p.As = AADDI
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -(int64(framesize) - objabi.StackSmall)
+ p.Reg = REG_SP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_X11
+
+ p = obj.Appendp(p, newprog)
+ p.As = ABLTU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_X10
+ p.Reg = REG_X11
+ p.To.Type = obj.TYPE_BRANCH
+ to_done = p
+ } else {
+ // The stack is so large that we need to protect against wraparound.
+ // If SP is close to zero:
+ // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+ // The +StackGuard on both sides is required to keep the left side positive:
+ // SP is allowed to be slightly below stackguard. See stack.h.
+ //
+ // Preemption sets stackguard to StackPreempt, a very large value.
+ // That breaks the math above, so we have to check for that explicitly.
+ // // stackguard is X10
+ // MOV $StackPreempt, X11
+ // BEQ X10, X11, more
+ // ADD $StackGuard, SP, X11
+ // SUB X10, X11
+ // MOV $(framesize+(StackGuard-StackSmall)), X10
+ // BGTU X11, X10, done
+ p = obj.Appendp(p, newprog)
+ p.As = AMOV
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = objabi.StackPreempt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_X11
+
+ p = obj.Appendp(p, newprog)
+ to_more = p
+ p.As = ABEQ
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_X10
+ p.Reg = REG_X11
+ p.To.Type = obj.TYPE_BRANCH
+
+ p = obj.Appendp(p, newprog)
+ p.As = AADDI
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(objabi.StackGuard)
+ p.Reg = REG_SP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_X11
+
+ p = obj.Appendp(p, newprog)
+ p.As = ASUB
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_X10
+ p.Reg = REG_X11
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_X11
+
+ p = obj.Appendp(p, newprog)
+ p.As = AMOV
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_X10
+
+ p = obj.Appendp(p, newprog)
+ p.As = ABLTU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_X10
+ p.Reg = REG_X11
+ p.To.Type = obj.TYPE_BRANCH
+ to_done = p
+ }
+
+ p = ctxt.EmitEntryLiveness(cursym, p, newprog)
+
+ // CALL runtime.morestack(SB)
+ p = obj.Appendp(p, newprog)
+ p.As = obj.ACALL
+ p.To.Type = obj.TYPE_BRANCH
+ if cursym.CFunc() {
+ p.To.Sym = ctxt.Lookup("runtime.morestackc")
+ } else if !cursym.Func.Text.From.Sym.NeedCtxt() {
+ p.To.Sym = ctxt.Lookup("runtime.morestack_noctxt")
+ } else {
+ p.To.Sym = ctxt.Lookup("runtime.morestack")
+ }
+ if to_more != nil {
+ to_more.Pcond = p
+ }
+ p = jalrToSym(ctxt, p, newprog, REG_X5)
+
+ // JMP start
+ p = obj.Appendp(p, newprog)
+ p.As = AJAL
+ p.To = obj.Addr{Type: obj.TYPE_BRANCH}
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO}
+ p.Pcond = cursym.Func.Text.Link
+
+ // placeholder for to_done's jump target
+ p = obj.Appendp(p, newprog)
+ p.As = obj.ANOP // zero-width placeholder
+ to_done.Pcond = p
+
+ return p
+}
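
The three branches of stacksplit correspond to three guard checks. A hedged Go rendering (the stack* parameters stand in for the objabi constants; the generated code performs these compares in registers):

    package sketch

    // needMorestack reports whether the prologue should call
    // runtime.morestack for a frame of the given size.
    func needMorestack(sp, stackguard, framesize,
        stackSmall, stackBig, stackGuard, stackPreempt uint64) bool {
        switch {
        case framesize <= stackSmall:
            // Small frame: a single compare against the guard.
            return sp < stackguard
        case framesize <= stackBig:
            // Large frame: pre-subtract the frame, then compare.
            return sp-(framesize-stackSmall) < stackguard
        default:
            // Huge frame: the preemption sentinel breaks the math, so
            // test it explicitly, then use wraparound-safe arithmetic.
            if stackguard == stackPreempt {
                return true
            }
            return sp+stackGuard-stackguard <= framesize+(stackGuard-stackSmall)
        }
    }
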
+
// signExtend sign extends val starting at bit bit.
func signExtend(val int64, bit uint) int64 {
return val << (64 - bit) >> (64 - bit)
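
The shift pair relies on Go's arithmetic right shift for signed integers. A quick, runnable check (values chosen for illustration):

    package main

    import "fmt"

    func signExtend(val int64, bit uint) int64 {
        return val << (64 - bit) >> (64 - bit)
    }

    func main() {
        fmt.Println(signExtend(0x80, 8))   // -128: bit 7 is the sign bit
        fmt.Println(signExtend(0x7f, 8))   // 127: sign bit clear, value unchanged
        fmt.Println(signExtend(0xfff, 12)) // -1: all twelve bits set
    }
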
@@ -701,13 +1278,6 @@ func immIFits(x int64, nbits uint) bool {
return min <= x && x <= max
}
-// immUFits reports whether immediate value x fits in nbits bits
-// as an unsigned integer.
-func immUFits(x int64, nbits uint) bool {
- var max int64 = 1<<nbits - 1
- return 0 <= x && x <= max
-}
-
// immI extracts the signed integer literal of the specified size from an Addr.
func immI(a obj.Addr, nbits uint) uint32 {
if a.Type != obj.TYPE_CONST {
@@ -719,17 +1289,6 @@ func immI(a obj.Addr, nbits uint) uint32 {
return uint32(a.Offset)
}
-// immU extracts the unsigned integer literal of the specified size from an Addr.
-func immU(a obj.Addr, nbits uint) uint32 {
- if a.Type != obj.TYPE_CONST {
- panic(fmt.Sprintf("ill typed: %+v", a))
- }
- if !immUFits(a.Offset, nbits) {
- panic(fmt.Sprintf("unsigned immediate %d in %v cannot fit in %d bits", a.Offset, a, nbits))
- }
- return uint32(a.Offset)
-}
-
func wantImmI(p *obj.Prog, pos string, a obj.Addr, nbits uint) {
if a.Type != obj.TYPE_CONST {
p.Ctxt.Diag("%v\texpected immediate in %s position but got %s", p, pos, obj.Dconv(p, &a))
@@ -740,19 +1299,9 @@ func wantImmI(p *obj.Prog, pos string, a obj.Addr, nbits uint) {
}
}
-func wantImmU(p *obj.Prog, pos string, a obj.Addr, nbits uint) {
- if a.Type != obj.TYPE_CONST {
- p.Ctxt.Diag("%v\texpected immediate in %s position but got %s", p, pos, obj.Dconv(p, &a))
- return
- }
- if !immUFits(a.Offset, nbits) {
- p.Ctxt.Diag("%v\tunsigned immediate in %s position cannot be larger than %d bits but got %d", p, pos, nbits, a.Offset)
- }
-}
-
func wantReg(p *obj.Prog, pos string, descr string, r, min, max int16) {
if r < min || r > max {
- p.Ctxt.Diag("%v\texpected %s register in %s position but got non-%s register %s", p, descr, pos, descr, regName(int(r)))
+ p.Ctxt.Diag("%v\texpected %s register in %s position but got non-%s register %s", p, descr, pos, descr, RegName(int(r)))
}
}
@@ -864,7 +1413,14 @@ func validateB(p *obj.Prog) {
}
func validateU(p *obj.Prog) {
- wantImmU(p, "from", p.From, 20)
+ if p.As == AAUIPC && p.Mark&(NEED_PCREL_ITYPE_RELOC|NEED_PCREL_STYPE_RELOC) != 0 {
+ // TODO(sorear): Hack. The Offset is being used here to temporarily
+ // store the relocation addend, not as an actual offset to assemble,
+ // so it's OK for it to be out of range. Is there a more valid way
+ // to represent this state?
+ return
+ }
+ wantImmI(p, "from", p.From, 20)
wantIntRegAddr(p, "to", &p.To)
}
@@ -985,7 +1541,7 @@ func encodeU(p *obj.Prog) uint32 {
// Rather than have the user/compiler generate a 32 bit constant, the
// bottommost bits of which must all be zero, instead accept just the
// top bits.
- imm := immU(p.From, 20)
+ imm := immI(p.From, 20)
rd := regIAddr(p.To)
ins := encode(p.As)
if ins == nil {
@@ -1034,7 +1590,7 @@ func EncodeSImmediate(imm int64) (int64, error) {
}
func EncodeUImmediate(imm int64) (int64, error) {
- if !immUFits(imm, 20) {
+ if !immIFits(imm, 20) {
return 0, fmt.Errorf("immediate %#x does not fit in 20 bits", imm)
}
return imm << 12, nil
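
With the switch from unsigned to signed range checking, a U-type immediate is any signed 20-bit value, shifted into instruction bits 31:12. A small illustration (encodeUImm is an invented stand-in for the exported helper, minus its range check):

    package main

    import "fmt"

    func encodeUImm(imm int64) int64 { return imm << 12 }

    func main() {
        fmt.Printf("%#x\n", encodeUImm(1))          // 0x1000
        fmt.Printf("%#x\n", uint32(encodeUImm(-1))) // 0xfffff000: all-ones upper immediate
    }
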
@@ -1281,6 +1837,49 @@ func encodingForProg(p *obj.Prog) encoding {
func assemble(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
var symcode []uint32
for p := cursym.Func.Text; p != nil; p = p.Link {
+ switch p.As {
+ case AJALR:
+ if p.To.Sym != nil {
+ // This is a CALL/JMP. We add a relocation only
+ // for linker stack checking. No actual
+ // relocation is needed.
+ rel := obj.Addrel(cursym)
+ rel.Off = int32(p.Pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+ rel.Type = objabi.R_CALLRISCV
+ }
+ case AAUIPC:
+ var rt objabi.RelocType
+ if p.Mark&NEED_PCREL_ITYPE_RELOC == NEED_PCREL_ITYPE_RELOC {
+ rt = objabi.R_RISCV_PCREL_ITYPE
+ } else if p.Mark&NEED_PCREL_STYPE_RELOC == NEED_PCREL_STYPE_RELOC {
+ rt = objabi.R_RISCV_PCREL_STYPE
+ } else {
+ break
+ }
+ if p.Link == nil {
+ ctxt.Diag("AUIPC needing PC-relative reloc missing following instruction")
+ break
+ }
+ if p.From.Sym == nil {
+ ctxt.Diag("AUIPC needing PC-relative reloc missing symbol")
+ break
+ }
+
+ // The relocation offset can be larger than the maximum
+ // size of an AUIPC, so zero p.From.Offset to avoid any
+ // attempt to assemble it.
+ rel := obj.Addrel(cursym)
+ rel.Off = int32(p.Pc)
+ rel.Siz = 8
+ rel.Sym = p.From.Sym
+ rel.Add = p.From.Offset
+ p.From.Offset = 0
+ rel.Type = rt
+ }
+
enc := encodingForProg(p)
if enc.length > 0 {
symcode = append(symcode, enc.encode(p))
diff --git a/src/cmd/internal/src/pos.go b/src/cmd/internal/src/pos.go
index 60c7c91cde..861d9188b1 100644
--- a/src/cmd/internal/src/pos.go
+++ b/src/cmd/internal/src/pos.go
@@ -305,7 +305,7 @@ type lico uint32
// because they have almost no interaction with other uses of the position.
const (
lineBits, lineMax = 20, 1<<lineBits - 2
- bogusLine = 1<<lineBits - 1 // Not a line number; used to disrupt infinite loops
+ bogusLine = 1 // Used to disrupt infinite loops so that the debugger does not loop
isStmtBits, isStmtMax = 2, 1<<isStmtBits - 1
xlogueBits, xlogueMax = 2, 1<<xlogueBits - 1
colBits, colMax = 32 - lineBits - xlogueBits - isStmtBits, 1<<colBits - 1
diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go
index 37d570f08b..d4bb30399d 100644
--- a/src/cmd/link/dwarf_test.go
+++ b/src/cmd/link/dwarf_test.go
@@ -71,6 +71,7 @@ func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string)
}
cmd.Args = append(cmd.Args, dir)
if env != nil {
+ env = append(env, "CGO_CFLAGS=") // ensure CGO_CFLAGS does not contain any flags. Issue #35459
cmd.Env = append(os.Environ(), env...)
}
out, err := cmd.CombinedOutput()
diff --git a/src/cmd/link/elf_test.go b/src/cmd/link/elf_test.go
index e9f727e919..88048ed2c5 100644
--- a/src/cmd/link/elf_test.go
+++ b/src/cmd/link/elf_test.go
@@ -7,6 +7,8 @@
package main
import (
+ "cmd/internal/sys"
+ "debug/elf"
"fmt"
"internal/testenv"
"io/ioutil"
@@ -15,7 +17,9 @@ import (
"path/filepath"
"runtime"
"strings"
+ "sync"
"testing"
+ "text/template"
)
func getCCAndCCFLAGS(t *testing.T, env []string) (string, []string) {
@@ -139,12 +143,6 @@ func TestMinusRSymsWithSameName(t *testing.T) {
testenv.MustHaveCGO(t)
t.Parallel()
- // Skip this test on MIPS for the time being since it seems to trigger
- // problems with unknown relocations.
- if strings.Contains(runtime.GOARCH, "mips") {
- testenv.SkipFlaky(t, 35779)
- }
-
dir, err := ioutil.TempDir("", "go-link-TestMinusRSymsWithSameName")
if err != nil {
t.Fatal(err)
@@ -209,3 +207,210 @@ func TestMinusRSymsWithSameName(t *testing.T) {
t.Fatal(err)
}
}
+
+const pieSourceTemplate = `
+package main
+
+import "fmt"
+
+// Force the creation of a lot of type descriptors that will go into
+// the .data.rel.ro section.
+{{range $index, $element := .}}var V{{$index}} interface{} = [{{$index}}]int{}
+{{end}}
+
+func main() {
+{{range $index, $element := .}} fmt.Println(V{{$index}})
+{{end}}
+}
+`
+
+func TestPIESize(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ if !sys.BuildModeSupported(runtime.Compiler, "pie", runtime.GOOS, runtime.GOARCH) {
+ t.Skip("-buildmode=pie not supported")
+ }
+
+ tmpl := template.Must(template.New("pie").Parse(pieSourceTemplate))
+
+ writeGo := func(t *testing.T, dir string) {
+ f, err := os.Create(filepath.Join(dir, "pie.go"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Passing a 100-element slice here will cause
+ // pieSourceTemplate to create 100 variables with
+ // different types.
+ if err := tmpl.Execute(f, make([]byte, 100)); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ for _, external := range []bool{false, true} {
+ external := external
+
+ name := "TestPieSize-"
+ if external {
+ name += "external"
+ } else {
+ name += "internal"
+ }
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ dir, err := ioutil.TempDir("", "go-link-"+name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ writeGo(t, dir)
+
+ binexe := filepath.Join(dir, "exe")
+ binpie := filepath.Join(dir, "pie")
+ if external {
+ binexe += "external"
+ binpie += "external"
+ }
+
+ build := func(bin, mode string) error {
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", bin, "-buildmode="+mode)
+ if external {
+ cmd.Args = append(cmd.Args, "-ldflags=-linkmode=external")
+ }
+ cmd.Args = append(cmd.Args, "pie.go")
+ cmd.Dir = dir
+ t.Logf("%v", cmd.Args)
+ out, err := cmd.CombinedOutput()
+ if len(out) > 0 {
+ t.Logf("%s", out)
+ }
+ if err != nil {
+ t.Error(err)
+ }
+ return err
+ }
+
+ var errexe, errpie error
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ errexe = build(binexe, "exe")
+ }()
+ go func() {
+ defer wg.Done()
+ errpie = build(binpie, "pie")
+ }()
+ wg.Wait()
+ if errexe != nil || errpie != nil {
+ t.Fatal("link failed")
+ }
+
+ var sizeexe, sizepie uint64
+ if fi, err := os.Stat(binexe); err != nil {
+ t.Fatal(err)
+ } else {
+ sizeexe = uint64(fi.Size())
+ }
+ if fi, err := os.Stat(binpie); err != nil {
+ t.Fatal(err)
+ } else {
+ sizepie = uint64(fi.Size())
+ }
+
+ elfexe, err := elf.Open(binexe)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer elfexe.Close()
+
+ elfpie, err := elf.Open(binpie)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer elfpie.Close()
+
+ // The difference in size between exe and PIE
+ // should be approximately the difference in
+ // size of the .text section plus the size of
+ // the PIE dynamic data sections plus the
+ // difference in size of the .got and .plt
+ // sections if they exist.
+ // We ignore unallocated sections.
+ // There may be gaps between non-writeable and
+ // writable PT_LOAD segments. We also skip those
+ // gaps (see issue #36023).
+
+ textsize := func(ef *elf.File, name string) uint64 {
+ for _, s := range ef.Sections {
+ if s.Name == ".text" {
+ return s.Size
+ }
+ }
+ t.Fatalf("%s: no .text section", name)
+ return 0
+ }
+ textexe := textsize(elfexe, binexe)
+ textpie := textsize(elfpie, binpie)
+
+ dynsize := func(ef *elf.File) uint64 {
+ var ret uint64
+ for _, s := range ef.Sections {
+ if s.Flags&elf.SHF_ALLOC == 0 {
+ continue
+ }
+ switch s.Type {
+ case elf.SHT_DYNSYM, elf.SHT_STRTAB, elf.SHT_REL, elf.SHT_RELA, elf.SHT_HASH, elf.SHT_GNU_HASH, elf.SHT_GNU_VERDEF, elf.SHT_GNU_VERNEED, elf.SHT_GNU_VERSYM:
+ ret += s.Size
+ }
+ if s.Flags&elf.SHF_WRITE != 0 && (strings.Contains(s.Name, ".got") || strings.Contains(s.Name, ".plt")) {
+ ret += s.Size
+ }
+ }
+ return ret
+ }
+
+ dynexe := dynsize(elfexe)
+ dynpie := dynsize(elfpie)
+
+ extrasize := func(ef *elf.File) uint64 {
+ var ret uint64
+ // skip unallocated sections
+ for _, s := range ef.Sections {
+ if s.Flags&elf.SHF_ALLOC == 0 {
+ ret += s.Size
+ }
+ }
+ // also skip gaps between PT_LOAD segments
+ for i := range ef.Progs {
+ if i == 0 {
+ continue
+ }
+ p1 := ef.Progs[i-1]
+ p2 := ef.Progs[i]
+ if p1.Type == elf.PT_LOAD && p2.Type == elf.PT_LOAD {
+ ret += p2.Off - p1.Off - p1.Filesz
+ }
+ }
+ return ret
+ }
+
+ extraexe := extrasize(elfexe)
+ extrapie := extrasize(elfpie)
+
+ diffReal := (sizepie - extrapie) - (sizeexe - extraexe)
+ diffExpected := (textpie + dynpie) - (textexe + dynexe)
+
+ t.Logf("real size difference %#x, expected %#x", diffReal, diffExpected)
+
+ if diffReal > (diffExpected + diffExpected/10) {
+ t.Errorf("PIE unexpectedly large: got difference of %d (%d - %d), expected difference %d", diffReal, sizepie, sizeexe, diffExpected)
+ }
+ })
+ }
+}
diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go
index 32d1111ea3..7ca01c8c25 100644
--- a/src/cmd/link/internal/ld/data.go
+++ b/src/cmd/link/internal/ld/data.go
@@ -1625,29 +1625,27 @@ func (ctxt *Link) dodata() {
}
if ctxt.UseRelro() {
+ segrelro := &Segrelrodata
+ if ctxt.LinkMode == LinkExternal && ctxt.HeadType != objabi.Haix {
+ // Using a separate segment with an external
+ // linker results in some programs moving
+ // their data sections unexpectedly, which
+ // corrupts the moduledata. So we use the
+ // rodata segment and let the external linker
+ // sort out a rel.ro segment.
+ segrelro = segro
+ } else {
+ // Reset datsize for new segment.
+ datsize = 0
+ }
+
addrelrosection = func(suffix string) *sym.Section {
- seg := &Segrelrodata
- if ctxt.LinkMode == LinkExternal && ctxt.HeadType != objabi.Haix {
- // Using a separate segment with an external
- // linker results in some programs moving
- // their data sections unexpectedly, which
- // corrupts the moduledata. So we use the
- // rodata segment and let the external linker
- // sort out a rel.ro segment.
- seg = &Segrodata
- }
- return addsection(ctxt.Arch, seg, ".data.rel.ro"+suffix, 06)
+ return addsection(ctxt.Arch, segrelro, ".data.rel.ro"+suffix, 06)
}
+
/* data only written by relocations */
sect = addrelrosection("")
- sect.Vaddr = 0
- if ctxt.HeadType == objabi.Haix {
- // datsize must be reset because relro datas will end up
- // in data segment.
- datsize = 0
- }
-
ctxt.Syms.Lookup("runtime.types", 0).Sect = sect
ctxt.Syms.Lookup("runtime.etypes", 0).Sect = sect
@@ -1659,7 +1657,17 @@ func (ctxt *Link) dodata() {
}
}
datsize = Rnd(datsize, int64(sect.Align))
- for _, symnro := range sym.ReadOnly {
+ sect.Vaddr = uint64(datsize)
+
+ for i, symnro := range sym.ReadOnly {
+ if i == 0 && symnro == sym.STYPE && ctxt.HeadType != objabi.Haix {
+ // Skip forward so that no type
+ // reference uses a zero offset.
+ // This is unlikely but possible in small
+ // programs with no other read-only data.
+ datsize++
+ }
+
symn := sym.RelROMap[symnro]
symnStartValue := datsize
for _, s := range data[symn] {
diff --git a/src/cmd/link/internal/ld/deadcode2.go b/src/cmd/link/internal/ld/deadcode2.go
index 915ad1d944..3d3a03215e 100644
--- a/src/cmd/link/internal/ld/deadcode2.go
+++ b/src/cmd/link/internal/ld/deadcode2.go
@@ -113,7 +113,7 @@ func (d *deadcodePass2) init() {
}
for _, name := range names {
- // Mark symbol as an data/ABI0 symbol.
+ // Mark symbol as a data/ABI0 symbol.
d.mark(d.ldr.Lookup(name, 0), 0)
// Also mark any Go functions (internal ABI).
d.mark(d.ldr.Lookup(name, sym.SymVerABIInternal), 0)
diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go
index ab703e94ee..db44c0292e 100644
--- a/src/cmd/link/internal/ld/pcln.go
+++ b/src/cmd/link/internal/ld/pcln.go
@@ -266,8 +266,13 @@ func (ctxt *Link) pclntab() {
switch ctxt.Arch.Family {
case sys.AMD64, sys.I386:
deferreturn--
- case sys.PPC64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64:
+ case sys.PPC64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
// no change
+ case sys.RISCV64:
+ // TODO(jsing): The JALR instruction is marked with
+ // R_CALLRISCV, whereas the actual reloc is currently
+ // one instruction earlier starting with the AUIPC.
+ deferreturn -= 4
case sys.S390X:
deferreturn -= 2
default:
diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go
index 7d613c7a6d..b1e420cc30 100644
--- a/src/cmd/link/internal/loadelf/ldelf.go
+++ b/src/cmd/link/internal/loadelf/ldelf.go
@@ -901,14 +901,26 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader,
p := rsect.base
for j := 0; j < n; j++ {
var add uint64
+ var symIdx int
+ var relocType uint64
+
rp := &r[j]
- var info uint64
if is64 != 0 {
// 64-bit rel/rela
rp.Off = int32(e.Uint64(p))
p = p[8:]
- info = e.Uint64(p)
+ switch arch.Family {
+ case sys.MIPS64:
+ // https://www.linux-mips.org/pub/linux/mips/doc/ABI/elf64-2.4.pdf
+ // The doc shows that the MIPS64 layout differs from the generic Linux ELF one.
+ symIdx = int(e.Uint32(p))
+ relocType = uint64(p[7])
+ default:
+ info := e.Uint64(p)
+ relocType = info & 0xffffffff
+ symIdx = int(info >> 32)
+ }
p = p[8:]
if rela != 0 {
add = e.Uint64(p)
@@ -919,8 +931,9 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader,
rp.Off = int32(e.Uint32(p))
p = p[4:]
- info = uint64(e.Uint32(p))
- info = info>>8<<32 | info&0xff // convert to 64-bit info
+ info := e.Uint32(p)
+ relocType = uint64(info & 0xff)
+ symIdx = int(info >> 8)
p = p[4:]
if rela != 0 {
add = uint64(e.Uint32(p))
@@ -928,29 +941,29 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader,
}
}
- if info&0xffffffff == 0 { // skip R_*_NONE relocation
+ if relocType == 0 { // skip R_*_NONE relocation
j--
n--
continue
}
- if info>>32 == 0 { // absolute relocation, don't bother reading the null symbol
+ if symIdx == 0 { // absolute relocation, don't bother reading the null symbol
rp.Sym = 0
} else {
var elfsym ElfSym
- if err := readelfsym(newSym, lookup, l, arch, elfobj, int(info>>32), &elfsym, 0, 0); err != nil {
+ if err := readelfsym(newSym, lookup, l, arch, elfobj, int(symIdx), &elfsym, 0, 0); err != nil {
return errorf("malformed elf file: %v", err)
}
- elfsym.sym = symbols[info>>32]
+ elfsym.sym = symbols[symIdx]
if elfsym.sym == 0 {
- return errorf("malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", l.SymName(sect.sym), j, int(info>>32), elfsym.name, elfsym.shndx, elfsym.type_)
+ return errorf("malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", l.SymName(sect.sym), j, int(symIdx), elfsym.name, elfsym.shndx, elfsym.type_)
}
rp.Sym = elfsym.sym
}
- rp.Type = objabi.ElfRelocOffset + objabi.RelocType(info)
- rp.Size, err = relSize(arch, pn, uint32(info))
+ rp.Type = objabi.ElfRelocOffset + objabi.RelocType(relocType)
+ rp.Size, err = relSize(arch, pn, uint32(relocType))
if err != nil {
return nil, 0, err
}
@@ -1148,18 +1161,36 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, error) {
// performance.
const (
- AMD64 = uint32(sys.AMD64)
- ARM = uint32(sys.ARM)
- ARM64 = uint32(sys.ARM64)
- I386 = uint32(sys.I386)
- PPC64 = uint32(sys.PPC64)
- S390X = uint32(sys.S390X)
+ AMD64 = uint32(sys.AMD64)
+ ARM = uint32(sys.ARM)
+ ARM64 = uint32(sys.ARM64)
+ I386 = uint32(sys.I386)
+ PPC64 = uint32(sys.PPC64)
+ S390X = uint32(sys.S390X)
+ MIPS = uint32(sys.MIPS)
+ MIPS64 = uint32(sys.MIPS64)
)
switch uint32(arch.Family) | elftype<<16 {
default:
return 0, fmt.Errorf("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype)
+ case MIPS | uint32(elf.R_MIPS_HI16)<<16,
+ MIPS | uint32(elf.R_MIPS_LO16)<<16,
+ MIPS | uint32(elf.R_MIPS_GOT16)<<16,
+ MIPS | uint32(elf.R_MIPS_GPREL16)<<16,
+ MIPS | uint32(elf.R_MIPS_GOT_PAGE)<<16,
+ MIPS | uint32(elf.R_MIPS_JALR)<<16,
+ MIPS | uint32(elf.R_MIPS_GOT_OFST)<<16,
+ MIPS64 | uint32(elf.R_MIPS_HI16)<<16,
+ MIPS64 | uint32(elf.R_MIPS_LO16)<<16,
+ MIPS64 | uint32(elf.R_MIPS_GOT16)<<16,
+ MIPS64 | uint32(elf.R_MIPS_GPREL16)<<16,
+ MIPS64 | uint32(elf.R_MIPS_GOT_PAGE)<<16,
+ MIPS64 | uint32(elf.R_MIPS_JALR)<<16,
+ MIPS64 | uint32(elf.R_MIPS_GOT_OFST)<<16:
+ return 4, nil
+
case S390X | uint32(elf.R_390_8)<<16:
return 1, nil
diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go
index 1515f97558..4f792bd1f1 100644
--- a/src/cmd/link/link_test.go
+++ b/src/cmd/link/link_test.go
@@ -214,6 +214,7 @@ func TestBuildForTvOS(t *testing.T) {
"GOOS=darwin",
"GOARCH=arm64",
"CC="+strings.Join(CC, " "),
+ "CGO_CFLAGS=", // ensure CGO_CFLAGS does not contain any flags. Issue #35459
)
if out, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
diff --git a/src/cmd/objdump/objdump_test.go b/src/cmd/objdump/objdump_test.go
index b24371ddea..0c2adbdb94 100644
--- a/src/cmd/objdump/objdump_test.go
+++ b/src/cmd/objdump/objdump_test.go
@@ -168,6 +168,8 @@ func TestDisasm(t *testing.T) {
switch runtime.GOARCH {
case "mips", "mipsle", "mips64", "mips64le":
t.Skipf("skipping on %s, issue 12559", runtime.GOARCH)
+ case "riscv64":
+ t.Skipf("skipping on %s, issue 36738", runtime.GOARCH)
case "s390x":
t.Skipf("skipping on %s, issue 15255", runtime.GOARCH)
}
@@ -176,10 +178,8 @@ func TestDisasm(t *testing.T) {
func TestDisasmCode(t *testing.T) {
switch runtime.GOARCH {
- case "mips", "mipsle", "mips64", "mips64le":
- t.Skipf("skipping on %s, issue 12559", runtime.GOARCH)
- case "s390x":
- t.Skipf("skipping on %s, issue 15255", runtime.GOARCH)
+ case "mips", "mipsle", "mips64", "mips64le", "riscv64", "s390x":
+ t.Skipf("skipping on %s, issue 19160", runtime.GOARCH)
}
testDisasm(t, true)
}
@@ -194,6 +194,8 @@ func TestDisasmExtld(t *testing.T) {
t.Skipf("skipping on %s, no support for external linking, issue 9038", runtime.GOARCH)
case "mips64", "mips64le", "mips", "mipsle":
t.Skipf("skipping on %s, issue 12559 and 12560", runtime.GOARCH)
+ case "riscv64":
+ t.Skipf("skipping on %s, no support for external linking, issue 36739", runtime.GOARCH)
case "s390x":
t.Skipf("skipping on %s, issue 15255", runtime.GOARCH)
}
@@ -207,6 +209,8 @@ func TestDisasmGoobj(t *testing.T) {
switch runtime.GOARCH {
case "mips", "mipsle", "mips64", "mips64le":
t.Skipf("skipping on %s, issue 12559", runtime.GOARCH)
+ case "riscv64":
+ t.Skipf("skipping on %s, issue 36738", runtime.GOARCH)
case "s390x":
t.Skipf("skipping on %s, issue 15255", runtime.GOARCH)
}
diff --git a/src/cmd/objdump/testdata/fmthello.go b/src/cmd/objdump/testdata/fmthello.go
index e98268199d..fd16ebee1b 100644
--- a/src/cmd/objdump/testdata/fmthello.go
+++ b/src/cmd/objdump/testdata/fmthello.go
@@ -4,9 +4,15 @@ import "fmt"
func main() {
Println("hello, world")
+ if flag {
+ for {
+ }
+ }
}
//go:noinline
func Println(s string) {
fmt.Println(s)
}
+
+var flag bool
diff --git a/src/cmd/pprof/readlineui.go b/src/cmd/pprof/readlineui.go
index 5b9701a0e2..0c9fafdad7 100644
--- a/src/cmd/pprof/readlineui.go
+++ b/src/cmd/pprof/readlineui.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file contains an driver.UI implementation
+// This file contains a driver.UI implementation
// that provides the readline functionality if possible.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
diff --git a/src/cmd/trace/trace_test.go b/src/cmd/trace/trace_test.go
index 9e90f50d4b..ef2d06c961 100644
--- a/src/cmd/trace/trace_test.go
+++ b/src/cmd/trace/trace_test.go
@@ -12,7 +12,9 @@ import (
"io/ioutil"
rtrace "runtime/trace"
"strings"
+ "sync"
"testing"
+ "time"
)
// stacks is a fake stack map populated for test.
@@ -233,3 +235,34 @@ func TestFoo(t *testing.T) {
}
}
+
+func TestDirectSemaphoreHandoff(t *testing.T) {
+ prog0 := func() {
+ var mu sync.Mutex
+ var wg sync.WaitGroup
+ mu.Lock()
+ // This is modeled after src/sync/mutex_test.go to trigger Mutex
+ // starvation mode, in which the goroutine that calls Unlock hands off
+ // both the semaphore and its remaining time slice. See issue 36186.
+ for i := 0; i < 2; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 100; i++ {
+ mu.Lock()
+ time.Sleep(100 * time.Microsecond)
+ mu.Unlock()
+ }
+ }()
+ }
+ mu.Unlock()
+ wg.Wait()
+ }
+ if err := traceProgram(t, prog0, "TestDirectSemaphoreHandoff"); err != nil {
+ t.Fatalf("failed to trace the program: %v", err)
+ }
+ _, err := parseTrace()
+ if err != nil {
+ t.Fatalf("failed to parse the trace: %v", err)
+ }
+}
diff --git a/src/cmd/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/src/cmd/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
index 2f04ee5b5c..dd7378c8a3 100644
--- a/src/cmd/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ b/src/cmd/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -947,6 +947,10 @@ func readPasswordLine(reader io.Reader) ([]byte, error) {
n, err := reader.Read(buf[:])
if n > 0 {
switch buf[0] {
+ case '\b':
+ if len(ret) > 0 {
+ ret = ret[:len(ret)-1]
+ }
case '\n':
return ret, nil
case '\r':
diff --git a/src/cmd/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/src/cmd/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
index 5cfdf8f3f0..f614e9cb60 100644
--- a/src/cmd/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ b/src/cmd/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -85,8 +85,8 @@ func ReadPassword(fd int) ([]byte, error) {
}
old := st
- st &^= (windows.ENABLE_ECHO_INPUT)
- st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+ st &^= (windows.ENABLE_ECHO_INPUT | windows.ENABLE_LINE_INPUT)
+ st |= (windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_PROCESSED_INPUT)
if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil {
return nil, err
}
diff --git a/src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go b/src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go
index 3c8e67bc3d..467d25e689 100644
--- a/src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go
+++ b/src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go
@@ -4,9 +4,6 @@
// Package note defines the notes signed by the Go module database server.
//
-// This package is part of a DRAFT of what the Go module database server will look like.
-// Do not assume the details here are final!
-//
// A note is text signed by one or more server keys.
// The text should be ignored unless the note is signed by
// a trusted server key and the signature has been verified
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
new file mode 100644
index 0000000000..6db717de53
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
@@ -0,0 +1,54 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build riscv64,!gccgo
+
+#include "textflag.h"
+
+//
+// System calls for linux/riscv64.
+//
+// Where available, just jump to package syscall's implementation of
+// these functions.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV $0, A3
+ MOV $0, A4
+ MOV $0, A5
+ MOV $0, A6
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP) // r1
+ MOV A1, r2+40(FP) // r2
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV ZERO, A3
+ MOV ZERO, A4
+ MOV ZERO, A5
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP)
+ MOV A1, r2+40(FP)
+ RET
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
index 5c93a4f703..e6bfe71539 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
@@ -87,6 +87,7 @@ var (
asmArchMips64LE = asmArch{name: "mips64le", bigEndian: false, stack: "R29", lr: true}
asmArchPpc64 = asmArch{name: "ppc64", bigEndian: true, stack: "R1", lr: true}
asmArchPpc64LE = asmArch{name: "ppc64le", bigEndian: false, stack: "R1", lr: true}
+ asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true}
asmArchS390X = asmArch{name: "s390x", bigEndian: true, stack: "R15", lr: true}
asmArchWasm = asmArch{name: "wasm", bigEndian: false, stack: "SP", lr: false}
@@ -101,6 +102,7 @@ var (
&asmArchMips64LE,
&asmArchPpc64,
&asmArchPpc64LE,
+ &asmArchRISCV64,
&asmArchS390X,
&asmArchWasm,
}
diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt
index 757a4c3793..67697b4bf9 100644
--- a/src/cmd/vendor/modules.txt
+++ b/src/cmd/vendor/modules.txt
@@ -24,12 +24,12 @@ golang.org/x/arch/arm/armasm
golang.org/x/arch/arm64/arm64asm
golang.org/x/arch/ppc64/ppc64asm
golang.org/x/arch/x86/x86asm
-# golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
+# golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d
## explicit
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/ssh/terminal
-# golang.org/x/mod v0.1.1-0.20191126161957-788aebd06792
+# golang.org/x/mod v0.2.0
## explicit
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
diff --git a/src/crypto/elliptic/p256.go b/src/crypto/elliptic/p256.go
index aa9eeb5061..c23e414156 100644
--- a/src/crypto/elliptic/p256.go
+++ b/src/crypto/elliptic/p256.go
@@ -307,7 +307,7 @@ func p256Diff(out, in, in2 *[p256Limbs]uint32) {
}
// p256ReduceDegree sets out = tmp/R mod p where tmp contains 64-bit words with
-// the same 29,28,... bit positions as an field element.
+// the same 29,28,... bit positions as a field element.
//
// The values in field elements are in Montgomery form: x*R mod p where R =
// 2**257. Since we just multiplied two Montgomery values together, the result
diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go
index 029f7443d2..fac4b91473 100644
--- a/src/crypto/tls/conn.go
+++ b/src/crypto/tls/conn.go
@@ -1344,7 +1344,7 @@ func (c *Conn) Handshake() error {
if c.handshakeErr == nil {
c.handshakes++
} else {
- // If an error occurred during the hadshake try to flush the
+ // If an error occurred during the handshake try to flush the
// alert that might be left in the buffer.
c.flush()
}
diff --git a/src/crypto/tls/tls.go b/src/crypto/tls/tls.go
index 228f4a79ab..af44485f44 100644
--- a/src/crypto/tls/tls.go
+++ b/src/crypto/tls/tls.go
@@ -116,9 +116,10 @@ func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config) (*
if timeout != 0 {
errChannel = make(chan error, 2)
- time.AfterFunc(timeout, func() {
+ timer := time.AfterFunc(timeout, func() {
errChannel <- timeoutError{}
})
+ defer timer.Stop()
}
rawConn, err := dialer.Dial(network, addr)
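
The change keeps the *time.Timer returned by time.AfterFunc so it can be stopped when DialWithDialer returns, rather than leaving the timer live until it fires. The general pattern, as a sketch (doWork and the error text are invented):

    package main

    import (
        "errors"
        "time"
    )

    // doWork is an invented stand-in for the operation being guarded.
    func doWork() error { time.Sleep(10 * time.Millisecond); return nil }

    func withTimeout(timeout time.Duration) error {
        errc := make(chan error, 2) // buffered: neither send can block
        timer := time.AfterFunc(timeout, func() {
            errc <- errors.New("timed out")
        })
        defer timer.Stop() // release the timer instead of waiting for it to fire

        go func() { errc <- doWork() }()
        return <-errc
    }

    func main() { _ = withTimeout(time.Second) }
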
diff --git a/src/crypto/x509/root_cgo_darwin.go b/src/crypto/x509/root_cgo_darwin.go
index 255a8d3525..8a54282a6b 100644
--- a/src/crypto/x509/root_cgo_darwin.go
+++ b/src/crypto/x509/root_cgo_darwin.go
@@ -159,7 +159,7 @@ static Boolean isRootCertificate(SecCertificateRef cert, CFErrorRef *errRef) {
//
// Note: The CFDataRef returned in pemRoots and untrustedPemRoots must
// be released (using CFRelease) after we've consumed its content.
-int CopyPEMRoots(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots, bool debugDarwinRoots) {
+static int CopyPEMRoots(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots, bool debugDarwinRoots) {
int i;
if (debugDarwinRoots) {
diff --git a/src/crypto/x509/root_windows.go b/src/crypto/x509/root_windows.go
index 54ab1dcf9c..34d585318d 100644
--- a/src/crypto/x509/root_windows.go
+++ b/src/crypto/x509/root_windows.go
@@ -219,10 +219,26 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
if err != nil {
return nil, err
}
+ if len(chain) < 1 {
+ return nil, errors.New("x509: internal error: system verifier returned an empty chain")
+ }
- chains = append(chains, chain)
+ // Mitigate CVE-2020-0601, where the Windows system verifier might be
+ // tricked into using custom curve parameters for a trusted root, by
+ // double-checking all ECDSA signatures. If the system was tricked into
+ // using spoofed parameters, the signature will be invalid for the correct
+ // ones we parsed. (We don't support custom curves ourselves.)
+ for i, parent := range chain[1:] {
+ if parent.PublicKeyAlgorithm != ECDSA {
+ continue
+ }
+ if err := parent.CheckSignature(chain[i].SignatureAlgorithm,
+ chain[i].RawTBSCertificate, chain[i].Signature); err != nil {
+ return nil, err
+ }
+ }
- return chains, nil
+ return [][]*Certificate{chain}, nil
}
func loadSystemRoots() (*CertPool, error) {
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index ed0099e0e9..6f59260cda 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -629,7 +629,8 @@ func TestPoolExhaustOnCancel(t *testing.T) {
go func() {
rows, err := db.Query("SELECT|people|name,photo|")
if err != nil {
- t.Fatalf("Query: %v", err)
+ t.Errorf("Query: %v", err)
+ return
}
rows.Close()
saturateDone.Done()
@@ -637,6 +638,9 @@ func TestPoolExhaustOnCancel(t *testing.T) {
}
saturate.Wait()
+ if t.Failed() {
+ t.FailNow()
+ }
state = 2
// Now cancel the request while it is waiting.
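
The rewrite above follows the testing package's rule that Fatal and Fatalf may only be called from the goroutine running the test function: they call runtime.Goexit, so in a helper goroutine they would stop only that goroutine and silently skip the failure. The safe shape, sketched (doWork is invented):

    package sketch

    import (
        "sync"
        "testing"
    )

    // doWork is an invented stand-in for the code under test.
    func doWork() error { return nil }

    func TestWorkers(t *testing.T) {
        var wg sync.WaitGroup
        wg.Add(1)
        go func() {
            defer wg.Done()
            if err := doWork(); err != nil {
                // Errorf, never Fatalf: Fatalf calls runtime.Goexit and
                // may only run on the test function's own goroutine.
                t.Errorf("doWork: %v", err)
                return
            }
        }()
        wg.Wait()
        if t.Failed() {
            t.FailNow() // back on the test goroutine, so a Fatal-style exit is safe
        }
    }
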
diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go
index 41a06b26c8..6d2c8db42d 100644
--- a/src/encoding/gob/gobencdec_test.go
+++ b/src/encoding/gob/gobencdec_test.go
@@ -707,7 +707,7 @@ func TestGobEncoderExtraIndirect(t *testing.T) {
// Another bug: this caused a crash with the new Go1 Time type.
// We throw in a gob-encoding array, to test another case of isZero,
-// and a struct containing an nil interface, to test a third.
+// and a struct containing a nil interface, to test a third.
type isZeroBug struct {
T time.Time
S string
diff --git a/src/go.mod b/src/go.mod
index bfc7ae2d20..72114080ce 100644
--- a/src/go.mod
+++ b/src/go.mod
@@ -3,8 +3,8 @@ module std
go 1.14
require (
- golang.org/x/crypto v0.0.0-20191111213947-16651526fdb4
- golang.org/x/net v0.0.0-20191105084925-a882066a44e0
+ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d
+ golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933
golang.org/x/sys v0.0.0-20190529130038-5219a1e1c5f8 // indirect
golang.org/x/text v0.3.3-0.20191031172631-4b67af870c6f // indirect
)
diff --git a/src/go.sum b/src/go.sum
index a71e1d276b..9f24502dc2 100644
--- a/src/go.sum
+++ b/src/go.sum
@@ -1,9 +1,9 @@
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191111213947-16651526fdb4 h1:AGVXd+IAyeAb3FuQvYDYQ9+WR2JHm0+C0oYJaU1C4rs=
-golang.org/x/crypto v0.0.0-20191111213947-16651526fdb4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d h1:9FCpayM9Egr1baVnV1SX0H87m+XB0B8S0hAMi99X/3U=
+golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20191105084925-a882066a44e0 h1:QPlSTtPE2k6PZPasQUbzuK3p9JbS+vMXYVto8g/yrsg=
-golang.org/x/net v0.0.0-20191105084925-a882066a44e0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933 h1:e6HwijUxhDe+hPNjZQQn9bA5PW3vNmnN64U2ZW759Lk=
+golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190529130038-5219a1e1c5f8 h1:2WjIC11WRITGlVWmyLXKjzIVj1ZwoWZ//tadeUUV6/o=
diff --git a/src/go/ast/ast.go b/src/go/ast/ast.go
index 9e1da35287..aca510f6a5 100644
--- a/src/go/ast/ast.go
+++ b/src/go/ast/ast.go
@@ -292,7 +292,7 @@ type (
Rbrack token.Pos // position of "]"
}
- // An SliceExpr node represents an expression followed by slice indices.
+ // A SliceExpr node represents an expression followed by slice indices.
SliceExpr struct {
X Expr // expression
Lbrack token.Pos // position of "["
@@ -662,7 +662,7 @@ type (
Body *BlockStmt // CaseClauses only
}
- // An TypeSwitchStmt node represents a type switch statement.
+ // A TypeSwitchStmt node represents a type switch statement.
TypeSwitchStmt struct {
Switch token.Pos // position of "switch" keyword
Init Stmt // initialization statement; or nil
@@ -678,7 +678,7 @@ type (
Body []Stmt // statement list; or nil
}
- // An SelectStmt node represents a select statement.
+ // A SelectStmt node represents a select statement.
SelectStmt struct {
Select token.Pos // position of "select" keyword
Body *BlockStmt // CommClauses only
diff --git a/src/go/build/build.go b/src/go/build/build.go
index a4523a6eef..e89aa7708d 100644
--- a/src/go/build/build.go
+++ b/src/go/build/build.go
@@ -36,13 +36,13 @@ type Context struct {
GOROOT string // Go root
GOPATH string // Go path
- // WorkingDir is the caller's working directory, or the empty string to use
+ // Dir is the caller's working directory, or the empty string to use
// the current directory of the running process. In module mode, this is used
// to locate the main module.
//
- // If WorkingDir is non-empty, directories passed to Import and ImportDir must
+ // If Dir is non-empty, directories passed to Import and ImportDir must
// be absolute.
- WorkingDir string
+ Dir string
CgoEnabled bool // whether cgo files are included
UseAllFiles bool // use files regardless of +build lines, file names
@@ -905,6 +905,11 @@ Found:
}
// Record imports and information about cgo.
+ type importPos struct {
+ path string
+ pos token.Pos
+ }
+ var fileImports []importPos
isCgo := false
for _, decl := range pf.Decls {
d, ok := decl.(*ast.GenDecl)
@@ -921,13 +926,7 @@ Found:
if err != nil {
log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
}
- if isXTest {
- xTestImported[path] = append(xTestImported[path], fset.Position(spec.Pos()))
- } else if isTest {
- testImported[path] = append(testImported[path], fset.Position(spec.Pos()))
- } else {
- imported[path] = append(imported[path], fset.Position(spec.Pos()))
- }
+ fileImports = append(fileImports, importPos{path, spec.Pos()})
if path == "C" {
if isTest {
badFile(fmt.Errorf("use of cgo in test %s not supported", filename))
@@ -946,26 +945,35 @@ Found:
}
}
}
- if isCgo {
+
+ var fileList *[]string
+ var importMap map[string][]token.Position
+ switch {
+ case isCgo:
allTags["cgo"] = true
if ctxt.CgoEnabled {
- p.CgoFiles = append(p.CgoFiles, name)
+ fileList = &p.CgoFiles
+ importMap = imported
} else {
- p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ // Ignore imports from cgo files if cgo is disabled.
+ fileList = &p.IgnoredGoFiles
+ }
+ case isXTest:
+ fileList = &p.XTestGoFiles
+ importMap = xTestImported
+ case isTest:
+ fileList = &p.TestGoFiles
+ importMap = testImported
+ default:
+ fileList = &p.GoFiles
+ importMap = imported
+ }
+ *fileList = append(*fileList, name)
+ if importMap != nil {
+ for _, imp := range fileImports {
+ importMap[imp.path] = append(importMap[imp.path], fset.Position(imp.pos))
}
- } else if isXTest {
- p.XTestGoFiles = append(p.XTestGoFiles, name)
- } else if isTest {
- p.TestGoFiles = append(p.TestGoFiles, name)
- } else {
- p.GoFiles = append(p.GoFiles, name)
}
- }
- if badGoError != nil {
- return p, badGoError
- }
- if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
- return p, &NoGoError{p.Dir}
}
for tag := range allTags {
@@ -985,6 +993,12 @@ Found:
sort.Strings(p.SFiles)
}
+ if badGoError != nil {
+ return p, badGoError
+ }
+ if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
+ return p, &NoGoError{p.Dir}
+ }
return p, pkgerr
}
@@ -1027,8 +1041,8 @@ func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode)
var absSrcDir string
if filepath.IsAbs(srcDir) {
absSrcDir = srcDir
- } else if ctxt.WorkingDir != "" {
- return fmt.Errorf("go/build: WorkingDir is non-empty, so relative srcDir is not allowed: %v", srcDir)
+ } else if ctxt.Dir != "" {
+ return fmt.Errorf("go/build: Dir is non-empty, so relative srcDir is not allowed: %v", srcDir)
} else {
// Find the absolute source directory. hasSubdir does not handle
// relative paths (and can't because the callbacks don't support this).
@@ -1062,16 +1076,16 @@ func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode)
parent string
err error
)
- if ctxt.WorkingDir == "" {
+ if ctxt.Dir == "" {
parent, err = os.Getwd()
if err != nil {
// A nonexistent working directory can't be in a module.
return errNoModules
}
} else {
- parent, err = filepath.Abs(ctxt.WorkingDir)
+ parent, err = filepath.Abs(ctxt.Dir)
if err != nil {
- // If the caller passed a bogus WorkingDir explicitly, that's materially
+ // If the caller passed a bogus Dir explicitly, that's materially
// different from not having modules enabled.
return err
}
@@ -1091,8 +1105,8 @@ func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode)
cmd := exec.Command("go", "list", "-e", "-compiler="+ctxt.Compiler, "-tags="+strings.Join(ctxt.BuildTags, ","), "-installsuffix="+ctxt.InstallSuffix, "-f={{.Dir}}\n{{.ImportPath}}\n{{.Root}}\n{{.Goroot}}\n{{if .Error}}{{.Error}}{{end}}\n", "--", path)
- if ctxt.WorkingDir != "" {
- cmd.Dir = ctxt.WorkingDir
+ if ctxt.Dir != "" {
+ cmd.Dir = ctxt.Dir
}
var stdout, stderr strings.Builder
diff --git a/src/go/build/build_test.go b/src/go/build/build_test.go
index 1d14731983..05ddb49920 100644
--- a/src/go/build/build_test.go
+++ b/src/go/build/build_test.go
@@ -328,7 +328,7 @@ func TestImportDirNotExist(t *testing.T) {
defer os.RemoveAll(emptyDir)
ctxt.GOPATH = emptyDir
- ctxt.WorkingDir = emptyDir
+ ctxt.Dir = emptyDir
tests := []struct {
label string
@@ -340,20 +340,38 @@ func TestImportDirNotExist(t *testing.T) {
{"Import(full, FindOnly)", "go/build/doesnotexist", "", FindOnly},
{"Import(local, FindOnly)", "./doesnotexist", filepath.Join(ctxt.GOROOT, "src/go/build"), FindOnly},
}
- for _, test := range tests {
- p, err := ctxt.Import(test.path, test.srcDir, test.mode)
- if err == nil || !strings.HasPrefix(err.Error(), "cannot find package") {
- t.Errorf(`%s got error: %q, want "cannot find package" error`, test.label, err)
- }
- // If an error occurs, build.Import is documented to return
- // a non-nil *Package containing partial information.
- if p == nil {
- t.Fatalf(`%s got nil p, want non-nil *Package`, test.label)
- }
- // Verify partial information in p.
- if p.ImportPath != "go/build/doesnotexist" {
- t.Errorf(`%s got p.ImportPath: %q, want "go/build/doesnotexist"`, test.label, p.ImportPath)
- }
+
+ defer os.Setenv("GO111MODULE", os.Getenv("GO111MODULE"))
+
+ for _, GO111MODULE := range []string{"off", "on"} {
+ t.Run("GO111MODULE="+GO111MODULE, func(t *testing.T) {
+ os.Setenv("GO111MODULE", GO111MODULE)
+
+ for _, test := range tests {
+ p, err := ctxt.Import(test.path, test.srcDir, test.mode)
+
+ errOk := (err != nil && strings.HasPrefix(err.Error(), "cannot find package"))
+ wantErr := `"cannot find package" error`
+ if test.srcDir == "" {
+ if err != nil && strings.Contains(err.Error(), "is not in GOROOT") {
+ errOk = true
+ }
+ wantErr = `"cannot find package" or "is not in GOROOT" error`
+ }
+ if !errOk {
+ t.Errorf("%s got error: %q, want %s", test.label, err, wantErr)
+ }
+ // If an error occurs, build.Import is documented to return
+ // a non-nil *Package containing partial information.
+ if p == nil {
+ t.Fatalf(`%s got nil p, want non-nil *Package`, test.label)
+ }
+ // Verify partial information in p.
+ if p.ImportPath != "go/build/doesnotexist" {
+ t.Errorf(`%s got p.ImportPath: %q, want "go/build/doesnotexist"`, test.label, p.ImportPath)
+ }
+ }
+ })
}
}
@@ -459,7 +477,7 @@ func TestImportPackageOutsideModule(t *testing.T) {
os.Setenv("GOPATH", gopath)
ctxt := Default
ctxt.GOPATH = gopath
- ctxt.WorkingDir = filepath.Join(gopath, "src/example.com/p")
+ ctxt.Dir = filepath.Join(gopath, "src/example.com/p")
want := "cannot find module providing package"
if _, err := ctxt.Import("example.com/p", gopath, FindOnly); err == nil {
@@ -515,14 +533,38 @@ func TestMissingImportErrorRepetition(t *testing.T) {
os.Setenv("GO111MODULE", "on")
defer os.Setenv("GOPROXY", os.Getenv("GOPROXY"))
os.Setenv("GOPROXY", "off")
+ defer os.Setenv("GONOPROXY", os.Getenv("GONOPROXY"))
+ os.Setenv("GONOPROXY", "none")
ctxt := Default
- ctxt.WorkingDir = tmp
+ ctxt.Dir = tmp
pkgPath := "example.com/hello"
- if _, err = ctxt.Import(pkgPath, tmp, FindOnly); err == nil {
+ _, err = ctxt.Import(pkgPath, tmp, FindOnly)
+ if err == nil {
t.Fatal("unexpected success")
- } else if n := strings.Count(err.Error(), pkgPath); n != 1 {
+ }
+ // Don't count the package path with a URL like https://...?go-get=1.
+ // See golang.org/issue/35986.
+ errStr := strings.ReplaceAll(err.Error(), "://"+pkgPath+"?go-get=1", "://...?go-get=1")
+ if n := strings.Count(errStr, pkgPath); n != 1 {
t.Fatalf("package path %q appears in error %d times; should appear once\nerror: %v", pkgPath, n, err)
}
}
+
+// TestCgoImportsIgnored checks that imports in cgo files are not included
+// in the imports list when cgo is disabled.
+// Verifies golang.org/issue/35946.
+func TestCgoImportsIgnored(t *testing.T) {
+ ctxt := Default
+ ctxt.CgoEnabled = false
+ p, err := ctxt.ImportDir("testdata/cgo_disabled", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, path := range p.Imports {
+ if path == "should/be/ignored" {
+ t.Errorf("found import %q in ignored cgo file", path)
+ }
+ }
+}
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 753a7937e1..a64c2b3241 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -151,7 +151,7 @@ var pkgDeps = map[string][]string{
"syscall/js": {"L0"},
"internal/oserror": {"L0"},
"internal/syscall/unix": {"L0", "syscall"},
- "internal/syscall/windows": {"L0", "syscall", "internal/syscall/windows/sysdll"},
+ "internal/syscall/windows": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
"internal/syscall/windows/registry": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
"time": {
// "L0" without the "io" package:
@@ -168,7 +168,7 @@ var pkgDeps = map[string][]string{
},
"internal/cfg": {"L0"},
- "internal/poll": {"L0", "internal/oserror", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows"},
+ "internal/poll": {"L0", "internal/oserror", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows", "internal/syscall/unix"},
"internal/testlog": {"L0"},
"os": {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/testlog"},
"path/filepath": {"L2", "os", "syscall", "internal/syscall/windows"},
diff --git a/src/go/build/testdata/cgo_disabled/cgo_disabled.go b/src/go/build/testdata/cgo_disabled/cgo_disabled.go
new file mode 100644
index 0000000000..d1edb99fa4
--- /dev/null
+++ b/src/go/build/testdata/cgo_disabled/cgo_disabled.go
@@ -0,0 +1,5 @@
+package cgo_disabled
+
+import "C"
+
+import _ "should/be/ignored"
diff --git a/src/go/build/testdata/cgo_disabled/empty.go b/src/go/build/testdata/cgo_disabled/empty.go
new file mode 100644
index 0000000000..63afe42d6a
--- /dev/null
+++ b/src/go/build/testdata/cgo_disabled/empty.go
@@ -0,0 +1 @@
+package cgo_disabled
diff --git a/src/go/doc/example.go b/src/go/doc/example.go
index f337f2c2d7..a010d3a85a 100644
--- a/src/go/doc/example.go
+++ b/src/go/doc/example.go
@@ -62,6 +62,9 @@ func Examples(testFiles ...*ast.File) []*Example {
if !ok || f.Recv != nil {
continue
}
+ if params := f.Type.Params; len(params.List) != 0 {
+ continue // function has params; not a valid example
+ }
numDecl++
name := f.Name.Name
if isTest(name, "Test") || isTest(name, "Benchmark") {
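
After this change a function only counts as an example when it takes no parameters (and, as before, has no receiver). An illustration (Greet is an invented function under test):

    package sketch

    import "fmt"

    // Greet is an invented function under test.
    func Greet(who string) string { return "hello, " + who }

    // Still collected by go/doc: no receiver and no parameters.
    func ExampleGreet() {
        fmt.Println(Greet("gopher"))
        // Output: hello, gopher
    }

    // No longer counted after this change: it declares a parameter,
    // so it cannot be run as an example.
    func ExampleGreetBad(who string) { fmt.Println(Greet(who)) }
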
diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go
index af374b70c6..3756303dfb 100644
--- a/src/go/types/builtins.go
+++ b/src/go/types/builtins.go
@@ -559,7 +559,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
base := derefStructPtr(x.typ)
sel := selx.Sel.Name
- obj, index, indirect := check.LookupFieldOrMethod(base, false, check.pkg, sel)
+ obj, index, indirect := check.lookupFieldOrMethod(base, false, check.pkg, sel)
switch obj.(type) {
case nil:
check.invalidArg(x.pos(), "%s has no single field %s", base, sel)
diff --git a/src/go/types/call.go b/src/go/types/call.go
index 31f9372644..689ef8744c 100644
--- a/src/go/types/call.go
+++ b/src/go/types/call.go
@@ -370,7 +370,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
goto Error
}
- obj, index, indirect = check.LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
+ obj, index, indirect = check.lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
if obj == nil {
switch {
case index != nil:
diff --git a/src/go/types/check.go b/src/go/types/check.go
index eec33057de..b599df1c50 100644
--- a/src/go/types/check.go
+++ b/src/go/types/check.go
@@ -79,6 +79,7 @@ type Checker struct {
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
posMap map[*Interface][]token.Pos // maps interface types to lists of embedded interface positions
+ pkgCnt map[string]int // counts number of imported packages with a given name (for better error messages)
// information collected during type-checking of a set of package files
// (initialized by Files, valid only for the duration of check.Files;
@@ -190,6 +191,7 @@ func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Ch
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
posMap: make(map[*Interface][]token.Pos),
+ pkgCnt: make(map[string]int),
}
}
diff --git a/src/go/types/errors.go b/src/go/types/errors.go
index 23f2611b48..91b077163c 100644
--- a/src/go/types/errors.go
+++ b/src/go/types/errors.go
@@ -10,7 +10,7 @@ import (
"fmt"
"go/ast"
"go/token"
- "path"
+ "strconv"
"strings"
)
@@ -25,8 +25,13 @@ func unreachable() {
}
func (check *Checker) qualifier(pkg *Package) string {
+ // Qualify the package unless it's the package being type-checked.
if pkg != check.pkg {
- return path.Base(pkg.path) // avoid excessively long path names in error messages
+ // If the same package name was used by multiple packages, display the full path.
+ if check.pkgCnt[pkg.name] > 1 {
+ return strconv.Quote(pkg.path)
+ }
+ return pkg.name
}
return ""
}
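
Editor's note: the new qualifier prefers the bare package name and falls back to a strconv.Quote'd import path only when several imported packages share that name (counted in the pkgCnt map added below). A sketch of the same rule expressed through the exported types.Qualifier hook; the pkgCnt argument here is a hypothetical counter the caller maintains:

package main

import (
	"fmt"
	"go/types"
	"strconv"
)

// qualifier mimics Checker.qualifier: name only, unless ambiguous.
func qualifier(cur *types.Package, pkgCnt map[string]int) types.Qualifier {
	return func(p *types.Package) string {
		if p == cur {
			return ""
		}
		if pkgCnt[p.Name()] > 1 {
			return strconv.Quote(p.Path())
		}
		return p.Name()
	}
}

func main() {
	cur := types.NewPackage("example.com/m", "m")
	t1 := types.NewPackage("text/template", "template")
	t2 := types.NewPackage("html/template", "template")
	q := qualifier(cur, map[string]int{"template": 2})
	fmt.Println(q(t1), q(t2)) // "text/template" "html/template"
}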
diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go
index 648e100060..342c8baab2 100644
--- a/src/go/types/lookup.go
+++ b/src/go/types/lookup.go
@@ -33,19 +33,19 @@ package types
// the method's formal receiver base type, nor was the receiver addressable.
//
func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
- return (*Checker)(nil).LookupFieldOrMethod(T, addressable, pkg, name)
+ return (*Checker)(nil).lookupFieldOrMethod(T, addressable, pkg, name)
}
-// Internal use of Checker.LookupFieldOrMethod: If the obj result is a method
+// Internal use of Checker.lookupFieldOrMethod: If the obj result is a method
// associated with a concrete (non-interface) type, the method's signature
// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
// the method's type.
// TODO(gri) Now that we provide the *Checker, we can probably remove this
-// caveat by calling Checker.objDecl from LookupFieldOrMethod. Investigate.
+// caveat by calling Checker.objDecl from lookupFieldOrMethod. Investigate.
-// LookupFieldOrMethod is like the external version but completes interfaces
+// lookupFieldOrMethod is like the external version but completes interfaces
// as necessary.
-func (check *Checker) LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// Methods cannot be associated to a named pointer type
// (spec: "The type denoted by T is called the receiver base type;
// it must not be a pointer or interface type and it must be declared
@@ -55,7 +55,7 @@ func (check *Checker) LookupFieldOrMethod(T Type, addressable bool, pkg *Package
// not have found it for T (see also issue 8590).
if t, _ := T.(*Named); t != nil {
if p, _ := t.underlying.(*Pointer); p != nil {
- obj, index, indirect = check.lookupFieldOrMethod(p, false, pkg, name)
+ obj, index, indirect = check.rawLookupFieldOrMethod(p, false, pkg, name)
if _, ok := obj.(*Func); ok {
return nil, nil, false
}
@@ -63,7 +63,7 @@ func (check *Checker) LookupFieldOrMethod(T Type, addressable bool, pkg *Package
}
}
- return check.lookupFieldOrMethod(T, addressable, pkg, name)
+ return check.rawLookupFieldOrMethod(T, addressable, pkg, name)
}
// TODO(gri) The named type consolidation and seen maps below must be
@@ -71,8 +71,8 @@ func (check *Checker) LookupFieldOrMethod(T Type, addressable bool, pkg *Package
// types always have only one representation (even when imported
// indirectly via different packages.)
-// lookupFieldOrMethod should only be called by LookupFieldOrMethod and missingMethod.
-func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+// rawLookupFieldOrMethod should only be called by lookupFieldOrMethod and missingMethod.
+func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// WARNING: The code in this function is extremely subtle - do not modify casually!
// This function and NewMethodSet should be kept in sync.
@@ -297,7 +297,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method *
// A concrete type implements T if it implements all methods of T.
for _, m := range T.allMethods {
- obj, _, _ := check.lookupFieldOrMethod(V, false, m.pkg, m.name)
+ obj, _, _ := check.rawLookupFieldOrMethod(V, false, m.pkg, m.name)
// we must have a method (not a field of matching function type)
f, _ := obj.(*Func)
diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go
index d66a5428ff..839d076e36 100644
--- a/src/go/types/resolver.go
+++ b/src/go/types/resolver.go
@@ -188,6 +188,7 @@ func (check *Checker) importPackage(pos token.Pos, path, dir string) *Package {
// package should be complete or marked fake, but be cautious
if imp.complete || imp.fake {
check.impMap[key] = imp
+ check.pkgCnt[imp.name]++
return imp
}
diff --git a/src/go/types/testdata/issues.src b/src/go/types/testdata/issues.src
index 5ddf6e0e71..fe2407999c 100644
--- a/src/go/types/testdata/issues.src
+++ b/src/go/types/testdata/issues.src
@@ -4,8 +4,12 @@
package issues
-import "fmt"
-import syn "cmd/compile/internal/syntax"
+import (
+ "fmt"
+ syn "cmd/compile/internal/syntax"
+ t1 "text/template"
+ t2 "html/template"
+)
func issue7035() {
type T struct{ X int }
@@ -316,7 +320,7 @@ func issue28281g() (... /* ERROR expected type */ TT)
// Issue #26234: Make various field/method lookup errors easier to read by matching cmd/compile's output
func issue26234a(f *syn.File) {
- // The error message below should refer to the actual package path base (syntax)
+ // The error message below should refer to the actual package name (syntax)
// not the local package name (syn).
f.foo /* ERROR f.foo undefined \(type \*syntax.File has no field or method foo\) */
}
@@ -337,3 +341,15 @@ func issue26234b(x T) {
func issue26234c() {
T.x /* ERROR T.x undefined \(type T has no method x\) */ ()
}
+
+func issue35895() {
+ // T is defined in this package, don't qualify its name with the package name.
+ var _ T = 0 // ERROR cannot convert 0 \(untyped int constant\) to T
+
+ // There is only one package with name syntax imported, only use the (global) package name in error messages.
+ var _ *syn.File = 0 // ERROR cannot convert 0 \(untyped int constant\) to \*syntax.File
+
+ // Because both t1 and t2 have the same global package name (template),
+ // qualify packages with full path name in this case.
+ var _ t1.Template = t2 /* ERROR cannot use .* \(value of type "html/template".Template\) as "text/template".Template */ .Template{}
+} \ No newline at end of file
diff --git a/src/html/escape.go b/src/html/escape.go
index dae404fab1..1dc12873b0 100644
--- a/src/html/escape.go
+++ b/src/html/escape.go
@@ -12,7 +12,7 @@ import (
// These replacements permit compatibility with old numeric entities that
// assumed Windows-1252 encoding.
-// http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html#consume-a-character-reference
+// https://html.spec.whatwg.org/multipage/parsing.html#numeric-character-reference-end-state
var replacementTable = [...]rune{
'\u20AC', // First entry is what 0x80 should be replaced with.
'\u0081',
diff --git a/src/internal/poll/fcntl_js.go b/src/internal/poll/fcntl_js.go
new file mode 100644
index 0000000000..120fc1195f
--- /dev/null
+++ b/src/internal/poll/fcntl_js.go
@@ -0,0 +1,14 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js,wasm
+
+package poll
+
+import "syscall"
+
+// fcntl not supported on js/wasm
+func fcntl(fd int, cmd int, arg int) (int, error) {
+ return 0, syscall.ENOSYS
+}
diff --git a/src/internal/poll/fcntl_libc.go b/src/internal/poll/fcntl_libc.go
new file mode 100644
index 0000000000..642472bc2b
--- /dev/null
+++ b/src/internal/poll/fcntl_libc.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin solaris
+
+package poll
+
+import _ "unsafe" // for go:linkname
+
+// Implemented in the syscall package.
+//go:linkname fcntl syscall.fcntl
+func fcntl(fd int, cmd int, arg int) (int, error)
diff --git a/src/internal/poll/fcntl_syscall.go b/src/internal/poll/fcntl_syscall.go
new file mode 100644
index 0000000000..5ac814359a
--- /dev/null
+++ b/src/internal/poll/fcntl_syscall.go
@@ -0,0 +1,20 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd linux netbsd openbsd
+
+package poll
+
+import (
+ "internal/syscall/unix"
+ "syscall"
+)
+
+func fcntl(fd int, cmd int, arg int) (int, error) {
+ r, _, e := syscall.Syscall(unix.FcntlSyscall, uintptr(fd), uintptr(cmd), uintptr(arg))
+ if e != 0 {
+ return int(r), syscall.Errno(e)
+ }
+ return int(r), nil
+}
diff --git a/src/internal/poll/fd_fsync_darwin.go b/src/internal/poll/fd_fsync_darwin.go
index c68ec9782a..91751496a4 100644
--- a/src/internal/poll/fd_fsync_darwin.go
+++ b/src/internal/poll/fd_fsync_darwin.go
@@ -4,10 +4,7 @@
package poll
-import (
- "syscall"
- _ "unsafe" // for go:linkname
-)
+import "syscall"
// Fsync invokes SYS_FCNTL with SYS_FULLFSYNC because
// on OS X, SYS_FSYNC doesn't fully flush contents to disk.
@@ -21,7 +18,3 @@ func (fd *FD) Fsync() error {
_, e1 := fcntl(fd.Sysfd, syscall.F_FULLFSYNC, 0)
return e1
}
-
-// Implemented in syscall/syscall_darwin.go.
-//go:linkname fcntl syscall.fcntl
-func fcntl(fd int, cmd int, arg int) (int, error)
diff --git a/src/internal/poll/fd_fsync_posix.go b/src/internal/poll/fd_fsync_posix.go
index 0886d749d3..69358297f4 100644
--- a/src/internal/poll/fd_fsync_posix.go
+++ b/src/internal/poll/fd_fsync_posix.go
@@ -16,11 +16,3 @@ func (fd *FD) Fsync() error {
defer fd.decref()
return syscall.Fsync(fd.Sysfd)
}
-
-func fcntl(fd int, cmd int, arg int) (int, error) {
- r, _, e := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
- if e != 0 {
- return int(r), syscall.Errno(e)
- }
- return int(r), nil
-}
diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go
index 41d6ef593d..8752450a1f 100644
--- a/src/internal/poll/fd_unix.go
+++ b/src/internal/poll/fd_unix.go
@@ -451,7 +451,7 @@ var tryDupCloexec = int32(1)
// DupCloseOnExec dups fd and marks it close-on-exec.
func DupCloseOnExec(fd int) (int, string, error) {
- if atomic.LoadInt32(&tryDupCloexec) == 1 {
+ if syscall.F_DUPFD_CLOEXEC != 0 && atomic.LoadInt32(&tryDupCloexec) == 1 {
r0, e1 := fcntl(fd, syscall.F_DUPFD_CLOEXEC, 0)
if e1 == nil {
return r0, "", nil
diff --git a/src/internal/poll/splice_linux.go b/src/internal/poll/splice_linux.go
index 4f97298417..5b17ae8551 100644
--- a/src/internal/poll/splice_linux.go
+++ b/src/internal/poll/splice_linux.go
@@ -5,6 +5,7 @@
package poll
import (
+ "internal/syscall/unix"
"sync/atomic"
"syscall"
"unsafe"
@@ -169,7 +170,7 @@ func newTempPipe() (prfd, pwfd int, sc string, err error) {
defer atomic.StorePointer(&disableSplice, unsafe.Pointer(p))
// F_GETPIPE_SZ was added in 2.6.35, which does not have the -EAGAIN bug.
- if _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fds[0]), syscall.F_GETPIPE_SZ, 0); errno != 0 {
+ if _, _, errno := syscall.Syscall(unix.FcntlSyscall, uintptr(fds[0]), syscall.F_GETPIPE_SZ, 0); errno != 0 {
*p = true
destroyTempPipe(fds[0], fds[1])
return -1, -1, "fcntl", errno
diff --git a/src/internal/syscall/unix/fcntl_linux_32bit.go b/src/internal/syscall/unix/fcntl_linux_32bit.go
new file mode 100644
index 0000000000..6c75afc2ab
--- /dev/null
+++ b/src/internal/syscall/unix/fcntl_linux_32bit.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// On 32-bit Linux systems, use SYS_FCNTL64.
+// If you change the build tags here, see syscall/flock_linux_32bit.go.
+
+// +build linux,386 linux,arm linux,mips linux,mipsle
+
+package unix
+
+import "syscall"
+
+func init() {
+ FcntlSyscall = syscall.SYS_FCNTL64
+}
diff --git a/src/internal/syscall/unix/nonblocking.go b/src/internal/syscall/unix/nonblocking.go
index bcc350b56e..db25fcca98 100644
--- a/src/internal/syscall/unix/nonblocking.go
+++ b/src/internal/syscall/unix/nonblocking.go
@@ -2,14 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build aix dragonfly freebsd linux netbsd openbsd solaris
+// +build dragonfly freebsd linux netbsd openbsd
package unix
import "syscall"
+// FcntlSyscall is the number for the fcntl system call. This is
+// usually SYS_FCNTL, but can be overridden to SYS_FCNTL64.
+var FcntlSyscall uintptr = syscall.SYS_FCNTL
+
func IsNonblock(fd int) (nonblocking bool, err error) {
- flag, _, e1 := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(syscall.F_GETFL), 0)
+ flag, _, e1 := syscall.Syscall(FcntlSyscall, uintptr(fd), uintptr(syscall.F_GETFL), 0)
if e1 != 0 {
return false, e1
}
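
Editor's note: IsNonblock now routes through the overridable FcntlSyscall number so that 32-bit Linux can substitute SYS_FCNTL64 via the build-tagged init in the new fcntl_linux_32bit.go above. A self-contained sketch of the same pattern, assuming linux:

// +build linux

package main

import (
	"fmt"
	"syscall"
)

// fcntlNR plays the role of unix.FcntlSyscall; a build-tagged init
// (as in fcntl_linux_32bit.go) would reassign it to SYS_FCNTL64.
var fcntlNR uintptr = syscall.SYS_FCNTL

func isNonblock(fd int) (bool, error) {
	flag, _, e := syscall.Syscall(fcntlNR, uintptr(fd), uintptr(syscall.F_GETFL), 0)
	if e != 0 {
		return false, e
	}
	return flag&syscall.O_NONBLOCK != 0, nil
}

func main() {
	fmt.Println(isNonblock(0)) // stdin is normally blocking: false <nil>
}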
diff --git a/src/internal/syscall/unix/nonblocking_darwin.go b/src/internal/syscall/unix/nonblocking_libc.go
index e3dd3a06b0..37cc7943ee 100644
--- a/src/internal/syscall/unix/nonblocking_darwin.go
+++ b/src/internal/syscall/unix/nonblocking_libc.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin
+// +build aix darwin solaris
package unix
@@ -19,6 +19,6 @@ func IsNonblock(fd int) (nonblocking bool, err error) {
return flag&syscall.O_NONBLOCK != 0, nil
}
-// Implemented in syscall/syscall_darwin.go.
+// Implemented in the syscall package.
//go:linkname fcntl syscall.fcntl
func fcntl(fd int, cmd int, arg int) (int, error)
diff --git a/src/internal/syscall/windows/reparse_windows.go b/src/internal/syscall/windows/reparse_windows.go
index 610b733c4a..6e111392f0 100644
--- a/src/internal/syscall/windows/reparse_windows.go
+++ b/src/internal/syscall/windows/reparse_windows.go
@@ -60,8 +60,9 @@ type SymbolicLinkReparseBuffer struct {
// Path returns path stored in rb.
func (rb *SymbolicLinkReparseBuffer) Path() string {
- p := (*[0xffff]uint16)(unsafe.Pointer(&rb.PathBuffer[0]))
- return syscall.UTF16ToString(p[rb.SubstituteNameOffset/2 : (rb.SubstituteNameOffset+rb.SubstituteNameLength)/2])
+ n1 := rb.SubstituteNameOffset / 2
+ n2 := (rb.SubstituteNameOffset + rb.SubstituteNameLength) / 2
+ return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(&rb.PathBuffer[0]))[n1:n2:n2])
}
type MountPointReparseBuffer struct {
@@ -83,6 +84,7 @@ type MountPointReparseBuffer struct {
// Path returns path stored in rb.
func (rb *MountPointReparseBuffer) Path() string {
- p := (*[0xffff]uint16)(unsafe.Pointer(&rb.PathBuffer[0]))
- return syscall.UTF16ToString(p[rb.SubstituteNameOffset/2 : (rb.SubstituteNameOffset+rb.SubstituteNameLength)/2])
+ n1 := rb.SubstituteNameOffset / 2
+ n2 := (rb.SubstituteNameOffset + rb.SubstituteNameLength) / 2
+ return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(&rb.PathBuffer[0]))[n1:n2:n2])
}
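
Editor's note: both Path methods now use a full slice expression, [n1:n2:n2], which caps the result's capacity at n2 so a later append cannot reach into the rest of PathBuffer. A tiny illustration of the mechanism:

package main

import "fmt"

func main() {
	buf := []uint16{'g', 'o', 0, 0xFFFF}
	n1, n2 := 0, 2
	s := buf[n1:n2:n2]          // len 2, cap 2
	fmt.Println(len(s), cap(s)) // 2 2
	s = append(s, 'x')          // reallocates; buf[2] is untouched
	fmt.Println(buf[2])         // 0
}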
diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go
index 099e91ed68..dc641116ba 100644
--- a/src/internal/syscall/windows/syscall_windows.go
+++ b/src/internal/syscall/windows/syscall_windows.go
@@ -7,9 +7,29 @@ package windows
import (
"sync"
"syscall"
+ "unicode/utf16"
"unsafe"
)
+// UTF16PtrToString is like UTF16ToString, but takes *uint16
+// as a parameter instead of []uint16.
+// max is how many times p can be advanced looking for the null terminator.
+// If max is hit, the string is truncated at that point.
+func UTF16PtrToString(p *uint16, max int) string {
+ if p == nil {
+ return ""
+ }
+ // Find NUL terminator.
+ end := unsafe.Pointer(p)
+ n := 0
+ for *(*uint16)(end) != 0 && n < max {
+ end = unsafe.Pointer(uintptr(end) + unsafe.Sizeof(*p))
+ n++
+ }
+ s := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:n:n]
+ return string(utf16.Decode(s))
+}
+
const (
ERROR_SHARING_VIOLATION syscall.Errno = 32
ERROR_LOCK_VIOLATION syscall.Errno = 33
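
Editor's note: UTF16PtrToString lives in an internal package, so here is a standalone sketch of the same NUL-scanning decode loop, using a local buffer in place of a pointer returned by a Win32 API:

package main

import (
	"fmt"
	"unicode/utf16"
	"unsafe"
)

// utf16PtrToString mirrors the new helper: advance at most max code
// units looking for the NUL terminator, then decode the prefix.
func utf16PtrToString(p *uint16, max int) string {
	if p == nil {
		return ""
	}
	end := unsafe.Pointer(p)
	n := 0
	for *(*uint16)(end) != 0 && n < max {
		end = unsafe.Pointer(uintptr(end) + unsafe.Sizeof(*p))
		n++
	}
	s := (*[1 << 20]uint16)(unsafe.Pointer(p))[:n:n]
	return string(utf16.Decode(s))
}

func main() {
	buf := utf16.Encode([]rune("hello\x00world"))
	fmt.Println(utf16PtrToString(&buf[0], len(buf))) // hello
}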
diff --git a/src/io/example_test.go b/src/io/example_test.go
index edcd0086f5..2eaab678c9 100644
--- a/src/io/example_test.go
+++ b/src/io/example_test.go
@@ -59,7 +59,7 @@ func ExampleCopyN() {
func ExampleReadAtLeast() {
r := strings.NewReader("some io.Reader stream to be read\n")
- buf := make([]byte, 33)
+ buf := make([]byte, 14)
if _, err := io.ReadAtLeast(r, buf, 4); err != nil {
log.Fatal(err)
}
@@ -78,10 +78,9 @@ func ExampleReadAtLeast() {
}
// Output:
- // some io.Reader stream to be read
- //
+ // some io.Reader
// error: short buffer
- // error: EOF
+ // error: unexpected EOF
}
func ExampleReadFull() {
diff --git a/src/io/io_test.go b/src/io/io_test.go
index 0e4ce61240..ca90403c59 100644
--- a/src/io/io_test.go
+++ b/src/io/io_test.go
@@ -13,7 +13,7 @@ import (
"testing"
)
-// An version of bytes.Buffer without ReadFrom and WriteTo
+// A version of bytes.Buffer without ReadFrom and WriteTo
type Buffer struct {
bytes.Buffer
ReaderFrom // conflicts with and hides bytes.Buffer's ReaderFrom.
diff --git a/src/io/multi_test.go b/src/io/multi_test.go
index d34794a367..f05d5f74ef 100644
--- a/src/io/multi_test.go
+++ b/src/io/multi_test.go
@@ -286,7 +286,7 @@ func TestMultiReaderSingleByteWithEOF(t *testing.T) {
}
}
-// Test that a reader returning (n, EOF) at the end of an MultiReader
+// Test that a reader returning (n, EOF) at the end of a MultiReader
// chain continues to return EOF on its final read, rather than
// yielding a (0, EOF).
func TestMultiReaderFinalEOF(t *testing.T) {
diff --git a/src/math/big/arith_decl.go b/src/math/big/arith_decl.go
index 41e592334c..53ab012988 100644
--- a/src/math/big/arith_decl.go
+++ b/src/math/big/arith_decl.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !math_big_pure_go
+// +build !math_big_pure_go,!riscv64
package big
diff --git a/src/math/big/arith_decl_pure.go b/src/math/big/arith_decl_pure.go
index 305f7ee03b..4021a6d299 100644
--- a/src/math/big/arith_decl_pure.go
+++ b/src/math/big/arith_decl_pure.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build math_big_pure_go
+// +build math_big_pure_go riscv64
package big
diff --git a/src/math/big/int.go b/src/math/big/int.go
index bf1fa73cce..019af616d7 100644
--- a/src/math/big/int.go
+++ b/src/math/big/int.go
@@ -504,6 +504,8 @@ func (z *Int) Exp(x, y, m *Int) *Int {
// GCD sets z to the greatest common divisor of a and b and returns z.
// If x or y are not nil, GCD sets their value such that z = a*x + b*y.
+// As of Go 1.14, a and b may be zero or negative (before Go 1.14 both
+// a and b had to be > 0).
// Regardless of the signs of a and b, z is always >= 0.
// If a == b == 0, GCD sets z = x = y = 0.
// If a == 0 and b != 0, GCD sets z = |b|, x = 0, y = sign(b) * 1.
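
Editor's note: a quick check of the relaxed Go 1.14 contract, with a negative input and the Bézout identity z = a*x + b*y:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	a, b := big.NewInt(-6), big.NewInt(4)
	x, y, z := new(big.Int), new(big.Int), new(big.Int)
	z.GCD(x, y, a, b) // allowed as of Go 1.14: a is negative
	// z is always >= 0 and z == a*x + b*y.
	lhs := new(big.Int).Add(new(big.Int).Mul(a, x), new(big.Int).Mul(b, y))
	fmt.Println(z, lhs.Cmp(z) == 0) // 2 true
}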
diff --git a/src/math/rand/rand.go b/src/math/rand/rand.go
index 3e44613663..d6422c914d 100644
--- a/src/math/rand/rand.go
+++ b/src/math/rand/rand.go
@@ -298,7 +298,7 @@ var _ *rngSource = globalRand.src.(*lockedSource).src
// Seed uses the provided seed value to initialize the default Source to a
// deterministic state. If Seed is not called, the generator behaves as
// if seeded by Seed(1). Seed values that have the same remainder when
-// divided by 2^31-1 generate the same pseudo-random sequence.
+// divided by 2³¹-1 generate the same pseudo-random sequence.
// Seed, unlike the Rand.Seed method, is safe for concurrent use.
func Seed(seed int64) { globalRand.Seed(seed) }
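
Editor's note: the superscript is cosmetic, but the documented property is easy to demonstrate: two seeds that agree modulo 2³¹-1 drive the source to the same state.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const m = 1<<31 - 1 // 2³¹-1
	a := rand.New(rand.NewSource(42))
	b := rand.New(rand.NewSource(42 + m))
	fmt.Println(a.Int63() == b.Int63(), a.Int63() == b.Int63()) // true true
}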
diff --git a/src/math/stubs_riscv64.s b/src/math/stubs_riscv64.s
new file mode 100644
index 0000000000..6a122125e6
--- /dev/null
+++ b/src/math/stubs_riscv64.s
@@ -0,0 +1,113 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·Asin(SB),NOSPLIT,$0
+ JMP ·asin(SB)
+
+TEXT ·Acos(SB),NOSPLIT,$0
+ JMP ·acos(SB)
+
+TEXT ·Asinh(SB),NOSPLIT,$0
+ JMP ·asinh(SB)
+
+TEXT ·Acosh(SB),NOSPLIT,$0
+ JMP ·acosh(SB)
+
+TEXT ·Atan2(SB),NOSPLIT,$0
+ JMP ·atan2(SB)
+
+TEXT ·Atan(SB),NOSPLIT,$0
+ JMP ·atan(SB)
+
+TEXT ·Atanh(SB),NOSPLIT,$0
+ JMP ·atanh(SB)
+
+TEXT ·Min(SB),NOSPLIT,$0
+ JMP ·min(SB)
+
+TEXT ·Max(SB),NOSPLIT,$0
+ JMP ·max(SB)
+
+TEXT ·Erf(SB),NOSPLIT,$0
+ JMP ·erf(SB)
+
+TEXT ·Erfc(SB),NOSPLIT,$0
+ JMP ·erfc(SB)
+
+TEXT ·Exp2(SB),NOSPLIT,$0
+ JMP ·exp2(SB)
+
+TEXT ·Expm1(SB),NOSPLIT,$0
+ JMP ·expm1(SB)
+
+TEXT ·Exp(SB),NOSPLIT,$0
+ JMP ·exp(SB)
+
+TEXT ·Floor(SB),NOSPLIT,$0
+ JMP ·floor(SB)
+
+TEXT ·Ceil(SB),NOSPLIT,$0
+ JMP ·ceil(SB)
+
+TEXT ·Trunc(SB),NOSPLIT,$0
+ JMP ·trunc(SB)
+
+TEXT ·Frexp(SB),NOSPLIT,$0
+ JMP ·frexp(SB)
+
+TEXT ·Hypot(SB),NOSPLIT,$0
+ JMP ·hypot(SB)
+
+TEXT ·Ldexp(SB),NOSPLIT,$0
+ JMP ·ldexp(SB)
+
+TEXT ·Log10(SB),NOSPLIT,$0
+ JMP ·log10(SB)
+
+TEXT ·Log2(SB),NOSPLIT,$0
+ JMP ·log2(SB)
+
+TEXT ·Log1p(SB),NOSPLIT,$0
+ JMP ·log1p(SB)
+
+TEXT ·Log(SB),NOSPLIT,$0
+ JMP ·log(SB)
+
+TEXT ·Modf(SB),NOSPLIT,$0
+ JMP ·modf(SB)
+
+TEXT ·Mod(SB),NOSPLIT,$0
+ JMP ·mod(SB)
+
+TEXT ·Remainder(SB),NOSPLIT,$0
+ JMP ·remainder(SB)
+
+TEXT ·Sin(SB),NOSPLIT,$0
+ JMP ·sin(SB)
+
+TEXT ·Sinh(SB),NOSPLIT,$0
+ JMP ·sinh(SB)
+
+TEXT ·Cos(SB),NOSPLIT,$0
+ JMP ·cos(SB)
+
+TEXT ·Cosh(SB),NOSPLIT,$0
+ JMP ·cosh(SB)
+
+TEXT ·Sqrt(SB),NOSPLIT,$0
+ JMP ·sqrt(SB)
+
+TEXT ·Tan(SB),NOSPLIT,$0
+ JMP ·tan(SB)
+
+TEXT ·Tanh(SB),NOSPLIT,$0
+ JMP ·tanh(SB)
+
+TEXT ·Cbrt(SB),NOSPLIT,$0
+ JMP ·cbrt(SB)
+
+TEXT ·Pow(SB),NOSPLIT,$0
+ JMP ·pow(SB)
diff --git a/src/net/dial_test.go b/src/net/dial_test.go
index ae40079f85..493cdfc648 100644
--- a/src/net/dial_test.go
+++ b/src/net/dial_test.go
@@ -174,7 +174,7 @@ func dialClosedPort(t *testing.T) (actual, expected time.Duration) {
}
addr := l.Addr().String()
l.Close()
- // On OpenBSD, interference from TestSelfConnect is mysteriously
+ // On OpenBSD, interference from TestTCPSelfConnect is mysteriously
// causing the first attempt to hang for a few seconds, so we throw
// away the first result and keep the second.
for i := 1; ; i++ {
diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go
index 31cb6f721a..2ad40dfe02 100644
--- a/src/net/dnsclient_unix_test.go
+++ b/src/net/dnsclient_unix_test.go
@@ -173,7 +173,7 @@ func TestAvoidDNSName(t *testing.T) {
// Without stuff before onion/local, they're fine to
// use DNS. With a search path,
- // "onion.vegegtables.com" can use DNS. Without a
+ // "onion.vegetables.com" can use DNS. Without a
// search path (or with a trailing dot), the queries
// are just kinda useless, but don't reveal anything
// private.
diff --git a/src/net/http/client.go b/src/net/http/client.go
index 6a8c59a670..a496f1c0c7 100644
--- a/src/net/http/client.go
+++ b/src/net/http/client.go
@@ -288,10 +288,17 @@ func timeBeforeContextDeadline(t time.Time, ctx context.Context) bool {
// knownRoundTripperImpl reports whether rt is a RoundTripper that's
// maintained by the Go team and known to implement the latest
-// optional semantics (notably contexts).
-func knownRoundTripperImpl(rt RoundTripper) bool {
- switch rt.(type) {
- case *Transport, *http2Transport:
+// optional semantics (notably contexts). The Request is used
+// to check whether this particular request is using an alternate protocol,
+// in which case we need to check the RoundTripper for that protocol.
+func knownRoundTripperImpl(rt RoundTripper, req *Request) bool {
+ switch t := rt.(type) {
+ case *Transport:
+ if altRT := t.alternateRoundTripper(req); altRT != nil {
+ return knownRoundTripperImpl(altRT, req)
+ }
+ return true
+ case *http2Transport, http2noDialH2RoundTripper:
return true
}
// There's a very minor chance of a false positive with this.
@@ -319,7 +326,7 @@ func setRequestCancel(req *Request, rt RoundTripper, deadline time.Time) (stopTi
if deadline.IsZero() {
return nop, alwaysFalse
}
- knownTransport := knownRoundTripperImpl(rt)
+ knownTransport := knownRoundTripperImpl(rt, req)
oldCtx := req.Context()
if req.Cancel == nil && knownTransport {
diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go
index e8f7df29a1..4d6a085f60 100644
--- a/src/net/http/httputil/reverseproxy.go
+++ b/src/net/http/httputil/reverseproxy.go
@@ -24,6 +24,14 @@ import (
// ReverseProxy is an HTTP Handler that takes an incoming request and
// sends it to another server, proxying the response back to the
// client.
+//
+// ReverseProxy automatically sets the client IP as the value of the
+// X-Forwarded-For header.
+// If an X-Forwarded-For header already exists, the client IP is
+// appended to the existing values.
+// To prevent IP spoofing, be sure to delete any pre-existing
+// X-Forwarded-For header coming from the client or
+// an untrusted proxy.
type ReverseProxy struct {
// Director must be a function which modifies
// the request into a new request to be sent
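
Editor's note: a minimal sketch of the advice in the new doc paragraph, wrapping the default Director to drop any client-supplied X-Forwarded-For before the proxy appends its own (backend.internal is a placeholder upstream):

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	target, _ := url.Parse("http://backend.internal:8080") // hypothetical upstream
	proxy := httputil.NewSingleHostReverseProxy(target)
	director := proxy.Director
	proxy.Director = func(req *http.Request) {
		req.Header.Del("X-Forwarded-For") // discard the untrusted client value
		director(req)
	}
	log.Fatal(http.ListenAndServe(":8000", proxy))
}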
diff --git a/src/net/http/omithttp2.go b/src/net/http/omithttp2.go
index a0b33e9aad..307d93a3b1 100644
--- a/src/net/http/omithttp2.go
+++ b/src/net/http/omithttp2.go
@@ -36,6 +36,10 @@ type http2erringRoundTripper struct{}
func (http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) }
+type http2noDialH2RoundTripper struct{}
+
+func (http2noDialH2RoundTripper) RoundTrip(*Request) (*Response, error) { panic(noHTTP2) }
+
type http2noDialClientConnPool struct {
http2clientConnPool http2clientConnPool
}
diff --git a/src/net/http/request.go b/src/net/http/request.go
index 72261a1bd5..88fa0939f2 100644
--- a/src/net/http/request.go
+++ b/src/net/http/request.go
@@ -350,8 +350,8 @@ func (r *Request) Context() context.Context {
// sending the request, and reading the response headers and body.
//
// To create a new request with a context, use NewRequestWithContext.
-// To change the context of a request (such as an incoming) you then
-// also want to modify to send back out, use Request.Clone. Between
+// To change the context of a request, such as an incoming request you
+// want to modify before sending back out, use Request.Clone. Between
// those two uses, it's rare to need WithContext.
func (r *Request) WithContext(ctx context.Context) *Request {
if ctx == nil {
@@ -1223,17 +1223,17 @@ func parsePostForm(r *Request) (vs url.Values, err error) {
// For all requests, ParseForm parses the raw query from the URL and updates
// r.Form.
//
-// For POST, PUT, and PATCH requests, it also parses the request body as a form
-// and puts the results into both r.PostForm and r.Form. Request body parameters
-// take precedence over URL query string values in r.Form.
+// For POST, PUT, and PATCH requests, it also reads the request body, parses it
+// as a form and puts the results into both r.PostForm and r.Form. Request body
+// parameters take precedence over URL query string values in r.Form.
+//
+// If the request Body's size has not already been limited by MaxBytesReader,
+// the size is capped at 10MB.
//
// For other HTTP methods, or when the Content-Type is not
// application/x-www-form-urlencoded, the request Body is not read, and
// r.PostForm is initialized to a non-nil, empty value.
//
-// If the request Body's size has not already been limited by MaxBytesReader,
-// the size is capped at 10MB.
-//
// ParseMultipartForm calls ParseForm automatically.
// ParseForm is idempotent.
func (r *Request) ParseForm() error {
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go
index 1a690efb49..29b937993e 100644
--- a/src/net/http/serve_test.go
+++ b/src/net/http/serve_test.go
@@ -34,7 +34,6 @@ import (
"regexp"
"runtime"
"runtime/debug"
- "sort"
"strconv"
"strings"
"sync"
@@ -4116,14 +4115,49 @@ func TestServerConnState(t *testing.T) {
panic("intentional panic")
},
}
+
+ // A stateLog is a log of states over the lifetime of a connection.
+ type stateLog struct {
+ active net.Conn // The connection for which the log is recorded; set to the first connection seen in StateNew.
+ got []ConnState
+ want []ConnState
+ complete chan<- struct{} // If non-nil, closed when either 'got' is equal to 'want', or 'got' is no longer a prefix of 'want'.
+ }
+ activeLog := make(chan *stateLog, 1)
+
+ // wantLog invokes doRequests, then waits for the resulting connection to
+ // either pass through the sequence of states in want or enter a state outside
+ // of that sequence.
+ wantLog := func(doRequests func(), want ...ConnState) {
+ t.Helper()
+ complete := make(chan struct{})
+ activeLog <- &stateLog{want: want, complete: complete}
+
+ doRequests()
+
+ timer := time.NewTimer(5 * time.Second)
+ select {
+ case <-timer.C:
+ t.Errorf("Timed out waiting for connection to change state.")
+ case <-complete:
+ timer.Stop()
+ }
+ sl := <-activeLog
+ if !reflect.DeepEqual(sl.got, sl.want) {
+ t.Errorf("Request(s) produced unexpected state sequence.\nGot: %v\nWant: %v", sl.got, sl.want)
+ }
+ // Don't return sl to activeLog: we don't expect any further states after
+ // this point, and want to keep the ConnState callback blocked until the
+ // next call to wantLog.
+ }
+
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
handler[r.URL.Path](w, r)
}))
- defer ts.Close()
-
- var mu sync.Mutex // guard stateLog and connID
- var stateLog = map[int][]ConnState{}
- var connID = map[net.Conn]int{}
+ defer func() {
+ activeLog <- &stateLog{} // If the test failed, allow any remaining ConnState callbacks to complete.
+ ts.Close()
+ }()
ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
ts.Config.ConnState = func(c net.Conn, state ConnState) {
@@ -4131,20 +4165,27 @@ func TestServerConnState(t *testing.T) {
t.Errorf("nil conn seen in state %s", state)
return
}
- mu.Lock()
- defer mu.Unlock()
- id, ok := connID[c]
- if !ok {
- id = len(connID) + 1
- connID[c] = id
+ sl := <-activeLog
+ if sl.active == nil && state == StateNew {
+ sl.active = c
+ } else if sl.active != c {
+ t.Errorf("unexpected conn in state %s", state)
+ activeLog <- sl
+ return
+ }
+ sl.got = append(sl.got, state)
+ if sl.complete != nil && (len(sl.got) >= len(sl.want) || !reflect.DeepEqual(sl.got, sl.want[:len(sl.got)])) {
+ close(sl.complete)
+ sl.complete = nil
}
- stateLog[id] = append(stateLog[id], state)
+ activeLog <- sl
}
- ts.Start()
+ ts.Start()
c := ts.Client()
mustGet := func(url string, headers ...string) {
+ t.Helper()
req, err := NewRequest("GET", url, nil)
if err != nil {
t.Fatal(err)
@@ -4165,26 +4206,33 @@ func TestServerConnState(t *testing.T) {
}
}
- mustGet(ts.URL + "/")
- mustGet(ts.URL + "/close")
+ wantLog(func() {
+ mustGet(ts.URL + "/")
+ mustGet(ts.URL + "/close")
+ }, StateNew, StateActive, StateIdle, StateActive, StateClosed)
- mustGet(ts.URL + "/")
- mustGet(ts.URL+"/", "Connection", "close")
+ wantLog(func() {
+ mustGet(ts.URL + "/")
+ mustGet(ts.URL+"/", "Connection", "close")
+ }, StateNew, StateActive, StateIdle, StateActive, StateClosed)
- mustGet(ts.URL + "/hijack")
- mustGet(ts.URL + "/hijack-panic")
+ wantLog(func() {
+ mustGet(ts.URL + "/hijack")
+ }, StateNew, StateActive, StateHijacked)
- // New->Closed
- {
+ wantLog(func() {
+ mustGet(ts.URL + "/hijack-panic")
+ }, StateNew, StateActive, StateHijacked)
+
+ wantLog(func() {
c, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
c.Close()
- }
+ }, StateNew, StateClosed)
- // New->Active->Closed
- {
+ wantLog(func() {
c, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
@@ -4194,10 +4242,9 @@ func TestServerConnState(t *testing.T) {
}
c.Read(make([]byte, 1)) // block until server hangs up on us
c.Close()
- }
+ }, StateNew, StateActive, StateClosed)
- // New->Idle->Closed
- {
+ wantLog(func() {
c, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
@@ -4213,47 +4260,7 @@ func TestServerConnState(t *testing.T) {
t.Fatal(err)
}
c.Close()
- }
-
- want := map[int][]ConnState{
- 1: {StateNew, StateActive, StateIdle, StateActive, StateClosed},
- 2: {StateNew, StateActive, StateIdle, StateActive, StateClosed},
- 3: {StateNew, StateActive, StateHijacked},
- 4: {StateNew, StateActive, StateHijacked},
- 5: {StateNew, StateClosed},
- 6: {StateNew, StateActive, StateClosed},
- 7: {StateNew, StateActive, StateIdle, StateClosed},
- }
- logString := func(m map[int][]ConnState) string {
- var b bytes.Buffer
- var keys []int
- for id := range m {
- keys = append(keys, id)
- }
- sort.Ints(keys)
- for _, id := range keys {
- fmt.Fprintf(&b, "Conn %d: ", id)
- for _, s := range m[id] {
- fmt.Fprintf(&b, "%s ", s)
- }
- b.WriteString("\n")
- }
- return b.String()
- }
-
- for i := 0; i < 5; i++ {
- time.Sleep(time.Duration(i) * 50 * time.Millisecond)
- mu.Lock()
- match := reflect.DeepEqual(stateLog, want)
- mu.Unlock()
- if match {
- return
- }
- }
-
- mu.Lock()
- t.Errorf("Unexpected events.\nGot log:\n%s\n Want:\n%s\n", logString(stateLog), logString(want))
- mu.Unlock()
+ }, StateNew, StateActive, StateIdle, StateClosed)
}
func TestServerKeepAlivesEnabled(t *testing.T) {
@@ -6306,6 +6313,10 @@ func testContentEncodingNoSniffing(t *testing.T, h2 bool) {
// Issue 30803: ensure that TimeoutHandler logs spurious
// WriteHeader calls, for consistency with other Handlers.
func TestTimeoutHandlerSuperfluousLogs(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+
setParallel(t)
defer afterTest(t)
@@ -6314,29 +6325,30 @@ func TestTimeoutHandlerSuperfluousLogs(t *testing.T) {
testFuncName := runtime.FuncForPC(pc).Name()
timeoutMsg := "timed out here!"
- maxTimeout := 200 * time.Millisecond
tests := []struct {
- name string
- sleepTime time.Duration
- wantResp string
+ name string
+ mustTimeout bool
+ wantResp string
}{
{
- name: "return before timeout",
- sleepTime: 0,
- wantResp: "HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n",
+ name: "return before timeout",
+ wantResp: "HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n",
},
{
- name: "return after timeout",
- sleepTime: maxTimeout * 2,
+ name: "return after timeout",
+ mustTimeout: true,
wantResp: fmt.Sprintf("HTTP/1.1 503 Service Unavailable\r\nContent-Length: %d\r\n\r\n%s",
len(timeoutMsg), timeoutMsg),
},
}
for _, tt := range tests {
+ tt := tt
t.Run(tt.name, func(t *testing.T) {
- var lastSpuriousLine int32
+ exitHandler := make(chan bool, 1)
+ defer close(exitHandler)
+ lastLine := make(chan int, 1)
sh := HandlerFunc(func(w ResponseWriter, r *Request) {
w.WriteHeader(404)
@@ -6344,14 +6356,23 @@ func TestTimeoutHandlerSuperfluousLogs(t *testing.T) {
w.WriteHeader(404)
w.WriteHeader(404)
_, _, line, _ := runtime.Caller(0)
- atomic.StoreInt32(&lastSpuriousLine, int32(line))
-
- <-time.After(tt.sleepTime)
+ lastLine <- line
+ <-exitHandler
})
+ if !tt.mustTimeout {
+ exitHandler <- true
+ }
+
logBuf := new(bytes.Buffer)
srvLog := log.New(logBuf, "", 0)
- th := TimeoutHandler(sh, maxTimeout, timeoutMsg)
+ // When expecting to timeout, we'll keep the duration short.
+ dur := 20 * time.Millisecond
+ if !tt.mustTimeout {
+ // Otherwise, make it arbitrarily long to reduce the risk of flakes.
+ dur = 10 * time.Second
+ }
+ th := TimeoutHandler(sh, dur, timeoutMsg)
cst := newClientServerTest(t, h1Mode /* the test is protocol-agnostic */, th, optWithServerLog(srvLog))
defer cst.close()
@@ -6379,10 +6400,12 @@ func TestTimeoutHandlerSuperfluousLogs(t *testing.T) {
t.Fatalf("Server logs count mismatch\ngot %d, want %d\n\nGot\n%s\n", g, w, blob)
}
+ lastSpuriousLine := <-lastLine
+ firstSpuriousLine := lastSpuriousLine - 3
// Now ensure that the regexes match exactly.
// "http: superfluous response.WriteHeader call from <fn>.func\d.\d (<curFile>:lastSpuriousLine-[1, 3]"
for i, logEntry := range logEntries {
- wantLine := atomic.LoadInt32(&lastSpuriousLine) - 3 + int32(i)
+ wantLine := firstSpuriousLine + i
pat := fmt.Sprintf("^http: superfluous response.WriteHeader call from %s.func\\d+.\\d+ \\(%s:%d\\)$",
testFuncName, curFileBaseName, wantLine)
re := regexp.MustCompile(pat)
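
Editor's note: the rewritten test replaces the mutex-guarded map with a one-element channel that hands the single active stateLog back and forth between the test goroutine and the ConnState callback; holding the value off the channel is the critical section. The pattern in isolation:

package main

import "fmt"

type stateLog struct{ got []string }

func main() {
	active := make(chan *stateLog, 1)
	active <- &stateLog{} // exactly one value ever in flight

	record := func(s string) {
		sl := <-active // take ownership; concurrent callers block here
		sl.got = append(sl.got, s)
		active <- sl // hand it back
	}
	record("StateNew")
	record("StateActive")
	fmt.Println((<-active).got) // [StateNew StateActive]
}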
diff --git a/src/net/http/transfer.go b/src/net/http/transfer.go
index 1d6a987545..2e01a07f84 100644
--- a/src/net/http/transfer.go
+++ b/src/net/http/transfer.go
@@ -7,7 +7,6 @@ package http
import (
"bufio"
"bytes"
- "compress/gzip"
"errors"
"fmt"
"io"
@@ -467,34 +466,6 @@ func suppressedHeaders(status int) []string {
return nil
}
-// proxyingReadCloser is a composite type that accepts and proxies
-// io.Read and io.Close calls to its respective Reader and Closer.
-//
-// It is composed of:
-// a) a top-level reader e.g. the result of decompression
-// b) a symbolic Closer e.g. the result of decompression, the
-// original body and the connection itself.
-type proxyingReadCloser struct {
- io.Reader
- io.Closer
-}
-
-// multiCloser implements io.Closer and allows a bunch of io.Closer values
-// to all be closed once.
-// Example usage is with proxyingReadCloser if we are decompressing a response
-// body on the fly and would like to close both *gzip.Reader and underlying body.
-type multiCloser []io.Closer
-
-func (mc multiCloser) Close() error {
- var err error
- for _, c := range mc {
- if err1 := c.Close(); err1 != nil && err == nil {
- err = err1
- }
- }
- return err
-}
-
// msg is *Request or *Response.
func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
t := &transferReader{RequestMethod: "GET"}
@@ -572,7 +543,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
// Prepare body reader. ContentLength < 0 means chunked encoding
// or close connection when finished, since multipart is not supported yet
switch {
- case chunked(t.TransferEncoding) || implicitlyChunked(t.TransferEncoding):
+ case chunked(t.TransferEncoding):
if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
t.Body = NoBody
} else {
@@ -593,21 +564,6 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
}
}
- // Finally if "gzip" was one of the requested transfer-encodings,
- // we'll unzip the concatenated body/payload of the request.
- // TODO: As we support more transfer-encodings, extract
- // this code and apply the un-codings in reverse.
- if t.Body != NoBody && gzipped(t.TransferEncoding) {
- zr, err := gzip.NewReader(t.Body)
- if err != nil {
- return fmt.Errorf("http: failed to gunzip body: %v", err)
- }
- t.Body = &proxyingReadCloser{
- Reader: zr,
- Closer: multiCloser{zr, t.Body},
- }
- }
-
// Unify output
switch rr := msg.(type) {
case *Request:
@@ -627,41 +583,8 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
return nil
}
-// Checks whether chunked is the last part of the encodings stack
-func chunked(te []string) bool { return len(te) > 0 && te[len(te)-1] == "chunked" }
-
-// implicitlyChunked is a helper to check for implicity of chunked, because
-// RFC 7230 Section 3.3.1 says that the sender MUST apply chunked as the final
-// payload body to ensure that the message is framed for both the request
-// and the body. Since "identity" is incompatible with any other transformational
-// encoding cannot co-exist, the presence of "identity" will cause implicitlyChunked
-// to return false.
-func implicitlyChunked(te []string) bool {
- if len(te) == 0 { // No transfer-encodings passed in, so not implicitly chunked.
- return false
- }
- for _, tei := range te {
- if tei == "identity" {
- return false
- }
- }
- return true
-}
-
-func isGzipTransferEncoding(tei string) bool {
- // RFC 7230 4.2.3 requests that "x-gzip" SHOULD be considered the same as "gzip".
- return tei == "gzip" || tei == "x-gzip"
-}
-
-// Checks where either of "gzip" or "x-gzip" are contained in transfer encodings.
-func gzipped(te []string) bool {
- for _, tei := range te {
- if isGzipTransferEncoding(tei) {
- return true
- }
- }
- return false
-}
+// Checks whether chunked is part of the encodings stack
+func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
// Checks whether the encoding is explicitly "identity".
func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
@@ -697,47 +620,25 @@ func (t *transferReader) fixTransferEncoding() error {
encodings := strings.Split(raw[0], ",")
te := make([]string, 0, len(encodings))
-
- // When adding new encodings, please maintain the invariant:
- // if chunked encoding is present, it must always
- // come last and it must be applied only once.
- // See RFC 7230 Section 3.3.1 Transfer-Encoding.
- for i, encoding := range encodings {
+ // TODO: Even though we only support "identity" and "chunked"
+ // encodings, the loop below is designed with foresight. One
+ // invariant that must be maintained is that, if present,
+ // chunked encoding must always come first.
+ for _, encoding := range encodings {
encoding = strings.ToLower(strings.TrimSpace(encoding))
-
+ // "identity" encoding is not recorded
if encoding == "identity" {
- // "identity" should not be mixed with other transfer-encodings/compressions
- // because it means "no compression, no transformation".
- if len(encodings) != 1 {
- return &badStringError{`"identity" when present must be the only transfer encoding`, strings.Join(encodings, ",")}
- }
- // "identity" is not recorded.
break
}
-
- switch {
- case encoding == "chunked":
- // "chunked" MUST ALWAYS be the last
- // encoding as per the loop invariant.
- // That is:
- // Invalid: [chunked, gzip]
- // Valid: [gzip, chunked]
- if i+1 != len(encodings) {
- return &badStringError{"chunked must be applied only once, as the last encoding", strings.Join(encodings, ",")}
- }
- // Supported otherwise.
-
- case isGzipTransferEncoding(encoding):
- // Supported
-
- default:
+ if encoding != "chunked" {
return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", encoding)}
}
-
te = te[0 : len(te)+1]
te[len(te)-1] = encoding
}
-
+ if len(te) > 1 {
+ return &badStringError{"too many transfer encodings", strings.Join(te, ",")}
+ }
if len(te) > 0 {
// RFC 7230 3.3.2 says "A sender MUST NOT send a
// Content-Length header field in any message that
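
Editor's note: with the gzip transfer-encoding support reverted, any non-chunked, non-identity token is rejected up front again. That is observable from the exported API on a tree with this change applied:

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	raw := "HTTP/1.1 200 OK\r\nTransfer-Encoding: gzip, chunked\r\n\r\n0\r\n\r\n"
	_, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
	fmt.Println(err) // unsupported transfer encoding: "gzip"
}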
diff --git a/src/net/http/transfer_test.go b/src/net/http/transfer_test.go
index a8ce2d3709..65009ee8bf 100644
--- a/src/net/http/transfer_test.go
+++ b/src/net/http/transfer_test.go
@@ -7,7 +7,6 @@ package http
import (
"bufio"
"bytes"
- "compress/gzip"
"crypto/rand"
"fmt"
"io"
@@ -62,6 +61,7 @@ func TestFinalChunkedBodyReadEOF(t *testing.T) {
buf := make([]byte, len(want))
n, err := res.Body.Read(buf)
if n != len(want) || err != io.EOF {
+ t.Logf("body = %#v", res.Body)
t.Errorf("Read = %v, %v; want %d, EOF", n, err, len(want))
}
if string(buf) != want {
@@ -290,7 +290,7 @@ func TestFixTransferEncoding(t *testing.T) {
},
{
hdr: Header{"Transfer-Encoding": {"chunked, chunked", "identity", "chunked"}},
- wantErr: &badStringError{"chunked must be applied only once, as the last encoding", "chunked, chunked"},
+ wantErr: &badStringError{"too many transfer encodings", "chunked,chunked"},
},
{
hdr: Header{"Transfer-Encoding": {"chunked"}},
@@ -310,283 +310,3 @@ func TestFixTransferEncoding(t *testing.T) {
}
}
}
-
-func gzipIt(s string) string {
- buf := new(bytes.Buffer)
- gw := gzip.NewWriter(buf)
- gw.Write([]byte(s))
- gw.Close()
- return buf.String()
-}
-
-func TestUnitTestProxyingReadCloserClosesBody(t *testing.T) {
- var checker closeChecker
- buf := new(bytes.Buffer)
- buf.WriteString("Hello, Gophers!")
- prc := &proxyingReadCloser{
- Reader: buf,
- Closer: &checker,
- }
- prc.Close()
-
- read, err := ioutil.ReadAll(prc)
- if err != nil {
- t.Fatalf("Read error: %v", err)
- }
- if g, w := string(read), "Hello, Gophers!"; g != w {
- t.Errorf("Read mismatch: got %q want %q", g, w)
- }
-
- if checker.closed != true {
- t.Fatal("closeChecker.Close was never invoked")
- }
-}
-
-func TestGzipTransferEncoding_request(t *testing.T) {
- helloWorldGzipped := gzipIt("Hello, World!")
-
- tests := []struct {
- payload string
- wantErr string
- wantBody string
- }{
-
- {
- // The case of "chunked" properly applied as the last encoding
- // and a gzipped request payload that is streamed in 3 parts.
- payload: `POST / HTTP/1.1
-Host: golang.org
-Transfer-Encoding: gzip, chunked
-Content-Type: text/html; charset=UTF-8
-
-` + fmt.Sprintf("%02x\r\n%s\r\n%02x\r\n%s\r\n%02x\r\n%s\r\n0\r\n\r\n",
- 3, helloWorldGzipped[:3],
- 5, helloWorldGzipped[3:8],
- len(helloWorldGzipped)-8, helloWorldGzipped[8:]),
- wantBody: `Hello, World!`,
- },
-
- {
- // The request specifies "Transfer-Encoding: chunked" so its body must be left untouched.
- payload: `PUT / HTTP/1.1
-Host: golang.org
-Transfer-Encoding: chunked
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-` + fmt.Sprintf("%0x\r\n%s\r\n0\r\n\r\n", len(helloWorldGzipped), helloWorldGzipped),
- // We want that payload as it was sent.
- wantBody: helloWorldGzipped,
- },
-
- {
- // Valid request, the body doesn't have "Transfer-Encoding: chunked" but implicitly encoded
- // for chunking as per the advisory from RFC 7230 3.3.1 which advises for cases where.
- payload: `POST / HTTP/1.1
-Host: localhost
-Transfer-Encoding: gzip
-Content-Type: text/html; charset=UTF-8
-
-` + fmt.Sprintf("%0x\r\n%s\r\n0\r\n\r\n", len(helloWorldGzipped), helloWorldGzipped),
- wantBody: `Hello, World!`,
- },
-
- {
- // Invalid request, the body isn't chunked nor is the connection terminated immediately
- // hence invalid as per the advisory from RFC 7230 3.3.1 which advises for cases where
- // a Transfer-Encoding that isn't finally chunked is provided.
- payload: `PUT / HTTP/1.1
-Host: golang.org
-Transfer-Encoding: gzip
-Content-Length: 0
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-`,
- wantErr: `EOF`,
- },
-
- {
- // The case of chunked applied before another encoding.
- payload: `PUT / HTTP/1.1
-Location: golang.org
-Transfer-Encoding: chunked, gzip
-Content-Length: 0
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-`,
- wantErr: `chunked must be applied only once, as the last encoding "chunked, gzip"`,
- },
-
- {
- // The case of chunked properly applied as the
- // last encoding BUT with a bad "Content-Length".
- payload: `POST / HTTP/1.1
-Host: golang.org
-Transfer-Encoding: gzip, chunked
-Content-Length: 10
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-` + "0\r\n\r\n",
- wantErr: "EOF",
- },
- }
-
- for i, tt := range tests {
- req, err := ReadRequest(bufio.NewReader(strings.NewReader(tt.payload)))
- if tt.wantErr != "" {
- if err == nil || !strings.Contains(err.Error(), tt.wantErr) {
- t.Errorf("test %d. Error mismatch\nGot: %v\nWant: %s", i, err, tt.wantErr)
- }
- continue
- }
-
- if err != nil {
- t.Errorf("test %d. Unexpected ReadRequest error: %v\nPayload:\n%s", i, err, tt.payload)
- continue
- }
-
- got, err := ioutil.ReadAll(req.Body)
- req.Body.Close()
- if err != nil {
- t.Errorf("test %d. Failed to read response body: %v", i, err)
- }
- if g, w := string(got), tt.wantBody; g != w {
- t.Errorf("test %d. Request body mimsatch\nGot:\n%s\n\nWant:\n%s", i, g, w)
- }
- }
-}
-
-func TestGzipTransferEncoding_response(t *testing.T) {
- helloWorldGzipped := gzipIt("Hello, World!")
-
- tests := []struct {
- payload string
- wantErr string
- wantBody string
- }{
-
- {
- // The case of "chunked" properly applied as the last encoding
- // and a gzipped payload that is streamed in 3 parts.
- payload: `HTTP/1.1 302 Found
-Location: https://golang.org/
-Transfer-Encoding: gzip, chunked
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-` + fmt.Sprintf("%02x\r\n%s\r\n%02x\r\n%s\r\n%02x\r\n%s\r\n0\r\n\r\n",
- 3, helloWorldGzipped[:3],
- 5, helloWorldGzipped[3:8],
- len(helloWorldGzipped)-8, helloWorldGzipped[8:]),
- wantBody: `Hello, World!`,
- },
-
- {
- // The response specifies "Transfer-Encoding: chunked" so response body must be left untouched.
- payload: `HTTP/1.1 302 Found
-Location: https://golang.org/
-Transfer-Encoding: chunked
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-` + fmt.Sprintf("%0x\r\n%s\r\n0\r\n\r\n", len(helloWorldGzipped), helloWorldGzipped),
- // We want that payload as it was sent.
- wantBody: helloWorldGzipped,
- },
-
- {
- // Valid response, the body doesn't have "Transfer-Encoding: chunked" but implicitly encoded
- // for chunking as per the advisory from RFC 7230 3.3.1 which advises for cases where.
- payload: `HTTP/1.1 302 Found
-Location: https://golang.org/
-Transfer-Encoding: gzip
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-` + fmt.Sprintf("%0x\r\n%s\r\n0\r\n\r\n", len(helloWorldGzipped), helloWorldGzipped),
- wantBody: `Hello, World!`,
- },
-
- {
- // Invalid response, the body isn't chunked nor is the connection terminated immediately
- // hence invalid as per the advisory from RFC 7230 3.3.1 which advises for cases where
- // a Transfer-Encoding that isn't finally chunked is provided.
- payload: `HTTP/1.1 302 Found
-Location: https://golang.org/
-Transfer-Encoding: gzip
-Content-Length: 0
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-`,
- wantErr: `EOF`,
- },
-
- {
- // The case of chunked applied before another encoding.
- payload: `HTTP/1.1 302 Found
-Location: https://golang.org/
-Transfer-Encoding: chunked, gzip
-Content-Length: 0
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-`,
- wantErr: `chunked must be applied only once, as the last encoding "chunked, gzip"`,
- },
-
- {
- // The case of chunked properly applied as the
- // last encoding BUT with a bad "Content-Length".
- payload: `HTTP/1.1 302 Found
-Location: https://golang.org/
-Transfer-Encoding: gzip, chunked
-Content-Length: 10
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-` + "0\r\n\r\n",
- wantErr: "EOF",
- },
-
- {
- // Including "identity" more than once.
- payload: `HTTP/1.1 200 OK
-Location: https://golang.org/
-Transfer-Encoding: identity, identity
-Content-Length: 0
-Connection: close
-Content-Type: text/html; charset=UTF-8
-
-` + "0\r\n\r\n",
- wantErr: `"identity" when present must be the only transfer encoding "identity, identity"`,
- },
- }
-
- for i, tt := range tests {
- res, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.payload)), nil)
- if tt.wantErr != "" {
- if err == nil || !strings.Contains(err.Error(), tt.wantErr) {
- t.Errorf("test %d. Error mismatch\nGot: %v\nWant: %s", i, err, tt.wantErr)
- }
- continue
- }
-
- if err != nil {
- t.Errorf("test %d. Unexpected ReadResponse error: %v\nPayload:\n%s", i, err, tt.payload)
- continue
- }
-
- got, err := ioutil.ReadAll(res.Body)
- res.Body.Close()
- if err != nil {
- t.Errorf("test %d. Failed to read response body: %v", i, err)
- }
- if g, w := string(got), tt.wantBody; g != w {
- t.Errorf("test %d. Response body mimsatch\nGot:\n%s\n\nWant:\n%s", i, g, w)
- }
- }
-}
diff --git a/src/net/http/transport.go b/src/net/http/transport.go
index dd61617fd1..d0bfdb412c 100644
--- a/src/net/http/transport.go
+++ b/src/net/http/transport.go
@@ -469,6 +469,17 @@ func (t *Transport) useRegisteredProtocol(req *Request) bool {
return true
}
+// alternateRoundTripper returns the alternate RoundTripper to use
+// for this request if the Request's URL scheme requires one,
+// or nil for the normal case of using the Transport.
+func (t *Transport) alternateRoundTripper(req *Request) RoundTripper {
+ if !t.useRegisteredProtocol(req) {
+ return nil
+ }
+ altProto, _ := t.altProto.Load().(map[string]RoundTripper)
+ return altProto[req.URL.Scheme]
+}
+
// roundTrip implements a RoundTripper over HTTP.
func (t *Transport) roundTrip(req *Request) (*Response, error) {
t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
@@ -500,12 +511,9 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) {
}
}
- if t.useRegisteredProtocol(req) {
- altProto, _ := t.altProto.Load().(map[string]RoundTripper)
- if altRT := altProto[scheme]; altRT != nil {
- if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol {
- return resp, err
- }
+ if altRT := t.alternateRoundTripper(req); altRT != nil {
+ if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol {
+ return resp, err
}
}
if !isHTTP {
@@ -1559,22 +1567,54 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *pers
if hdr == nil {
hdr = make(Header)
}
+ if pa := cm.proxyAuth(); pa != "" {
+ hdr = hdr.Clone()
+ hdr.Set("Proxy-Authorization", pa)
+ }
connectReq := &Request{
Method: "CONNECT",
URL: &url.URL{Opaque: cm.targetAddr},
Host: cm.targetAddr,
Header: hdr,
}
- if pa := cm.proxyAuth(); pa != "" {
- connectReq.Header.Set("Proxy-Authorization", pa)
+
+ // If there's no done channel (no deadline or cancellation
+ // from the caller possible), at least set some (long)
+ // timeout here. This will make sure we don't block forever
+ // and leak a goroutine if the connection stops replying
+ // after the TCP connect.
+ connectCtx := ctx
+ if ctx.Done() == nil {
+ newCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
+ defer cancel()
+ connectCtx = newCtx
}
- connectReq.Write(conn)
- // Read response.
- // Okay to use and discard buffered reader here, because
- // TLS server will not speak until spoken to.
- br := bufio.NewReader(conn)
- resp, err := ReadResponse(br, connectReq)
+ didReadResponse := make(chan struct{}) // closed after CONNECT write+read is done or fails
+ var (
+ resp *Response
+ err error // write or read error
+ )
+ // Write the CONNECT request & read the response.
+ go func() {
+ defer close(didReadResponse)
+ err = connectReq.Write(conn)
+ if err != nil {
+ return
+ }
+ // Okay to use and discard buffered reader here, because
+ // TLS server will not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err = ReadResponse(br, connectReq)
+ }()
+ select {
+ case <-connectCtx.Done():
+ conn.Close()
+ <-didReadResponse
+ return nil, connectCtx.Err()
+ case <-didReadResponse:
+ // resp or err now set
+ }
if err != nil {
conn.Close()
return nil, err
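
Editor's note: dialConn now bounds the CONNECT write+read: it installs a fallback one-minute deadline when the caller supplied none, does the blocking I/O in a goroutine, and closes the connection on cancellation so that goroutine cannot leak (the bug in issue 28012). The shape of the pattern, extracted into a standalone sketch (boundedRoundTrip is a hypothetical name):

package main

import (
	"context"
	"errors"
	"fmt"
	"net"
	"time"
)

// boundedRoundTrip mirrors the dialConn change: bound a blocking
// write+read on conn with ctx, adding a fallback timeout when the
// caller supplied no deadline, and close conn on cancellation so the
// I/O goroutine always exits.
func boundedRoundTrip(ctx context.Context, conn net.Conn, do func() error) error {
	if ctx.Done() == nil {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
		defer cancel()
	}
	done := make(chan error, 1)
	go func() { done <- do() }()
	select {
	case <-ctx.Done():
		conn.Close() // unblocks the goroutine's pending read
		<-done
		return ctx.Err()
	case err := <-done:
		return err
	}
}

func main() {
	c1, c2 := net.Pipe()
	defer c2.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := boundedRoundTrip(ctx, c1, func() error {
		var buf [1]byte
		_, err := c1.Read(buf[:]) // blocks: the peer never replies
		return err
	})
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}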
diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go
index 517b03bf48..3ca7ce93b2 100644
--- a/src/net/http/transport_test.go
+++ b/src/net/http/transport_test.go
@@ -1442,6 +1442,72 @@ func TestTransportProxy(t *testing.T) {
}
}
+// Issue 28012: verify that the Transport closes its TCP connection to http proxies
+// when they're slow to reply to HTTPS CONNECT responses.
+func TestTransportProxyHTTPSConnectLeak(t *testing.T) {
+ setParallel(t)
+ defer afterTest(t)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ ln := newLocalListener(t)
+ defer ln.Close()
+ listenerDone := make(chan struct{})
+ go func() {
+ defer close(listenerDone)
+ c, err := ln.Accept()
+ if err != nil {
+ t.Errorf("Accept: %v", err)
+ return
+ }
+ defer c.Close()
+ // Read the CONNECT request
+ br := bufio.NewReader(c)
+ cr, err := ReadRequest(br)
+ if err != nil {
+ t.Errorf("proxy server failed to read CONNECT request")
+ return
+ }
+ if cr.Method != "CONNECT" {
+ t.Errorf("unexpected method %q", cr.Method)
+ return
+ }
+
+ // Now hang and never write a response; instead, cancel the request and wait
+ // for the client to close.
+ // (Prior to Issue 28012 being fixed, we never closed.)
+ cancel()
+ var buf [1]byte
+ _, err = br.Read(buf[:])
+ if err != io.EOF {
+ t.Errorf("proxy server Read err = %v; want EOF", err)
+ }
+ return
+ }()
+
+ c := &Client{
+ Transport: &Transport{
+ Proxy: func(*Request) (*url.URL, error) {
+ return url.Parse("http://" + ln.Addr().String())
+ },
+ },
+ }
+ req, err := NewRequestWithContext(ctx, "GET", "https://golang.fake.tld/", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = c.Do(req)
+ if err == nil {
+ t.Errorf("unexpected Get success")
+ }
+
+	// Wait unconditionally for the listener goroutine to exit: this should
+	// never hang, and if it does we want a full goroutine dump, which is
+	// exactly what the testing package gives us when the test run times out.
+ <-listenerDone
+}
+
// Issue 16997: test transport dial preserves typed errors
func TestTransportDialPreservesNetOpProxyError(t *testing.T) {
defer afterTest(t)
@@ -1484,6 +1550,44 @@ func TestTransportDialPreservesNetOpProxyError(t *testing.T) {
}
}
+// Issue 36431: calls to RoundTrip should not mutate t.ProxyConnectHeader.
+//
+// (A bug caused dialConn to instead write the per-request Proxy-Authorization
+// header through to the shared Header instance, introducing a data race.)
+func TestTransportProxyDialDoesNotMutateProxyConnectHeader(t *testing.T) {
+ setParallel(t)
+ defer afterTest(t)
+
+ proxy := httptest.NewTLSServer(NotFoundHandler())
+ defer proxy.Close()
+ c := proxy.Client()
+
+ tr := c.Transport.(*Transport)
+ tr.Proxy = func(*Request) (*url.URL, error) {
+ u, _ := url.Parse(proxy.URL)
+ u.User = url.UserPassword("aladdin", "opensesame")
+ return u, nil
+ }
+ h := tr.ProxyConnectHeader
+ if h == nil {
+ h = make(Header)
+ }
+ tr.ProxyConnectHeader = h.Clone()
+
+ req, err := NewRequest("GET", "https://golang.fake.tld/", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = c.Do(req)
+ if err == nil {
+ t.Errorf("unexpected Get success")
+ }
+
+ if !reflect.DeepEqual(tr.ProxyConnectHeader, h) {
+ t.Errorf("tr.ProxyConnectHeader = %v; want %v", tr.ProxyConnectHeader, h)
+ }
+}
+
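
The fix this test pins down: treat Transport.ProxyConnectHeader as shared and read-only, applying per-request additions to a clone, as dialConn now does. A hedged sketch of that copy-on-write shape (the helper name is illustrative):

	package main

	import "net/http"

	// connectHeader builds headers for a CONNECT request without mutating
	// the shared Transport-level Header.
	func connectHeader(shared http.Header, proxyAuth string) http.Header {
		h := shared
		if h == nil {
			h = make(http.Header)
		}
		if proxyAuth != "" {
			h = h.Clone() // per-request mutation goes to the clone
			h.Set("Proxy-Authorization", proxyAuth)
		}
		return h
	}

	func main() {}
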
// TestTransportGzipRecursive sends a gzip quine and checks that the
// client gets the same value back. This is more cute than anything,
// but checks that we don't recurse forever, and checks that
@@ -6039,3 +6143,35 @@ func TestTransportDecrementConnWhenIdleConnRemoved(t *testing.T) {
t.Errorf("error occurred: %v", err)
}
}
+
+// Issue 36820
+// Test that we use the older, backward-compatible cancellation protocol
+// when a RoundTripper is registered via RegisterProtocol.
+func TestAltProtoCancellation(t *testing.T) {
+ defer afterTest(t)
+ tr := &Transport{}
+ c := &Client{
+ Transport: tr,
+ Timeout: time.Millisecond,
+ }
+ tr.RegisterProtocol("timeout", timeoutProto{})
+ _, err := c.Get("timeout://bar.com/path")
+ if err == nil {
+ t.Error("request unexpectedly succeeded")
+ } else if !strings.Contains(err.Error(), timeoutProtoErr.Error()) {
+ t.Errorf("got error %q, does not contain expected string %q", err, timeoutProtoErr)
+ }
+}
+
+var timeoutProtoErr = errors.New("canceled as expected")
+
+type timeoutProto struct{}
+
+func (timeoutProto) RoundTrip(req *Request) (*Response, error) {
+ select {
+ case <-req.Cancel:
+ return nil, timeoutProtoErr
+ case <-time.After(5 * time.Second):
+ return nil, errors.New("request was not canceled")
+ }
+}
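
timeoutProto above exercises only the legacy channel. A RoundTripper registered this way would, in practice, want to honor both cancellation mechanisms; a sketch (the 5s ceiling is arbitrary):

	package main

	import (
		"errors"
		"net/http"
		"time"
	)

	type bothProto struct{}

	// RoundTrip honors the legacy Cancel channel, which this CL keeps
	// populated for registered protocols, and the request context.
	func (bothProto) RoundTrip(req *http.Request) (*http.Response, error) {
		select {
		case <-req.Cancel: // pre-context cancellation channel
			return nil, errors.New("canceled via req.Cancel")
		case <-req.Context().Done():
			return nil, req.Context().Err()
		case <-time.After(5 * time.Second): // arbitrary ceiling for the sketch
			return nil, errors.New("no cancellation arrived")
		}
	}

	func main() {
		tr := &http.Transport{}
		tr.RegisterProtocol("both", bothProto{})
	}
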
diff --git a/src/net/interface_windows.go b/src/net/interface_windows.go
index 28b0a65f66..544943278d 100644
--- a/src/net/interface_windows.go
+++ b/src/net/interface_windows.go
@@ -58,7 +58,7 @@ func interfaceTable(ifindex int) ([]Interface, error) {
if ifindex == 0 || ifindex == int(index) {
ifi := Interface{
Index: int(index),
- Name: syscall.UTF16ToString((*(*[10000]uint16)(unsafe.Pointer(aa.FriendlyName)))[:]),
+ Name: windows.UTF16PtrToString(aa.FriendlyName, 10000),
}
if aa.OperStatus == windows.IfOperStatusUp {
ifi.Flags |= FlagUp
diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go
index 8a41510daf..1663b78ef0 100644
--- a/src/net/lookup_test.go
+++ b/src/net/lookup_test.go
@@ -998,12 +998,16 @@ func TestConcurrentPreferGoResolversDial(t *testing.T) {
defer wg.Done()
_, err := r.LookupIPAddr(context.Background(), "google.com")
if err != nil {
- t.Fatalf("lookup failed for resolver %d: %q", index, err)
+ t.Errorf("lookup failed for resolver %d: %q", index, err)
}
}(resolver.Resolver, i)
}
wg.Wait()
+ if t.Failed() {
+ t.FailNow()
+ }
+
for i, resolver := range resolvers {
if !resolver.dialed {
t.Errorf("custom resolver %d not dialed during lookup", i)
diff --git a/src/net/lookup_windows.go b/src/net/lookup_windows.go
index cb840ae238..7d5c941956 100644
--- a/src/net/lookup_windows.go
+++ b/src/net/lookup_windows.go
@@ -6,6 +6,7 @@ package net
import (
"context"
+ "internal/syscall/windows"
"os"
"runtime"
"syscall"
@@ -233,7 +234,7 @@ func (*Resolver) lookupCNAME(ctx context.Context, name string) (string, error) {
defer syscall.DnsRecordListFree(r, 1)
resolved := resolveCNAME(syscall.StringToUTF16Ptr(name), r)
- cname := syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(resolved))[:])
+ cname := windows.UTF16PtrToString(resolved, 256)
return absDomainName([]byte(cname)), nil
}
@@ -277,7 +278,7 @@ func (*Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) {
mxs := make([]*MX, 0, 10)
for _, p := range validRecs(r, syscall.DNS_TYPE_MX, name) {
v := (*syscall.DNSMXData)(unsafe.Pointer(&p.Data[0]))
- mxs = append(mxs, &MX{absDomainName([]byte(syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.NameExchange))[:]))), v.Preference})
+ mxs = append(mxs, &MX{absDomainName([]byte(windows.UTF16PtrToString(v.NameExchange, 256))), v.Preference})
}
byPref(mxs).sort()
return mxs, nil
@@ -317,8 +318,8 @@ func (*Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) {
for _, p := range validRecs(r, syscall.DNS_TYPE_TEXT, name) {
d := (*syscall.DNSTXTData)(unsafe.Pointer(&p.Data[0]))
s := ""
- for _, v := range (*[1 << 10]*uint16)(unsafe.Pointer(&(d.StringArray[0])))[:d.StringCount] {
- s += syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(v))[:])
+ for _, v := range (*[1 << 10]*uint16)(unsafe.Pointer(&(d.StringArray[0])))[:d.StringCount:d.StringCount] {
+ s += windows.UTF16PtrToString(v, 1<<20)
}
txts = append(txts, s)
}
@@ -343,7 +344,7 @@ func (*Resolver) lookupAddr(ctx context.Context, addr string) ([]string, error)
ptrs := make([]string, 0, 10)
for _, p := range validRecs(r, syscall.DNS_TYPE_PTR, arpa) {
v := (*syscall.DNSPTRData)(unsafe.Pointer(&p.Data[0]))
- ptrs = append(ptrs, absDomainName([]byte(syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.Host))[:]))))
+ ptrs = append(ptrs, absDomainName([]byte(windows.UTF16PtrToString(v.Host, 256))))
}
return ptrs, nil
}
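
Throughout this file the old (*[N]uint16)(unsafe.Pointer(p))[:] idiom, which manufactures a slice far longer than the actual string, is replaced by the bounded internal/syscall/windows.UTF16PtrToString helper. A hedged sketch of what such a helper plausibly does (not the actual implementation):

	package main

	import (
		"unicode/utf16"
		"unsafe"
	)

	// utf16PtrToString decodes the NUL-terminated UTF-16 sequence at p,
	// scanning at most max elements.
	func utf16PtrToString(p *uint16, max int) string {
		if p == nil {
			return ""
		}
		// Find the NUL terminator, bounded by max.
		n := 0
		end := unsafe.Pointer(p)
		for n < max && *(*uint16)(end) != 0 {
			end = unsafe.Pointer(uintptr(end) + unsafe.Sizeof(*p))
			n++
		}
		// Slice exactly the n elements verified above, then decode.
		s := (*[1 << 29]uint16)(unsafe.Pointer(p))[:n:n]
		return string(utf16.Decode(s))
	}

	func main() {}
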
diff --git a/src/net/net.go b/src/net/net.go
index 38c6b99637..1d7e5e7f65 100644
--- a/src/net/net.go
+++ b/src/net/net.go
@@ -452,6 +452,7 @@ type OpError struct {
Addr Addr
// Err is the error that occurred during the operation.
+ // The Error method panics if the error is nil.
Err error
}
diff --git a/src/os/dir_darwin.go b/src/os/dir_darwin.go
index 2f9ba78d68..a274dd1268 100644
--- a/src/os/dir_darwin.go
+++ b/src/os/dir_darwin.go
@@ -24,6 +24,16 @@ func (d *dirInfo) close() {
d.dir = 0
}
+func (f *File) seekInvalidate() {
+ if f.dirinfo == nil {
+ return
+ }
+ // Free cached dirinfo, so we allocate a new one if we
+ // access this file as a directory again. See #35767.
+ f.dirinfo.close()
+ f.dirinfo = nil
+}
+
func (f *File) readdirnames(n int) (names []string, err error) {
if f.dirinfo == nil {
dir, call, errno := f.pfd.OpenDir()
diff --git a/src/os/dir_unix.go b/src/os/dir_unix.go
index e0c4989756..2856a2dc0f 100644
--- a/src/os/dir_unix.go
+++ b/src/os/dir_unix.go
@@ -26,6 +26,8 @@ const (
func (d *dirInfo) close() {}
+func (f *File) seekInvalidate() {}
+
func (f *File) readdirnames(n int) (names []string, err error) {
// If this file has no dirinfo, create one.
if f.dirinfo == nil {
diff --git a/src/os/env_windows.go b/src/os/env_windows.go
index e8f647e7ac..b1b1ee4b3e 100644
--- a/src/os/env_windows.go
+++ b/src/os/env_windows.go
@@ -23,16 +23,20 @@ func environForSysProcAttr(sys *syscall.SysProcAttr) (env []string, err error) {
defer windows.DestroyEnvironmentBlock(block)
blockp := uintptr(unsafe.Pointer(block))
for {
- entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:]
- for i, v := range entry {
- if v == 0 {
- entry = entry[:i]
- break
- }
+
+ // find NUL terminator
+ end := unsafe.Pointer(blockp)
+ for *(*uint16)(end) != 0 {
+ end = unsafe.Pointer(uintptr(end) + 2)
}
- if len(entry) == 0 {
+
+ n := (uintptr(end) - uintptr(unsafe.Pointer(blockp))) / 2
+ if n == 0 {
+ // environment block ends with empty string
break
}
+
+ entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:n:n]
env = append(env, string(utf16.Decode(entry)))
blockp += 2 * (uintptr(len(entry)) + 1)
}
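
The same scan, in safe Go terms: a Windows environment block is a sequence of NUL-terminated UTF-16 strings ending with an empty string. A sketch over a []uint16 rather than a raw pointer:

	package main

	import "unicode/utf16"

	// parseEnvBlock splits a block of the form "k1=v1\x00k2=v2\x00\x00"
	// into its entries, mirroring the loop above.
	func parseEnvBlock(block []uint16) []string {
		var env []string
		for len(block) > 0 {
			i := 0
			for i < len(block) && block[i] != 0 {
				i++
			}
			if i == 0 {
				break // an empty entry terminates the block
			}
			env = append(env, string(utf16.Decode(block[:i])))
			if i == len(block) {
				break // unterminated final entry; stop anyway
			}
			block = block[i+1:]
		}
		return env
	}

	func main() {}
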
diff --git a/src/os/exec/exec.go b/src/os/exec/exec.go
index 0c17b7a35d..3474ae0ca4 100644
--- a/src/os/exec/exec.go
+++ b/src/os/exec/exec.go
@@ -238,7 +238,6 @@ func (c *Cmd) argv() []string {
// skipStdinCopyError optionally specifies a function which reports
// whether the provided stdin copy error should be ignored.
-// It is non-nil everywhere but Plan 9, which lacks EPIPE. See exec_posix.go.
var skipStdinCopyError func(error) bool
func (c *Cmd) stdin() (f *os.File, err error) {
diff --git a/src/os/exec/exec_plan9.go b/src/os/exec/exec_plan9.go
new file mode 100644
index 0000000000..d90bd04399
--- /dev/null
+++ b/src/os/exec/exec_plan9.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exec
+
+import "os"
+
+func init() {
+ skipStdinCopyError = func(err error) bool {
+ // Ignore hungup errors copying to stdin if the program
+ // completed successfully otherwise.
+ // See Issue 35753.
+ pe, ok := err.(*os.PathError)
+ return ok &&
+ pe.Op == "write" && pe.Path == "|1" &&
+ pe.Err.Error() == "i/o on hungup channel"
+ }
+}
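
Each platform file installs its predicate in init, and the stdin copy path consults it. A hedged sketch of the consult site (simplified; the real logic lives in (*Cmd).stdin's copy goroutine, and the helper name here is illustrative):

	package exec

	import "io"

	// skipStdinCopyError mirrors the package variable above; per-OS files
	// install the predicate in init.
	var skipStdinCopyError func(error) bool

	// finishStdinCopy shows how the hook is applied after copying to a
	// child's stdin.
	func finishStdinCopy(dst io.WriteCloser, copyErr error) error {
		err := copyErr
		if skip := skipStdinCopyError; skip != nil && skip(err) {
			err = nil // benign on this platform (EPIPE, or Plan 9's hungup channel)
		}
		if closeErr := dst.Close(); err == nil {
			err = closeErr
		}
		return err
	}
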
diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go
index 19bda6902a..dce66c5c2e 100644
--- a/src/os/exec/exec_test.go
+++ b/src/os/exec/exec_test.go
@@ -974,11 +974,6 @@ func (delayedInfiniteReader) Read(b []byte) (int, error) {
func TestIgnorePipeErrorOnSuccess(t *testing.T) {
testenv.MustHaveExec(t)
- // We really only care about testing this on Unixy and Windowsy things.
- if runtime.GOOS == "plan9" {
- t.Skipf("skipping test on %q", runtime.GOOS)
- }
-
testWith := func(r io.Reader) func(*testing.T) {
return func(t *testing.T) {
cmd := helperCommand(t, "echo", "foo")
diff --git a/src/os/exec_windows.go b/src/os/exec_windows.go
index 38293a0d28..10503c595f 100644
--- a/src/os/exec_windows.go
+++ b/src/os/exec_windows.go
@@ -6,11 +6,11 @@ package os
import (
"errors"
+ "internal/syscall/windows"
"runtime"
"sync/atomic"
"syscall"
"time"
- "unsafe"
)
func (p *Process) wait() (ps *ProcessState, err error) {
@@ -98,8 +98,7 @@ func findProcess(pid int) (p *Process, err error) {
}
func init() {
- p := syscall.GetCommandLine()
- cmd := syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(p))[:])
+ cmd := windows.UTF16PtrToString(syscall.GetCommandLine(), 0xffff)
if len(cmd) == 0 {
arg0, _ := Executable()
Args = []string{arg0}
diff --git a/src/os/file.go b/src/os/file.go
index 7995de79bf..9f8c82718b 100644
--- a/src/os/file.go
+++ b/src/os/file.go
@@ -204,6 +204,10 @@ func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
// relative to the current offset, and 2 means relative to the end.
// It returns the new offset and an error, if any.
// The behavior of Seek on a file opened with O_APPEND is not specified.
+//
+// If f is a directory, the behavior of Seek varies by operating
+// system; you can seek to the beginning of the directory on Unix-like
+// operating systems, but not on Windows.
func (f *File) Seek(offset int64, whence int) (ret int64, err error) {
if err := f.checkValid("seek"); err != nil {
return 0, err
diff --git a/src/os/file_unix.go b/src/os/file_unix.go
index 31c43eb61e..6945937fd6 100644
--- a/src/os/file_unix.go
+++ b/src/os/file_unix.go
@@ -295,6 +295,7 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) {
// relative to the current offset, and 2 means relative to the end.
// It returns the new offset and an error, if any.
func (f *File) seek(offset int64, whence int) (ret int64, err error) {
+ f.seekInvalidate()
ret, err = f.pfd.Seek(offset, whence)
runtime.KeepAlive(f)
return ret, err
diff --git a/src/os/os_test.go b/src/os/os_test.go
index 02c80f3d81..278c19e44b 100644
--- a/src/os/os_test.go
+++ b/src/os/os_test.go
@@ -2406,3 +2406,45 @@ func TestUserHomeDir(t *testing.T) {
t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode())
}
}
+
+func TestDirSeek(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ testenv.SkipFlaky(t, 36019)
+ }
+ wd, err := Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ f, err := Open(wd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ dirnames1, err := f.Readdirnames(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ret, err := f.Seek(0, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ret != 0 {
+ t.Fatalf("seek result not zero: %d", ret)
+ }
+
+ dirnames2, err := f.Readdirnames(0)
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ if len(dirnames1) != len(dirnames2) {
+ t.Fatalf("listings have different lengths: %d and %d\n", len(dirnames1), len(dirnames2))
+ }
+ for i, n1 := range dirnames1 {
+ n2 := dirnames2[i]
+ if n1 != n2 {
+ t.Fatalf("different name i=%d n1=%s n2=%s\n", i, n1, n2)
+ }
+ }
+}
diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go
index 651fe63b3f..8c14103143 100644
--- a/src/os/os_windows_test.go
+++ b/src/os/os_windows_test.go
@@ -263,7 +263,8 @@ func createMountPoint(link string, target *reparseData) error {
buf.SubstituteNameLength = target.substituteName.length
buf.PrintNameOffset = target.printName.offset
buf.PrintNameLength = target.printName.length
- copy((*[2048]uint16)(unsafe.Pointer(&buf.PathBuffer[0]))[:], target.pathBuf)
+ pbuflen := len(target.pathBuf)
+ copy((*[2048]uint16)(unsafe.Pointer(&buf.PathBuffer[0]))[:pbuflen:pbuflen], target.pathBuf)
var rdb _REPARSE_DATA_BUFFER
rdb.header.ReparseTag = windows.IO_REPARSE_TAG_MOUNT_POINT
@@ -356,7 +357,8 @@ func createSymbolicLink(link string, target *reparseData, isrelative bool) error
if isrelative {
buf.Flags = windows.SYMLINK_FLAG_RELATIVE
}
- copy((*[2048]uint16)(unsafe.Pointer(&buf.PathBuffer[0]))[:], target.pathBuf)
+ pbuflen := len(target.pathBuf)
+ copy((*[2048]uint16)(unsafe.Pointer(&buf.PathBuffer[0]))[:pbuflen:pbuflen], target.pathBuf)
var rdb _REPARSE_DATA_BUFFER
rdb.header.ReparseTag = syscall.IO_REPARSE_TAG_SYMLINK
@@ -714,7 +716,7 @@ func TestReadStdin(t *testing.T) {
if n > consoleSize {
n = consoleSize
}
- n = copy((*[10000]uint16)(unsafe.Pointer(buf))[:n], s16)
+ n = copy((*[10000]uint16)(unsafe.Pointer(buf))[:n:n], s16)
s16 = s16[n:]
*read = uint32(n)
t.Logf("read %d -> %d", toread, *read)
diff --git a/src/os/user/lookup_windows.go b/src/os/user/lookup_windows.go
index 7499f6a470..faaddd2341 100644
--- a/src/os/user/lookup_windows.go
+++ b/src/os/user/lookup_windows.go
@@ -44,11 +44,7 @@ func lookupFullNameServer(servername, username string) (string, error) {
}
defer syscall.NetApiBufferFree(p)
i := (*syscall.UserInfo10)(unsafe.Pointer(p))
- if i.FullName == nil {
- return "", nil
- }
- name := syscall.UTF16ToString((*[1024]uint16)(unsafe.Pointer(i.FullName))[:])
- return name, nil
+ return windows.UTF16PtrToString(i.FullName, 1024), nil
}
func lookupFullName(domain, username, domainAndUser string) (string, error) {
@@ -165,14 +161,13 @@ func listGroupsForUsernameAndDomain(username, domain string) ([]string, error) {
if entriesRead == 0 {
return nil, fmt.Errorf("listGroupsForUsernameAndDomain: NetUserGetLocalGroups() returned an empty list for domain: %s, username: %s", domain, username)
}
- entries := (*[1024]windows.LocalGroupUserInfo0)(unsafe.Pointer(p0))[:entriesRead]
+ entries := (*[1024]windows.LocalGroupUserInfo0)(unsafe.Pointer(p0))[:entriesRead:entriesRead]
var sids []string
for _, entry := range entries {
if entry.Name == nil {
continue
}
- name := syscall.UTF16ToString((*[1024]uint16)(unsafe.Pointer(entry.Name))[:])
- sid, err := lookupGroupName(name)
+ sid, err := lookupGroupName(windows.UTF16PtrToString(entry.Name, 1024))
if err != nil {
return nil, err
}
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 7443666fa6..5f2f600174 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -4853,6 +4853,9 @@ func TestStructOfExportRules(t *testing.T) {
if exported != test.exported {
t.Errorf("test-%d: got exported=%v want exported=%v", i, exported, test.exported)
}
+ if field.PkgPath != test.field.PkgPath {
+				t.Errorf("test-%d: got PkgPath=%q want PkgPath=%q", i, field.PkgPath, test.field.PkgPath)
+ }
})
}
}
@@ -5308,6 +5311,24 @@ func TestStructOfTooManyFields(t *testing.T) {
}
}
+func TestStructOfDifferentPkgPath(t *testing.T) {
+ fields := []StructField{
+ {
+ Name: "f1",
+ PkgPath: "p1",
+ Type: TypeOf(int(0)),
+ },
+ {
+ Name: "f2",
+ PkgPath: "p2",
+ Type: TypeOf(int(0)),
+ },
+ }
+ shouldPanic(func() {
+ StructOf(fields)
+ })
+}
+
func TestChanOf(t *testing.T) {
// check construction and use of type not in binary
type T string
diff --git a/src/reflect/asm_riscv64.s b/src/reflect/asm_riscv64.s
new file mode 100644
index 0000000000..e6fab39874
--- /dev/null
+++ b/src/reflect/asm_riscv64.s
@@ -0,0 +1,36 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOV CTXT, 8(SP)
+ MOV $argframe+0(FP), T0
+ MOV T0, 16(SP)
+ ADD $32, SP, T1
+ MOV T1, 24(SP)
+ MOVB ZERO, 32(SP)
+ CALL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOV CTXT, 8(SP)
+ MOV $argframe+0(FP), T0
+ MOV T0, 16(SP)
+ ADD $32, SP, T1
+ MOV T1, 24(SP)
+ MOVB ZERO, 32(SP)
+ CALL ·callMethod(SB)
+ RET
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 3d6fde0ae3..cd8522d904 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -646,7 +646,7 @@ func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
// Implemented in the runtime package.
func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
-// resolveTextOff resolves an function pointer offset from a base type.
+// resolveTextOff resolves a function pointer offset from a base type.
// The (*rtype).textOff method is a convenience wrapper for this function.
// Implemented in the runtime package.
func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
@@ -2371,6 +2371,7 @@ func StructOf(fields []StructField) Type {
lastzero := uintptr(0)
repr = append(repr, "struct {"...)
+ pkgpath := ""
for i, field := range fields {
if field.Name == "" {
panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
@@ -2381,11 +2382,18 @@ func StructOf(fields []StructField) Type {
if field.Type == nil {
panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
}
- f := runtimeStructField(field)
+ f, fpkgpath := runtimeStructField(field)
ft := f.typ
if ft.kind&kindGCProg != 0 {
hasGCProg = true
}
+ if fpkgpath != "" {
+ if pkgpath == "" {
+ pkgpath = fpkgpath
+ } else if pkgpath != fpkgpath {
+				panic("reflect.StructOf: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
+ }
+ }
// Update string and hash
name := f.name.name()
@@ -2617,6 +2625,9 @@ func StructOf(fields []StructField) Type {
prototype := *(**structType)(unsafe.Pointer(&istruct))
*typ = *prototype
typ.fields = fs
+ if pkgpath != "" {
+ typ.pkgPath = newName(pkgpath, "", false)
+ }
// Look in cache.
if ts, ok := structLookupCache.m.Load(hash); ok {
@@ -2741,7 +2752,10 @@ func StructOf(fields []StructField) Type {
return addToCache(&typ.rtype)
}
-func runtimeStructField(field StructField) structField {
+// runtimeStructField takes a StructField value passed to StructOf and
+// returns both the corresponding internal representation, of type
+// structField, and the pkgpath value to use for this field.
+func runtimeStructField(field StructField) (structField, string) {
if field.Anonymous && field.PkgPath != "" {
panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
}
@@ -2762,11 +2776,12 @@ func runtimeStructField(field StructField) structField {
}
resolveReflectType(field.Type.common()) // install in runtime
- return structField{
+ f := structField{
name: newName(field.Name, string(field.Tag), exported),
typ: field.Type.common(),
offsetEmbed: offsetEmbed,
}
+ return f, field.PkgPath
}
// typeptrdata returns the length in bytes of the prefix of t
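
With this change StructOf derives the struct type's PkgPath from its unexported fields, and the new panic enforces that they all agree. A hedged usage sketch, assuming unexported fields carrying a PkgPath are accepted as the new tests exercise ("mypkg" is illustrative):

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		typ := reflect.StructOf([]reflect.StructField{
			{Name: "x", PkgPath: "mypkg", Type: reflect.TypeOf(int(0))},
			{Name: "y", PkgPath: "mypkg", Type: reflect.TypeOf("")},
		})
		fmt.Println(typ) // the struct type's PkgPath now comes from its fields

		// Mixing PkgPaths panics, as TestStructOfDifferentPkgPath verifies:
		//   reflect.StructOf([]reflect.StructField{
		//           {Name: "a", PkgPath: "p1", Type: reflect.TypeOf(0)},
		//           {Name: "b", PkgPath: "p2", Type: reflect.TypeOf(0)},
		//   })
	}
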
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index 56a8212981..d8789b4b5f 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -344,7 +344,7 @@ func initAlgAES() {
getRandomData(aeskeysched[:])
}
-// Note: These routines perform the read with an native endianness.
+// Note: These routines perform the read with a native endianness.
func readUnaligned32(p unsafe.Pointer) uint32 {
q := (*[4]byte)(p)
if sys.BigEndian {
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
new file mode 100644
index 0000000000..31dc94ce54
--- /dev/null
+++ b/src/runtime/asm_riscv64.s
@@ -0,0 +1,669 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+// func rt0_go()
+TEXT runtime·rt0_go(SB),NOSPLIT,$0
+ // X2 = stack; A0 = argc; A1 = argv
+
+ ADD $-24, X2
+ MOV A0, 8(X2) // argc
+ MOV A1, 16(X2) // argv
+
+ // create istack out of the given (operating system) stack.
+ // _cgo_init may update stackguard.
+ MOV $runtime·g0(SB), g
+ MOV $(-64*1024), T0
+ ADD T0, X2, T1
+ MOV T1, g_stackguard0(g)
+ MOV T1, g_stackguard1(g)
+ MOV T1, (g_stack+stack_lo)(g)
+ MOV X2, (g_stack+stack_hi)(g)
+
+ // if there is a _cgo_init, call it using the gcc ABI.
+ MOV _cgo_init(SB), T0
+ BEQ T0, ZERO, nocgo
+
+ MOV ZERO, A3 // arg 3: not used
+ MOV ZERO, A2 // arg 2: not used
+ MOV $setg_gcc<>(SB), A1 // arg 1: setg
+ MOV g, A0 // arg 0: G
+ JALR RA, T0
+
+nocgo:
+ // update stackguard after _cgo_init
+ MOV (g_stack+stack_lo)(g), T0
+ ADD $const__StackGuard, T0
+ MOV T0, g_stackguard0(g)
+ MOV T0, g_stackguard1(g)
+
+ // set the per-goroutine and per-mach "registers"
+ MOV $runtime·m0(SB), T0
+
+ // save m->g0 = g0
+ MOV g, m_g0(T0)
+ // save m0 to g0->m
+ MOV T0, g_m(g)
+
+ CALL runtime·check(SB)
+
+ // args are already prepared
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ MOV $runtime·mainPC(SB), T0 // entry
+ ADD $-24, X2
+ MOV T0, 16(X2)
+ MOV ZERO, 8(X2)
+ MOV ZERO, 0(X2)
+ CALL runtime·newproc(SB)
+ ADD $24, X2
+
+ // start this M
+ CALL runtime·mstart(SB)
+
+ WORD $0 // crash if reached
+ RET
+
+// void setg_gcc(G*); set g called from gcc with g in A0
+TEXT setg_gcc<>(SB),NOSPLIT,$0-0
+ MOV A0, g
+ CALL runtime·save_g(SB)
+ RET
+
+// func cputicks() int64
+TEXT runtime·cputicks(SB),NOSPLIT,$0-8
+ WORD $0xc0102573 // rdtime a0
+ MOV A0, ret+0(FP)
+ RET
+
+// systemstack_switch is a dummy routine that systemstack leaves at the bottom
+// of the G stack. We need to distinguish the routine that
+// lives at the bottom of the G stack from the one that lives
+// at the top of the system stack because the one at the top of
+// the system stack terminates the stack walk (see topofstack()).
+TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
+ UNDEF
+ JALR RA, ZERO // make sure this function is not leaf
+ RET
+
+// func systemstack(fn func())
+TEXT runtime·systemstack(SB), NOSPLIT, $0-8
+ MOV fn+0(FP), CTXT // CTXT = fn
+ MOV g_m(g), T0 // T0 = m
+
+ MOV m_gsignal(T0), T1 // T1 = gsignal
+ BEQ g, T1, noswitch
+
+ MOV m_g0(T0), T1 // T1 = g0
+ BEQ g, T1, noswitch
+
+ MOV m_curg(T0), T2
+ BEQ g, T2, switch
+
+ // Bad: g is not gsignal, not g0, not curg. What is it?
+ // Hide call from linker nosplit analysis.
+ MOV $runtime·badsystemstack(SB), T1
+ JALR RA, T1
+
+switch:
+ // save our state in g->sched. Pretend to
+ // be systemstack_switch if the G stack is scanned.
+ MOV $runtime·systemstack_switch(SB), T2
+ ADD $8, T2 // get past prologue
+ MOV T2, (g_sched+gobuf_pc)(g)
+ MOV X2, (g_sched+gobuf_sp)(g)
+ MOV ZERO, (g_sched+gobuf_lr)(g)
+ MOV g, (g_sched+gobuf_g)(g)
+
+ // switch to g0
+ MOV T1, g
+ CALL runtime·save_g(SB)
+ MOV (g_sched+gobuf_sp)(g), T0
+ // make it look like mstart called systemstack on g0, to stop traceback
+ ADD $-8, T0
+ MOV $runtime·mstart(SB), T1
+ MOV T1, 0(T0)
+ MOV T0, X2
+
+ // call target function
+ MOV 0(CTXT), T1 // code pointer
+ JALR RA, T1
+
+ // switch back to g
+ MOV g_m(g), T0
+ MOV m_curg(T0), g
+ CALL runtime·save_g(SB)
+ MOV (g_sched+gobuf_sp)(g), X2
+ MOV ZERO, (g_sched+gobuf_sp)(g)
+ RET
+
+noswitch:
+ // already on m stack, just call directly
+ // Using a tail call here cleans up tracebacks since we won't stop
+ // at an intermediate systemstack.
+ MOV 0(CTXT), T1 // code pointer
+ ADD $8, X2
+ JMP (T1)
+
+TEXT runtime·getcallerpc(SB),NOSPLIT|NOFRAME,$0-8
+ MOV 0(X2), T0 // LR saved by caller
+ MOV T0, ret+0(FP)
+ RET
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+// Caller has already loaded:
+// R1: framesize, R2: argsize, R3: LR
+//
+// The traceback routines see morestack on a g0 as being
+// the top of a stack (for example, morestack calling newstack
+// calling the scheduler calling newm calling gc), so we must
+// record an argument size. For that purpose, it has no arguments.
+
+// func morestack()
+TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
+ // Cannot grow scheduler stack (m->g0).
+ MOV g_m(g), A0
+ MOV m_g0(A0), A1
+ BNE g, A1, 3(PC)
+ CALL runtime·badmorestackg0(SB)
+ CALL runtime·abort(SB)
+
+ // Cannot grow signal stack (m->gsignal).
+ MOV m_gsignal(A0), A1
+ BNE g, A1, 3(PC)
+ CALL runtime·badmorestackgsignal(SB)
+ CALL runtime·abort(SB)
+
+ // Called from f.
+ // Set g->sched to context in f.
+ MOV X2, (g_sched+gobuf_sp)(g)
+ MOV T0, (g_sched+gobuf_pc)(g)
+ MOV RA, (g_sched+gobuf_lr)(g)
+ MOV CTXT, (g_sched+gobuf_ctxt)(g)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOV RA, (m_morebuf+gobuf_pc)(A0) // f's caller's PC
+ MOV X2, (m_morebuf+gobuf_sp)(A0) // f's caller's SP
+ MOV g, (m_morebuf+gobuf_g)(A0)
+
+ // Call newstack on m->g0's stack.
+ MOV m_g0(A0), g
+ CALL runtime·save_g(SB)
+ MOV (g_sched+gobuf_sp)(g), X2
+ // Create a stack frame on g0 to call newstack.
+ MOV ZERO, -8(X2) // Zero saved LR in frame
+ ADD $-8, X2
+ CALL runtime·newstack(SB)
+
+ // Not reached, but make sure the return PC from the call to newstack
+ // is still in this function, and not the beginning of the next.
+ UNDEF
+
+// func morestack_noctxt()
+TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
+ MOV ZERO, CTXT
+ JMP runtime·morestack(SB)
+
+// AES hashing not implemented for riscv64
+TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
+ JMP runtime·memhashFallback(SB)
+TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
+ JMP runtime·strhashFallback(SB)
+TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
+ JMP runtime·memhash32Fallback(SB)
+TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
+ JMP runtime·memhash64Fallback(SB)
+
+// func return0()
+TEXT runtime·return0(SB), NOSPLIT, $0
+ MOV $0, A0
+ RET
+
+// restore state from Gobuf; longjmp
+
+// func gogo(buf *gobuf)
+TEXT runtime·gogo(SB), NOSPLIT, $16-8
+ MOV buf+0(FP), T0
+ MOV gobuf_g(T0), g // make sure g is not nil
+ CALL runtime·save_g(SB)
+
+ MOV (g), ZERO // make sure g is not nil
+ MOV gobuf_sp(T0), X2
+ MOV gobuf_lr(T0), RA
+ MOV gobuf_ret(T0), A0
+ MOV gobuf_ctxt(T0), CTXT
+ MOV ZERO, gobuf_sp(T0)
+ MOV ZERO, gobuf_ret(T0)
+ MOV ZERO, gobuf_lr(T0)
+ MOV ZERO, gobuf_ctxt(T0)
+ MOV gobuf_pc(T0), T0
+ JALR ZERO, T0
+
+// func jmpdefer(fv *funcval, argp uintptr)
+// called from deferreturn
+// 1. grab stored return address from the caller's frame
+// 2. sub 8 bytes to get back to JAL deferreturn
+// 3. JMP to fn
+TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
+ MOV 0(X2), RA
+ ADD $-8, RA
+
+ MOV fv+0(FP), CTXT
+ MOV argp+8(FP), X2
+ ADD $-8, X2
+ MOV 0(CTXT), T0
+ JALR ZERO, T0
+
+// func procyield(cycles uint32)
+TEXT runtime·procyield(SB),NOSPLIT,$0-0
+ RET
+
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+
+// func mcall(fn func(*g))
+TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
+ // Save caller state in g->sched
+ MOV X2, (g_sched+gobuf_sp)(g)
+ MOV RA, (g_sched+gobuf_pc)(g)
+ MOV ZERO, (g_sched+gobuf_lr)(g)
+ MOV g, (g_sched+gobuf_g)(g)
+
+ // Switch to m->g0 & its stack, call fn.
+ MOV g, T0
+ MOV g_m(g), T1
+ MOV m_g0(T1), g
+ CALL runtime·save_g(SB)
+ BNE g, T0, 2(PC)
+ JMP runtime·badmcall(SB)
+ MOV fn+0(FP), CTXT // context
+ MOV 0(CTXT), T1 // code pointer
+ MOV (g_sched+gobuf_sp)(g), X2 // sp = m->g0->sched.sp
+ ADD $-16, X2
+ MOV T0, 8(X2)
+ MOV ZERO, 0(X2)
+ JALR RA, T1
+ JMP runtime·badmcall2(SB)
+
+// func gosave(buf *gobuf)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
+ MOV buf+0(FP), T1
+ MOV X2, gobuf_sp(T1)
+ MOV RA, gobuf_pc(T1)
+ MOV g, gobuf_g(T1)
+ MOV ZERO, gobuf_lr(T1)
+ MOV ZERO, gobuf_ret(T1)
+ // Assert ctxt is zero. See func save.
+ MOV gobuf_ctxt(T1), T1
+ BEQ T1, ZERO, 2(PC)
+ CALL runtime·badctxt(SB)
+ RET
+
+// func asmcgocall(fn, arg unsafe.Pointer) int32
+TEXT ·asmcgocall(SB),NOSPLIT,$0-20
+ // TODO(jsing): Add support for cgo - issue #36641.
+ WORD $0 // crash
+
+// func asminit()
+TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
+ RET
+
+// reflectcall: call a function with the given argument list
+// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// we don't have variable-sized frames, so we use a small number
+// of constant-sized-frame functions to encode a few bits of size in the pc.
+// Caution: ugly multiline assembly macros in your future!
+
+#define DISPATCH(NAME,MAXSIZE) \
+ MOV $MAXSIZE, T1 \
+ BLTU T1, T0, 3(PC) \
+ MOV $NAME(SB), T2; \
+ JALR ZERO, T2
+// Note: can't just "BR NAME(SB)" - bad inlining results.
+
+// func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
+TEXT reflect·call(SB), NOSPLIT, $0-0
+ JMP ·reflectcall(SB)
+
+// func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
+ MOVWU argsize+24(FP), T0
+ DISPATCH(runtime·call32, 32)
+ DISPATCH(runtime·call64, 64)
+ DISPATCH(runtime·call128, 128)
+ DISPATCH(runtime·call256, 256)
+ DISPATCH(runtime·call512, 512)
+ DISPATCH(runtime·call1024, 1024)
+ DISPATCH(runtime·call2048, 2048)
+ DISPATCH(runtime·call4096, 4096)
+ DISPATCH(runtime·call8192, 8192)
+ DISPATCH(runtime·call16384, 16384)
+ DISPATCH(runtime·call32768, 32768)
+ DISPATCH(runtime·call65536, 65536)
+ DISPATCH(runtime·call131072, 131072)
+ DISPATCH(runtime·call262144, 262144)
+ DISPATCH(runtime·call524288, 524288)
+ DISPATCH(runtime·call1048576, 1048576)
+ DISPATCH(runtime·call2097152, 2097152)
+ DISPATCH(runtime·call4194304, 4194304)
+ DISPATCH(runtime·call8388608, 8388608)
+ DISPATCH(runtime·call16777216, 16777216)
+ DISPATCH(runtime·call33554432, 33554432)
+ DISPATCH(runtime·call67108864, 67108864)
+ DISPATCH(runtime·call134217728, 134217728)
+ DISPATCH(runtime·call268435456, 268435456)
+ DISPATCH(runtime·call536870912, 536870912)
+ DISPATCH(runtime·call1073741824, 1073741824)
+ MOV $runtime·badreflectcall(SB), T2
+ JALR ZERO, T2
+
+#define CALLFN(NAME,MAXSIZE) \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+ NO_LOCAL_POINTERS; \
+ /* copy arguments to stack */ \
+ MOV arg+16(FP), A1; \
+ MOVWU argsize+24(FP), A2; \
+ MOV X2, A3; \
+ ADD $8, A3; \
+ ADD A3, A2; \
+ BEQ A3, A2, 6(PC); \
+ MOVBU (A1), A4; \
+ ADD $1, A1; \
+ MOVB A4, (A3); \
+ ADD $1, A3; \
+ JMP -5(PC); \
+ /* call function */ \
+ MOV f+8(FP), CTXT; \
+ MOV (CTXT), A4; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ JALR RA, A4; \
+ /* copy return values back */ \
+ MOV argtype+0(FP), A5; \
+ MOV arg+16(FP), A1; \
+ MOVWU n+24(FP), A2; \
+ MOVWU retoffset+28(FP), A4; \
+ ADD $8, X2, A3; \
+ ADD A4, A3; \
+ ADD A4, A1; \
+ SUB A4, A2; \
+ CALL callRet<>(SB); \
+ RET
+
+// callRet copies return values back at the end of call*. This is a
+// separate function so it can allocate stack space for the arguments
+// to reflectcallmove. It does not follow the Go ABI; it expects its
+// arguments in registers.
+TEXT callRet<>(SB), NOSPLIT, $32-0
+ MOV A5, 8(X2)
+ MOV A1, 16(X2)
+ MOV A3, 24(X2)
+ MOV A2, 32(X2)
+ CALL runtime·reflectcallmove(SB)
+ RET
+
+CALLFN(·call16, 16)
+CALLFN(·call32, 32)
+CALLFN(·call64, 64)
+CALLFN(·call128, 128)
+CALLFN(·call256, 256)
+CALLFN(·call512, 512)
+CALLFN(·call1024, 1024)
+CALLFN(·call2048, 2048)
+CALLFN(·call4096, 4096)
+CALLFN(·call8192, 8192)
+CALLFN(·call16384, 16384)
+CALLFN(·call32768, 32768)
+CALLFN(·call65536, 65536)
+CALLFN(·call131072, 131072)
+CALLFN(·call262144, 262144)
+CALLFN(·call524288, 524288)
+CALLFN(·call1048576, 1048576)
+CALLFN(·call2097152, 2097152)
+CALLFN(·call4194304, 4194304)
+CALLFN(·call8388608, 8388608)
+CALLFN(·call16777216, 16777216)
+CALLFN(·call33554432, 33554432)
+CALLFN(·call67108864, 67108864)
+CALLFN(·call134217728, 134217728)
+CALLFN(·call268435456, 268435456)
+CALLFN(·call536870912, 536870912)
+CALLFN(·call1073741824, 1073741824)
+
+// func goexit(neverCallThisFunction)
+// The top-most function running on a goroutine
+// returns to goexit+PCQuantum.
+TEXT runtime·goexit(SB),NOSPLIT|NOFRAME,$0-0
+ MOV ZERO, ZERO // NOP
+ JMP runtime·goexit1(SB) // does not return
+ // traceback from goexit1 must hit code range of goexit
+ MOV ZERO, ZERO // NOP
+
+// func cgocallback_gofunc(fv uintptr, frame uintptr, framesize, ctxt uintptr)
+TEXT ·cgocallback_gofunc(SB),NOSPLIT,$24-32
+ // TODO(jsing): Add support for cgo - issue #36641.
+ WORD $0 // crash
+
+TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
+ EBREAK
+ RET
+
+TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
+ EBREAK
+ RET
+
+// void setg(G*); set g. for use by needm.
+TEXT runtime·setg(SB), NOSPLIT, $0-8
+ MOV gg+0(FP), g
+ // This only happens if iscgo, so jump straight to save_g
+ CALL runtime·save_g(SB)
+ RET
+
+TEXT ·checkASM(SB),NOSPLIT,$0-1
+ MOV $1, T0
+ MOV T0, ret+0(FP)
+ RET
+
+// gcWriteBarrier performs a heap pointer write and informs the GC.
+//
+// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
+// - T0 is the destination of the write
+// - T1 is the value being written at T0.
+// It clobbers R30 (the linker temp register - REG_TMP).
+// The act of CALLing gcWriteBarrier will clobber RA (LR).
+// It does not clobber any other general-purpose registers,
+// but may clobber others (e.g., floating point registers).
+TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$296
+ // Save the registers clobbered by the fast path.
+ MOV A0, 280(X2)
+ MOV A1, 288(X2)
+ MOV g_m(g), A0
+ MOV m_p(A0), A0
+ MOV (p_wbBuf+wbBuf_next)(A0), A1
+ // Increment wbBuf.next position.
+ ADD $16, A1
+ MOV A1, (p_wbBuf+wbBuf_next)(A0)
+ MOV (p_wbBuf+wbBuf_end)(A0), A0
+ MOV A0, T6 // T6 is linker temp register (REG_TMP)
+ // Record the write.
+ MOV T1, -16(A1) // Record value
+ MOV (T0), A0 // TODO: This turns bad writes into bad reads.
+ MOV A0, -8(A1) // Record *slot
+ // Is the buffer full?
+ BEQ A1, T6, flush
+ret:
+ MOV 280(X2), A0
+ MOV 288(X2), A1
+ // Do the write.
+ MOV T1, (T0)
+ RET
+
+flush:
+ // Save all general purpose registers since these could be
+ // clobbered by wbBufFlush and were not saved by the caller.
+ MOV T0, 8(X2) // Also first argument to wbBufFlush
+ MOV T1, 16(X2) // Also second argument to wbBufFlush
+
+ // TODO: Optimise
+ // R3 is g.
+ // R4 already saved (T0)
+ // R5 already saved (T1)
+ // R9 already saved (A0)
+ // R10 already saved (A1)
+ // R30 is tmp register.
+ MOV X0, 24(X2)
+ MOV X1, 32(X2)
+ MOV X2, 40(X2)
+ MOV X3, 48(X2)
+ MOV X4, 56(X2)
+ MOV X5, 64(X2)
+ MOV X6, 72(X2)
+ MOV X7, 80(X2)
+ MOV X8, 88(X2)
+ MOV X9, 96(X2)
+ MOV X10, 104(X2)
+ MOV X11, 112(X2)
+ MOV X12, 120(X2)
+ MOV X13, 128(X2)
+ MOV X14, 136(X2)
+ MOV X15, 144(X2)
+ MOV X16, 152(X2)
+ MOV X17, 160(X2)
+ MOV X18, 168(X2)
+ MOV X19, 176(X2)
+ MOV X20, 184(X2)
+ MOV X21, 192(X2)
+ MOV X22, 200(X2)
+ MOV X23, 208(X2)
+ MOV X24, 216(X2)
+ MOV X25, 224(X2)
+ MOV X26, 232(X2)
+ MOV X27, 240(X2)
+ MOV X28, 248(X2)
+ MOV X29, 256(X2)
+ MOV X30, 264(X2)
+ MOV X31, 272(X2)
+
+ // This takes arguments T0 and T1.
+ CALL runtime·wbBufFlush(SB)
+
+ MOV 24(X2), X0
+ MOV 32(X2), X1
+ MOV 40(X2), X2
+ MOV 48(X2), X3
+ MOV 56(X2), X4
+ MOV 64(X2), X5
+ MOV 72(X2), X6
+ MOV 80(X2), X7
+ MOV 88(X2), X8
+ MOV 96(X2), X9
+ MOV 104(X2), X10
+ MOV 112(X2), X11
+ MOV 120(X2), X12
+ MOV 128(X2), X13
+ MOV 136(X2), X14
+ MOV 144(X2), X15
+ MOV 152(X2), X16
+ MOV 160(X2), X17
+ MOV 168(X2), X18
+ MOV 176(X2), X19
+ MOV 184(X2), X20
+ MOV 192(X2), X21
+ MOV 200(X2), X22
+ MOV 208(X2), X23
+ MOV 216(X2), X24
+ MOV 224(X2), X25
+ MOV 232(X2), X26
+ MOV 240(X2), X27
+ MOV 248(X2), X28
+ MOV 256(X2), X29
+ MOV 264(X2), X30
+ MOV 272(X2), X31
+
+ JMP ret
+
+// Note: these functions use a special calling convention to save generated code space.
+// Arguments are passed in registers, but the space for those arguments is allocated
+// in the caller's stack frame. These stubs write the args into that stack space and
+// then tail call to the corresponding runtime handler.
+// The tail call makes these stubs disappear in backtraces.
+TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
+ MOV T0, x+0(FP)
+ MOV T1, y+8(FP)
+ JMP runtime·goPanicIndex(SB)
+TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
+ MOV T0, x+0(FP)
+ MOV T1, y+8(FP)
+ JMP runtime·goPanicIndexU(SB)
+TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
+ MOV T1, x+0(FP)
+ MOV T2, y+8(FP)
+ JMP runtime·goPanicSliceAlen(SB)
+TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
+ MOV T1, x+0(FP)
+ MOV T2, y+8(FP)
+ JMP runtime·goPanicSliceAlenU(SB)
+TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
+ MOV T1, x+0(FP)
+ MOV T2, y+8(FP)
+ JMP runtime·goPanicSliceAcap(SB)
+TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
+ MOV T1, x+0(FP)
+ MOV T2, y+8(FP)
+ JMP runtime·goPanicSliceAcapU(SB)
+TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
+ MOV T0, x+0(FP)
+ MOV T1, y+8(FP)
+ JMP runtime·goPanicSliceB(SB)
+TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
+ MOV T0, x+0(FP)
+ MOV T1, y+8(FP)
+ JMP runtime·goPanicSliceBU(SB)
+TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
+ MOV T2, x+0(FP)
+ MOV T3, y+8(FP)
+ JMP runtime·goPanicSlice3Alen(SB)
+TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
+ MOV T2, x+0(FP)
+ MOV T3, y+8(FP)
+ JMP runtime·goPanicSlice3AlenU(SB)
+TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
+ MOV T2, x+0(FP)
+ MOV T3, y+8(FP)
+ JMP runtime·goPanicSlice3Acap(SB)
+TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
+ MOV T2, x+0(FP)
+ MOV T3, y+8(FP)
+ JMP runtime·goPanicSlice3AcapU(SB)
+TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
+ MOV T1, x+0(FP)
+ MOV T2, y+8(FP)
+ JMP runtime·goPanicSlice3B(SB)
+TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
+ MOV T1, x+0(FP)
+ MOV T2, y+8(FP)
+ JMP runtime·goPanicSlice3BU(SB)
+TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
+ MOV T0, x+0(FP)
+ MOV T1, y+8(FP)
+ JMP runtime·goPanicSlice3C(SB)
+TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
+ MOV T0, x+0(FP)
+ MOV T1, y+8(FP)
+ JMP runtime·goPanicSlice3CU(SB)
+
+DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
+GLOBL runtime·mainPC(SB),RODATA,$8
diff --git a/src/runtime/atomic_riscv64.s b/src/runtime/atomic_riscv64.s
new file mode 100644
index 0000000000..9cf54490f1
--- /dev/null
+++ b/src/runtime/atomic_riscv64.s
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+#define FENCE WORD $0x0ff0000f
+
+// func publicationBarrier()
+TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
+ FENCE
+ RET
diff --git a/src/runtime/callers_test.go b/src/runtime/callers_test.go
index 3cd1b40ec9..302e33deeb 100644
--- a/src/runtime/callers_test.go
+++ b/src/runtime/callers_test.go
@@ -254,9 +254,8 @@ func TestCallersDivZeroPanic(t *testing.T) {
func TestCallersDeferNilFuncPanic(t *testing.T) {
// Make sure we don't have any extra frames on the stack. We cut off the check
// at runtime.sigpanic, because non-open-coded defers (which may be used in
- // non-opt or race checker mode) include an extra 'jmpdefer' frame (which is
- // where the nil pointer deref happens). We could consider hiding jmpdefer in
- // tracebacks.
+ // non-opt or race checker mode) include an extra 'deferreturn' frame (which is
+ // where the nil pointer deref happens).
state := 1
want := []string{"runtime.Callers", "runtime_test.TestCallersDeferNilFuncPanic.func1",
"runtime.gopanic", "runtime.panicmem", "runtime.sigpanic"}
@@ -279,3 +278,32 @@ func TestCallersDeferNilFuncPanic(t *testing.T) {
// function exit, rather than at the defer statement.
state = 2
}
+
+// Same test, but forcing non-open-coded defer by putting the defer in a loop. See
+// issue #36050.
+func TestCallersDeferNilFuncPanicWithLoop(t *testing.T) {
+ state := 1
+ want := []string{"runtime.Callers", "runtime_test.TestCallersDeferNilFuncPanicWithLoop.func1",
+ "runtime.gopanic", "runtime.panicmem", "runtime.sigpanic", "runtime.deferreturn", "runtime_test.TestCallersDeferNilFuncPanicWithLoop"}
+
+ defer func() {
+ if r := recover(); r == nil {
+ t.Fatal("did not panic")
+ }
+ pcs := make([]uintptr, 20)
+ pcs = pcs[:runtime.Callers(0, pcs)]
+ testCallersEqual(t, pcs, want)
+ if state == 1 {
+ t.Fatal("nil defer func panicked at defer time rather than function exit time")
+ }
+
+ }()
+
+ for i := 0; i < 1; i++ {
+ var f func()
+ defer f()
+ }
+ // Use the value of 'state' to make sure nil defer func f causes panic at
+ // function exit, rather than at the defer statement.
+ state = 2
+}
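
Context for the test: a defer inside a loop can run a variable number of times, so the compiler cannot open-code it and falls back to runtime defer records processed by deferreturn, which is the extra frame the expected traceback now includes. A tiny illustration:

	package main

	import "os"

	// closeAll's defer is loop-bound, so it is never open-coded: each
	// iteration pushes a defer record, run via deferreturn at exit.
	func closeAll(files []*os.File) {
		for _, f := range files {
			defer f.Close()
		}
	}

	func main() {}
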
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 5f8ff8139a..a4e64b00cc 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -90,6 +90,11 @@ import (
type cgoCallers [32]uintptr
// Call from Go to C.
+//
+// This must be nosplit because it's used for syscalls on some
+// platforms. Syscalls may have untyped arguments on the stack, so
+// it's not safe to grow or scan the stack.
+//
//go:nosplit
func cgocall(fn, arg unsafe.Pointer) int32 {
if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
@@ -127,6 +132,13 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
// saved by entersyscall here.
entersyscall()
+ // Tell asynchronous preemption that we're entering external
+ // code. We do this after entersyscall because this may block
+ // and cause an async preemption to fail, but at this point a
+ // sync preemption will succeed (though this is not a matter
+ // of correctness).
+ osPreemptExtEnter(mp)
+
mp.incgo = true
errno := asmcgocall(fn, arg)
@@ -135,6 +147,8 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
mp.incgo = false
mp.ncgo--
+ osPreemptExtExit(mp)
+
exitsyscall()
// Note that raceacquire must be called only after exitsyscall has
@@ -188,12 +202,16 @@ func cgocallbackg(ctxt uintptr) {
exitsyscall() // coming out of cgo call
gp.m.incgo = false
+ osPreemptExtExit(gp.m)
+
cgocallbackg1(ctxt)
// At this point unlockOSThread has been called.
// The following code must not change to a different m.
// This is enforced by checking incgo in the schedule function.
+ osPreemptExtEnter(gp.m)
+
gp.m.incgo = true
// going back to cgo call
reentersyscall(savedpc, uintptr(savedsp))
@@ -352,6 +370,7 @@ func unwindm(restore *bool) {
if mp.ncgo > 0 {
mp.incgo = false
mp.ncgo--
+ osPreemptExtExit(mp)
}
releasem(mp)
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index 677af99eac..c953b23add 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -121,21 +121,6 @@ func chanbuf(c *hchan, i uint) unsafe.Pointer {
return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}
-// full reports whether a send on c would block (that is, the channel is full).
-// It uses a single word-sized read of mutable state, so although
-// the answer is instantaneously true, the correct answer may have changed
-// by the time the calling function receives the return value.
-func full(c *hchan) bool {
- // c.dataqsiz is immutable (never written after the channel is created)
- // so it is safe to read at any time during channel operation.
- if c.dataqsiz == 0 {
- // Assumes that a pointer read is relaxed-atomic.
- return c.recvq.first == nil
- }
- // Assumes that a uint read is relaxed-atomic.
- return c.qcount == c.dataqsiz
-}
-
// entry point for c <- x from compiled code
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
@@ -175,7 +160,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
//
// After observing that the channel is not closed, we observe that the channel is
// not ready for sending. Each of these observations is a single word-sized read
- // (first c.closed and second full()).
+	// (first c.closed and second c.recvq.first or c.qcount, depending on the kind of channel).
// Because a closed channel cannot transition from 'ready for sending' to
// 'not ready for sending', even if the channel is closed between the two observations,
// they imply a moment between the two when the channel was both not yet closed
@@ -184,10 +169,9 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
//
// It is okay if the reads are reordered here: if we observe that the channel is not
// ready for sending and then observe that it is not closed, that implies that the
- // channel wasn't closed during the first observation. However, nothing here
- // guarantees forward progress. We rely on the side effects of lock release in
- // chanrecv() and closechan() to update this thread's view of c.closed and full().
- if !block && c.closed == 0 && full(c) {
+ // channel wasn't closed during the first observation.
+ if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
+ (c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
return false
}
@@ -417,16 +401,6 @@ func closechan(c *hchan) {
}
}
-// empty reports whether a read from c would block (that is, the channel is
-// empty). It uses a single atomic read of mutable state.
-func empty(c *hchan) bool {
- // c.dataqsiz is immutable.
- if c.dataqsiz == 0 {
- return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
- }
- return atomic.Loaduint(&c.qcount) == 0
-}
-
// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
@@ -462,33 +436,21 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
- if !block && empty(c) {
- // After observing that the channel is not ready for receiving, we observe whether the
- // channel is closed.
- //
- // Reordering of these checks could lead to incorrect behavior when racing with a close.
- // For example, if the channel was open and not empty, was closed, and then drained,
- // reordered reads could incorrectly indicate "open and empty". To prevent reordering,
- // we use atomic loads for both checks, and rely on emptying and closing to happen in
- // separate critical sections under the same lock. This assumption fails when closing
- // an unbuffered channel with a blocked send, but that is an error condition anyway.
- if atomic.Load(&c.closed) == 0 {
- // Because a channel cannot be reopened, the later observation of the channel
- // being not closed implies that it was also not closed at the moment of the
- // first observation. We behave as if we observed the channel at that moment
- // and report that the receive cannot proceed.
- return
- }
- // The channel is irreversibly closed. Re-check whether the channel has any pending data
- // to receive, which could have arrived between the empty and closed checks above.
- // Sequential consistency is also required here, when racing with such a send.
- if empty(c) {
- // The channel is irreversibly closed and empty.
- if ep != nil {
- typedmemclr(c.elemtype, ep)
- }
- return true, false
- }
+ //
+ // After observing that the channel is not ready for receiving, we observe that the
+ // channel is not closed. Each of these observations is a single word-sized read
+ // (first c.sendq.first or c.qcount, and second c.closed).
+ // Because a channel cannot be reopened, the later observation of the channel
+ // being not closed implies that it was also not closed at the moment of the
+ // first observation. We behave as if we observed the channel at that moment
+ // and report that the receive cannot proceed.
+ //
+ // The order of operations is important here: reversing the operations can lead to
+ // incorrect behavior when racing with a close.
+ if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
+ c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
+ atomic.Load(&c.closed) == 0 {
+ return
}
var t0 int64
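
These fast paths are what a select with a default case reaches: the compiler lowers it to chansend/chanrecv with block=false, so a full channel on send (or an empty one on receive) returns without taking the channel lock. For example:

	package main

	// trySend reports whether v was sent without blocking; when the buffer
	// is full, chansend's lock-free fast path above returns false.
	func trySend(c chan int, v int) bool {
		select {
		case c <- v:
			return true
		default:
			return false
		}
	}

	func main() {
		c := make(chan int, 1)
		_ = trySend(c, 1) // true: the buffer has room
		_ = trySend(c, 2) // false, without acquiring the channel lock
	}
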
diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go
index d4752dd344..1180e76fcd 100644
--- a/src/runtime/chan_test.go
+++ b/src/runtime/chan_test.go
@@ -719,6 +719,7 @@ func TestSelectStackAdjust(t *testing.T) {
if after.NumGC-before.NumGC >= 2 {
goto done
}
+ runtime.Gosched()
}
t.Fatal("failed to trigger concurrent GC")
done:
@@ -1126,20 +1127,6 @@ func BenchmarkChanPopular(b *testing.B) {
wg.Wait()
}
-func BenchmarkChanClosed(b *testing.B) {
- c := make(chan struct{})
- close(c)
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- select {
- case <-c:
- default:
- b.Error("Unreachable")
- }
- }
- })
-}
-
var (
alwaysFalse = false
workSink = 0
diff --git a/src/runtime/checkptr.go b/src/runtime/checkptr.go
index 3c6a40206f..ddbc8168af 100644
--- a/src/runtime/checkptr.go
+++ b/src/runtime/checkptr.go
@@ -6,45 +6,22 @@ package runtime
import "unsafe"
-type ptrAlignError struct {
- ptr unsafe.Pointer
- elem *_type
- n uintptr
-}
-
-func (e ptrAlignError) RuntimeError() {}
-
-func (e ptrAlignError) Error() string {
- return "runtime error: unsafe pointer conversion"
-}
-
func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) {
// Check that (*[n]elem)(p) is appropriately aligned.
// TODO(mdempsky): What about fieldAlign?
if uintptr(p)&(uintptr(elem.align)-1) != 0 {
- panic(ptrAlignError{p, elem, n})
+ throw("checkptr: unsafe pointer conversion")
}
// Check that (*[n]elem)(p) doesn't straddle multiple heap objects.
if size := n * elem.size; size > 1 && checkptrBase(p) != checkptrBase(add(p, size-1)) {
- panic(ptrAlignError{p, elem, n})
+ throw("checkptr: unsafe pointer conversion")
}
}
-type ptrArithError struct {
- ptr unsafe.Pointer
- originals []unsafe.Pointer
-}
-
-func (e ptrArithError) RuntimeError() {}
-
-func (e ptrArithError) Error() string {
- return "runtime error: unsafe pointer arithmetic"
-}
-
func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer) {
if 0 < uintptr(p) && uintptr(p) < minLegalPointer {
- panic(ptrArithError{p, originals})
+ throw("checkptr: unsafe pointer arithmetic")
}
// Check that if the computed pointer p points into a heap
@@ -61,7 +38,7 @@ func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer) {
}
}
- panic(ptrArithError{p, originals})
+ throw("checkptr: unsafe pointer arithmetic")
}
// checkptrBase returns the base address for the allocation containing
diff --git a/src/runtime/checkptr_test.go b/src/runtime/checkptr_test.go
new file mode 100644
index 0000000000..c5f22cc101
--- /dev/null
+++ b/src/runtime/checkptr_test.go
@@ -0,0 +1,46 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "internal/testenv"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestCheckPtr(t *testing.T) {
+ t.Parallel()
+ testenv.MustHaveGoRun(t)
+
+ exe, err := buildTestProg(t, "testprog", "-gcflags=all=-d=checkptr=1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testCases := []struct {
+ cmd string
+ want string
+ }{
+ {"CheckPtrAlignment", "fatal error: checkptr: unsafe pointer conversion\n"},
+ {"CheckPtrArithmetic", "fatal error: checkptr: unsafe pointer arithmetic\n"},
+ {"CheckPtrSize", "fatal error: checkptr: unsafe pointer conversion\n"},
+ {"CheckPtrSmall", "fatal error: checkptr: unsafe pointer arithmetic\n"},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.cmd, func(t *testing.T) {
+ t.Parallel()
+ got, err := testenv.CleanCmdEnv(exec.Command(exe, tc.cmd)).CombinedOutput()
+ if err != nil {
+ t.Log(err)
+ }
+ if !strings.HasPrefix(string(got), tc.want) {
+ t.Errorf("output:\n%s\n\nwant output starting with: %s", got, tc.want)
+ }
+ })
+ }
+}
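
For reference, a minimal program that trips the new throw when built with -gcflags=all=-d=checkptr=1; the misaligned conversion mirrors the testprog's CheckPtrAlignment case:

	package main

	import "unsafe"

	func main() {
		b := make([]byte, 16)
		// Offset 1 in a byte slice can never be 8-byte aligned, so this
		// conversion dies with:
		//   fatal error: checkptr: unsafe pointer conversion
		p := (*int64)(unsafe.Pointer(&b[1]))
		_ = *p
	}
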
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index 76eeb2e41a..af5c3a1170 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -26,12 +26,12 @@ func GOMAXPROCS(n int) int {
return ret
}
- stopTheWorldGC("GOMAXPROCS")
+ stopTheWorld("GOMAXPROCS")
// newprocs will be processed by startTheWorld
newprocs = int32(n)
- startTheWorldGC()
+ startTheWorld()
return ret
}
diff --git a/src/runtime/defs_linux_386.go b/src/runtime/defs_linux_386.go
index ba349845cf..ba5ef18e02 100644
--- a/src/runtime/defs_linux_386.go
+++ b/src/runtime/defs_linux_386.go
@@ -227,3 +227,14 @@ type sockaddr_un struct {
family uint16
path [108]byte
}
+
+const __NEW_UTS_LEN = 64
+
+type new_utsname struct {
+ sysname [__NEW_UTS_LEN + 1]byte
+ nodename [__NEW_UTS_LEN + 1]byte
+ release [__NEW_UTS_LEN + 1]byte
+ version [__NEW_UTS_LEN + 1]byte
+ machine [__NEW_UTS_LEN + 1]byte
+ domainname [__NEW_UTS_LEN + 1]byte
+}
diff --git a/src/runtime/defs_linux_amd64.go b/src/runtime/defs_linux_amd64.go
index 9eb5646ca3..8144354d5a 100644
--- a/src/runtime/defs_linux_amd64.go
+++ b/src/runtime/defs_linux_amd64.go
@@ -263,3 +263,14 @@ type sockaddr_un struct {
family uint16
path [108]byte
}
+
+const __NEW_UTS_LEN = 64
+
+type new_utsname struct {
+ sysname [__NEW_UTS_LEN + 1]byte
+ nodename [__NEW_UTS_LEN + 1]byte
+ release [__NEW_UTS_LEN + 1]byte
+ version [__NEW_UTS_LEN + 1]byte
+ machine [__NEW_UTS_LEN + 1]byte
+ domainname [__NEW_UTS_LEN + 1]byte
+}
diff --git a/src/runtime/defs_linux_riscv64.go b/src/runtime/defs_linux_riscv64.go
new file mode 100644
index 0000000000..60da0fae00
--- /dev/null
+++ b/src/runtime/defs_linux_riscv64.go
@@ -0,0 +1,209 @@
+// Generated using cgo, then manually converted into appropriate naming and code
+// for the Go runtime.
+// go tool cgo -godefs defs_linux.go defs1_linux.go defs2_linux.go
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EAGAIN = 0xb
+ _ENOMEM = 0xc
+ _ENOSYS = 0x26
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x20
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+ _MADV_FREE = 0x8
+ _MADV_HUGEPAGE = 0xe
+ _MADV_NOHUGEPAGE = 0xf
+
+ _SA_RESTART = 0x10000000
+ _SA_ONSTACK = 0x8000000
+ _SA_RESTORER = 0x0
+ _SA_SIGINFO = 0x4
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGBUS = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGUSR1 = 0xa
+ _SIGSEGV = 0xb
+ _SIGUSR2 = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGSTKFLT = 0x10
+ _SIGCHLD = 0x11
+ _SIGCONT = 0x12
+ _SIGSTOP = 0x13
+ _SIGTSTP = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGURG = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGIO = 0x1d
+ _SIGPWR = 0x1e
+ _SIGSYS = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EPOLLIN = 0x1
+ _EPOLLOUT = 0x4
+ _EPOLLERR = 0x8
+ _EPOLLHUP = 0x10
+ _EPOLLRDHUP = 0x2000
+ _EPOLLET = 0x80000000
+ _EPOLL_CLOEXEC = 0x80000
+ _EPOLL_CTL_ADD = 0x1
+ _EPOLL_CTL_DEL = 0x2
+ _EPOLL_CTL_MOD = 0x3
+)
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+//go:nosplit
+func (ts *timespec) setNsec(ns int64) {
+ ts.tv_sec = ns / 1e9
+ ts.tv_nsec = ns % 1e9
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type sigactiont struct {
+ sa_handler uintptr
+ sa_flags uint64
+ sa_restorer uintptr
+ sa_mask uint64
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ // below here is a union; si_addr is the only field we use
+ si_addr uint64
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type epollevent struct {
+ events uint32
+ pad_cgo_0 [4]byte
+ data [8]byte // unaligned uintptr
+}
+
+const (
+ _O_RDONLY = 0x0
+ _O_NONBLOCK = 0x800
+ _O_CLOEXEC = 0x80000
+)
+
+type user_regs_struct struct {
+ pc uint64
+ ra uint64
+ sp uint64
+ gp uint64
+ tp uint64
+ t0 uint64
+ t1 uint64
+ t2 uint64
+ s0 uint64
+ s1 uint64
+ a0 uint64
+ a1 uint64
+ a2 uint64
+ a3 uint64
+ a4 uint64
+ a5 uint64
+ a6 uint64
+ a7 uint64
+ s2 uint64
+ s3 uint64
+ s4 uint64
+ s5 uint64
+ s6 uint64
+ s7 uint64
+ s8 uint64
+ s9 uint64
+ s10 uint64
+ s11 uint64
+ t3 uint64
+ t4 uint64
+ t5 uint64
+ t6 uint64
+}
+
+type user_fpregs_struct struct {
+ f [528]byte
+}
+
+type usigset struct {
+ us_x__val [16]uint64
+}
+
+type sigcontext struct {
+ sc_regs user_regs_struct
+ sc_fpregs user_fpregs_struct
+}
+
+type stackt struct {
+ ss_sp *byte
+ ss_flags int32
+ ss_size uintptr
+}
+
+type ucontext struct {
+ uc_flags uint64
+ uc_link *ucontext
+ uc_stack stackt
+ uc_sigmask usigset
+ uc_x__unused [0]uint8
+ uc_pad_cgo_0 [8]byte
+ uc_mcontext sigcontext
+}
diff --git a/src/runtime/export_darwin_test.go b/src/runtime/export_darwin_test.go
new file mode 100644
index 0000000000..e9b6eb36da
--- /dev/null
+++ b/src/runtime/export_darwin_test.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+func Fcntl(fd, cmd, arg uintptr) (uintptr, uintptr) {
+ r := fcntl(int32(fd), int32(cmd), int32(arg))
+ if r < 0 {
+ return ^uintptr(0), uintptr(-r)
+ }
+ return uintptr(r), 0
+}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 47cefa1f3b..88cb1acc5b 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -43,6 +43,11 @@ var PhysHugePageSize = physHugePageSize
var NetpollGenericInit = netpollGenericInit
+var ParseRelease = parseRelease
+
+var Memmove = memmove
+var MemclrNoHeapPointers = memclrNoHeapPointers
+
const PreemptMSupported = preemptMSupported
type LFNode struct {
@@ -355,7 +360,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
}
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
+ pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
slow.HeapReleased += uint64(pg) * pageSize
}
for _, p := range allp {
@@ -576,6 +581,8 @@ func RunGetgThreadSwitchTest() {
const (
PageSize = pageSize
PallocChunkPages = pallocChunkPages
+ PageAlloc64Bit = pageAlloc64Bit
+ PallocSumBytes = pallocSumBytes
)
// Expose pallocSum for testing.
@@ -726,15 +733,38 @@ func (p *PageAlloc) Free(base, npages uintptr) {
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
-func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
- return (*PallocData)(&((*pageAlloc)(p).chunks[i]))
-}
func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) {
systemstack(func() {
r = (*pageAlloc)(p).scavenge(nbytes, locked)
})
return
}
+func (p *PageAlloc) InUse() []AddrRange {
+ ranges := make([]AddrRange, 0, len(p.inUse.ranges))
+ for _, r := range p.inUse.ranges {
+ ranges = append(ranges, AddrRange{
+ Base: r.base,
+ Limit: r.limit,
+ })
+ }
+ return ranges
+}
+
+// Returns nil if the PallocData's L2 is missing.
+func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
+ ci := chunkIdx(i)
+ l2 := (*pageAlloc)(p).chunks[ci.l1()]
+ if l2 == nil {
+ return nil
+ }
+ return (*PallocData)(&l2[ci.l2()])
+}
+
+// AddrRange represents a range over addresses.
+// Specifically, it represents the range [Base, Limit).
+type AddrRange struct {
+ Base, Limit uintptr
+}
// BitRange represents a range over a bitmap.
type BitRange struct {
@@ -769,7 +799,7 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
p.grow(addr, pallocChunkBytes)
// Initialize the bitmap and update pageAlloc metadata.
- chunk := &p.chunks[chunkIndex(addr)]
+ chunk := p.chunkOf(chunkIndex(addr))
// Clear all the scavenged bits which grow set.
chunk.scavenged.clearRange(0, pallocChunkPages)
@@ -823,8 +853,13 @@ func FreePageAlloc(pp *PageAlloc) {
}
// Free the mapped space for chunks.
- chunksLen := uintptr(cap(p.chunks)) * unsafe.Sizeof(p.chunks[0])
- sysFree(unsafe.Pointer(&p.chunks[0]), alignUp(chunksLen, physPageSize), nil)
+ for i := range p.chunks {
+ if x := p.chunks[i]; x != nil {
+ p.chunks[i] = nil
+ // This memory comes from sysAlloc and will always be page-aligned.
+ sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
+ }
+ }
}
// BaseChunkIdx is a convenient chunkIdx value which works on both
@@ -861,7 +896,7 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
lock(&mheap_.lock)
chunkLoop:
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- chunk := &mheap_.pages.chunks[i]
+ chunk := mheap_.pages.chunkOf(i)
for j := 0; j < pallocChunkPages/64; j++ {
// Run over each 64-bit bitmap section and ensure
// scavenged is being cleared properly on allocation.
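
The exported AddrRange added above mirrors the runtime's internal addrRange as a half-open interval, and PallocData now has to walk the two-level chunks map (returning nil when the L2 is absent). The interval convention in isolation, a small sketch:

    package main

    import "fmt"

    // AddrRange represents [Base, Limit): Base is included, Limit is not.
    type AddrRange struct{ Base, Limit uintptr }

    func (r AddrRange) contains(a uintptr) bool { return r.Base <= a && a < r.Limit }
    func (r AddrRange) size() uintptr           { return r.Limit - r.Base }

    func main() {
    	r := AddrRange{Base: 0x1000, Limit: 0x2000}
    	fmt.Println(r.contains(0x1000), r.contains(0x2000), r.size())
    	// true false 4096
    }
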
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index dc3772d936..0ecc4eaf71 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -78,21 +78,6 @@ It is a comma-separated list of name=val pairs setting these named variables:
If the line ends with "(forced)", this GC was forced by a
runtime.GC() call.
- Setting gctrace to any value > 0 also causes the garbage collector
- to emit a summary when memory is released back to the system.
- This process of returning memory to the system is called scavenging.
- The format of this summary is subject to change.
- Currently it is:
- scvg#: # MB released printed only if non-zero
- scvg#: inuse: # idle: # sys: # released: # consumed: # (MB)
- where the fields are as follows:
- scvg# the scavenge cycle number, incremented at each scavenge
- inuse: # MB used or partially used spans
- idle: # MB spans pending scavenging
- sys: # MB mapped from the system
- released: # MB released to the system
- consumed: # MB allocated from the system
-
madvdontneed: setting madvdontneed=1 will use MADV_DONTNEED
instead of MADV_FREE on Linux when returning memory to the
kernel. This is less efficient, but causes RSS numbers to drop
@@ -114,6 +99,19 @@ It is a comma-separated list of name=val pairs setting these named variables:
scavenge: scavenge=1 enables debugging mode of heap scavenger.
+ scavtrace: setting scavtrace=1 causes the runtime to emit a single line to standard
+ error, roughly once per GC cycle, summarizing the amount of work done by the
+ scavenger as well as the total amount of memory returned to the operating system
+ and an estimate of physical memory utilization. The format of this line is subject
+ to change, but currently it is:
+ scav # KiB work, # KiB total, #% util
+ where the fields are as follows:
+ # KiB work the amount of memory returned to the OS since the last scav line
+ # KiB total how much of the heap at this point in time has been released to the OS
+ #% util the fraction of all unscavenged memory which is in-use
+ If the line ends with "(forced)", then scavenging was forced by a
+ debug.FreeOSMemory() call.
+
scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
detailed multiline info every X milliseconds, describing state of the scheduler,
processors, threads and goroutines.
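
In practice the trace documented above is enabled through GODEBUG like the other knobs in this list, e.g. GODEBUG=scavtrace=1 ./prog, which then prints roughly once per GC cycle a line such as

    scav 2048 KiB work, 16384 KiB total, 95% util

(the numbers here are invented for illustration; only the shape follows the format documented above).
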
diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go
index c228c779e4..ec1ba90c2e 100644
--- a/src/runtime/gcinfo_test.go
+++ b/src/runtime/gcinfo_test.go
@@ -179,7 +179,7 @@ func infoBigStruct() []byte {
typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
- case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "s390x", "wasm":
+ case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "wasm":
return []byte{
typePointer, // q *int
typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
diff --git a/src/runtime/hash64.go b/src/runtime/hash64.go
index 798d6dcd9e..d1283824ad 100644
--- a/src/runtime/hash64.go
+++ b/src/runtime/hash64.go
@@ -6,7 +6,7 @@
// xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/
-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
+// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm
package runtime
diff --git a/src/runtime/internal/atomic/atomic_riscv64.go b/src/runtime/internal/atomic/atomic_riscv64.go
new file mode 100644
index 0000000000..d52512369e
--- /dev/null
+++ b/src/runtime/internal/atomic/atomic_riscv64.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import "unsafe"
+
+//go:noescape
+func Xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func Xadd64(ptr *uint64, delta int64) uint64
+
+//go:noescape
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
+//go:noescape
+func Xchg(ptr *uint32, new uint32) uint32
+
+//go:noescape
+func Xchg64(ptr *uint64, new uint64) uint64
+
+//go:noescape
+func Xchguintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:noescape
+func Load(ptr *uint32) uint32
+
+//go:noescape
+func Load8(ptr *uint8) uint8
+
+//go:noescape
+func Load64(ptr *uint64) uint64
+
+// NO go:noescape annotation; *ptr escapes if result escapes (#31525)
+func Loadp(ptr unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func LoadAcq(ptr *uint32) uint32
+
+//go:noescape
+func Or8(ptr *uint8, val uint8)
+
+//go:noescape
+func And8(ptr *uint8, val uint8)
+
+//go:noescape
+func Cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func CasRel(ptr *uint32, old, new uint32) bool
+
+//go:noescape
+func Store(ptr *uint32, val uint32)
+
+//go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
+func Store64(ptr *uint64, val uint64)
+
+// NO go:noescape annotation; see atomic_pointer.go.
+func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
+
+//go:noescape
+func StoreRel(ptr *uint32, val uint32)
diff --git a/src/runtime/internal/atomic/atomic_riscv64.s b/src/runtime/internal/atomic/atomic_riscv64.s
new file mode 100644
index 0000000000..d79f28acde
--- /dev/null
+++ b/src/runtime/internal/atomic/atomic_riscv64.s
@@ -0,0 +1,242 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// RISC-V's atomic operations have two bits, aq ("acquire") and rl ("release"),
+// which may be toggled on and off. Their precise semantics are defined in
+// section 6.3 of the specification, but the basic idea is as follows:
+//
+// - If neither aq nor rl is set, the CPU may reorder the atomic arbitrarily.
+// It guarantees only that it will execute atomically.
+//
+// - If aq is set, the CPU may move the instruction backward, but not forward.
+//
+// - If rl is set, the CPU may move the instruction forward, but not backward.
+//
+// - If both are set, the CPU may not reorder the instruction at all.
+//
+// These four modes correspond to other well-known memory models on other CPUs.
+// On ARM, aq corresponds to a dmb ishst, aq+rl corresponds to a dmb ish. On
+// Intel, aq corresponds to an lfence, rl to an sfence, and aq+rl to an mfence
+// (or a lock prefix).
+//
+// Go's memory model requires that
+// - if a read happens after a write, the read must observe the write, and
+// that
+// - if a read happens concurrently with a write, the read may observe the
+// write.
+// aq is sufficient to guarantee this, so that's what we use here. (This jibes
+// with ARM, which uses dmb ishst.)
+
+#include "textflag.h"
+
+#define AMOWSC(op,rd,rs1,rs2) WORD $0x0600202f+rd<<7+rs1<<15+rs2<<20+op<<27
+#define AMODSC(op,rd,rs1,rs2) WORD $0x0600302f+rd<<7+rs1<<15+rs2<<20+op<<27
+#define ADD_ 0
+#define SWAP_ 1
+#define LR_ 2
+#define SC_ 3
+#define OR_ 8
+#define AND_ 12
+#define FENCE WORD $0x0ff0000f
+
+// Atomically:
+// if(*ptr == old){
+// *ptr = new;
+// return 1;
+// } else {
+// return 0;
+// }
+
+TEXT ·Cas(SB), NOSPLIT, $0-17
+ MOV ptr+0(FP), A0
+ MOVW old+8(FP), A1
+ MOVW new+12(FP), A2
+cas_again:
+ AMOWSC(LR_,13,10,0) // lr.w.aq a3,(a0)
+ BNE A3, A1, cas_fail
+ AMOWSC(SC_,14,10,12) // sc.w.aq a4,a2,(a0)
+ BNE A4, ZERO, cas_again
+ MOV $1, A0
+ MOVB A0, ret+16(FP)
+ RET
+cas_fail:
+ MOV $0, A0
+ MOV A0, ret+16(FP)
+ RET
+
+// func Cas64(ptr *uint64, old, new uint64) bool
+TEXT ·Cas64(SB), NOSPLIT, $0-25
+ MOV ptr+0(FP), A0
+ MOV old+8(FP), A1
+ MOV new+16(FP), A2
+cas_again:
+ AMODSC(LR_,13,10,0) // lr.d.aq a3,(a0)
+ BNE A3, A1, cas_fail
+ AMODSC(SC_,14,10,12) // sc.d.aq a4,a2,(a0)
+ BNE A4, ZERO, cas_again
+ MOV $1, A0
+ MOVB A0, ret+24(FP)
+ RET
+cas_fail:
+ MOVB ZERO, ret+24(FP)
+ RET
+
+// func Load(ptr *uint32) uint32
+TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
+ MOV ptr+0(FP), A0
+ AMOWSC(LR_,10,10,0)
+ MOVW A0, ret+8(FP)
+ RET
+
+// func Load8(ptr *uint8) uint8
+TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
+ MOV ptr+0(FP), A0
+ FENCE
+ MOVBU (A0), A1
+ FENCE
+ MOVB A1, ret+8(FP)
+ RET
+
+// func Load64(ptr *uint64) uint64
+TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
+ MOV ptr+0(FP), A0
+ AMODSC(LR_,10,10,0)
+ MOV A0, ret+8(FP)
+ RET
+
+// func Store(ptr *uint32, val uint32)
+TEXT ·Store(SB), NOSPLIT, $0-12
+ MOV ptr+0(FP), A0
+ MOVW val+8(FP), A1
+ AMOWSC(SWAP_,0,10,11)
+ RET
+
+// func Store8(ptr *uint8, val uint8)
+TEXT ·Store8(SB), NOSPLIT, $0-9
+ MOV ptr+0(FP), A0
+ MOVBU val+8(FP), A1
+ FENCE
+ MOVB A1, (A0)
+ FENCE
+ RET
+
+// func Store64(ptr *uint64, val uint64)
+TEXT ·Store64(SB), NOSPLIT, $0-16
+ MOV ptr+0(FP), A0
+ MOV val+8(FP), A1
+ AMODSC(SWAP_,0,10,11)
+ RET
+
+TEXT ·Casp1(SB), NOSPLIT, $0-25
+ JMP ·Cas64(SB)
+
+TEXT ·Casuintptr(SB),NOSPLIT,$0-25
+ JMP ·Cas64(SB)
+
+TEXT ·CasRel(SB), NOSPLIT, $0-17
+ JMP ·Cas(SB)
+
+TEXT ·Loaduintptr(SB),NOSPLIT,$0-16
+ JMP ·Load64(SB)
+
+TEXT ·Storeuintptr(SB),NOSPLIT,$0-16
+ JMP ·Store64(SB)
+
+TEXT ·Loaduint(SB),NOSPLIT,$0-16
+ JMP ·Loaduintptr(SB)
+
+TEXT ·Loadint64(SB),NOSPLIT,$0-16
+ JMP ·Loaduintptr(SB)
+
+TEXT ·Xaddint64(SB),NOSPLIT,$0-24
+ MOV ptr+0(FP), A0
+ MOV delta+8(FP), A1
+ WORD $0x04b5352f // amoadd.d.aq a0,a1,(a0)
+ ADD A0, A1, A0
+ MOV A0, ret+16(FP)
+ RET
+
+TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
+ JMP ·Load(SB)
+
+// func Loadp(ptr unsafe.Pointer) unsafe.Pointer
+TEXT ·Loadp(SB),NOSPLIT,$0-16
+ JMP ·Load64(SB)
+
+// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
+TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
+ JMP ·Store64(SB)
+
+TEXT ·StoreRel(SB), NOSPLIT, $0-12
+ JMP ·Store(SB)
+
+// func Xchg(ptr *uint32, new uint32) uint32
+TEXT ·Xchg(SB), NOSPLIT, $0-20
+ MOV ptr+0(FP), A0
+ MOVW new+8(FP), A1
+ AMOWSC(SWAP_,11,10,11)
+ MOVW A1, ret+16(FP)
+ RET
+
+// func Xchg64(ptr *uint64, new uint64) uint64
+TEXT ·Xchg64(SB), NOSPLIT, $0-24
+ MOV ptr+0(FP), A0
+ MOV new+8(FP), A1
+ AMODSC(SWAP_,11,10,11)
+ MOV A1, ret+16(FP)
+ RET
+
+// Atomically:
+// *ptr += delta;
+// return *ptr;
+
+// func Xadd(ptr *uint32, delta int32) uint32
+TEXT ·Xadd(SB), NOSPLIT, $0-20
+ MOV ptr+0(FP), A0
+ MOVW delta+8(FP), A1
+ AMOWSC(ADD_,12,10,11)
+ ADD A2,A1,A0
+ MOVW A0, ret+16(FP)
+ RET
+
+// func Xadd64(ptr *uint64, delta int64) uint64
+TEXT ·Xadd64(SB), NOSPLIT, $0-24
+ MOV ptr+0(FP), A0
+ MOV delta+8(FP), A1
+ AMODSC(ADD_,12,10,11)
+ ADD A2,A1,A0
+ MOV A0, ret+16(FP)
+ RET
+
+// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
+ JMP ·Xadd64(SB)
+
+// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
+TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
+ JMP ·Xchg64(SB)
+
+// func And8(ptr *uint8, val uint8)
+TEXT ·And8(SB), NOSPLIT, $0-9
+ MOV ptr+0(FP), A0
+ MOVBU val+8(FP), A1
+ AND $3, A0, A2
+ AND $-4, A0
+ SLL $3, A2
+ XOR $255, A1
+ SLL A2, A1
+ XOR $-1, A1
+ AMOWSC(AND_,0,10,11)
+ RET
+
+// func Or8(ptr *uint8, val uint8)
+TEXT ·Or8(SB), NOSPLIT, $0-9
+ MOV ptr+0(FP), A0
+ MOVBU val+8(FP), A1
+ AND $3, A0, A2
+ AND $-4, A0
+ SLL $3, A2
+ SLL A2, A1
+ AMOWSC(OR_,0,10,11)
+ RET
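
In Go terms, the Cas routines above are the classic load-reserved/store-conditional retry loop. A rough equivalent using sync/atomic, for orientation only (the runtime itself uses its internal atomic package, not sync/atomic):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // add increments *p with a CAS retry loop -- the same shape as
    // ·Cas's lr.w / sc.w / BNE ... cas_again sequence above.
    func add(p *uint32, delta uint32) uint32 {
    	for {
    		old := atomic.LoadUint32(p) // like lr.w.aq
    		if atomic.CompareAndSwapUint32(p, old, old+delta) {
    			return old + delta
    		}
    		// Store-conditional failed: another writer intervened; retry.
    	}
    }

    func main() {
    	var x uint32
    	fmt.Println(add(&x, 5)) // 5
    }
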
diff --git a/src/runtime/internal/sys/arch.go b/src/runtime/internal/sys/arch.go
index 75beb7872f..13c00cf639 100644
--- a/src/runtime/internal/sys/arch.go
+++ b/src/runtime/internal/sys/arch.go
@@ -14,6 +14,7 @@ const (
MIPS
MIPS64
PPC64
+ RISCV64
S390X
WASM
)
diff --git a/src/runtime/internal/sys/arch_riscv64.go b/src/runtime/internal/sys/arch_riscv64.go
new file mode 100644
index 0000000000..7cdcc8fcbd
--- /dev/null
+++ b/src/runtime/internal/sys/arch_riscv64.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ ArchFamily = RISCV64
+ BigEndian = false
+ CacheLineSize = 64
+ DefaultPhysPageSize = 4096
+ PCQuantum = 4
+ Int64Align = 8
+ HugePageSize = 1 << 21
+ MinFrameSize = 8
+)
+
+type Uintreg uint64
diff --git a/src/runtime/lfstack_64bit.go b/src/runtime/lfstack_64bit.go
index ea3455a8c4..9d821b989e 100644
--- a/src/runtime/lfstack_64bit.go
+++ b/src/runtime/lfstack_64bit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
+// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm
package runtime
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 47ed470504..de363408e7 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -502,6 +502,7 @@ func mallocinit() {
// allocation at 0x40 << 32 because when using 4k pages with 3-level
// translation buffers, the user address space is limited to 39 bits.
// On darwin/arm64, the address space is even smaller.
+ //
// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
// processes.
for i := 0x7f; i >= 0; i-- {
diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go
index 5ed4feb77d..5c97f548fd 100644
--- a/src/runtime/malloc_test.go
+++ b/src/runtime/malloc_test.go
@@ -204,14 +204,6 @@ type acLink struct {
var arenaCollisionSink []*acLink
func TestArenaCollision(t *testing.T) {
- if GOOS == "darwin" && race.Enabled {
- // Skip this test on Darwin in race mode because Darwin 10.10 has
- // issues following arena hints and runs out of them in race mode, so
- // MAP_FIXED is used to ensure we keep the heap in the memory region the
- // race detector expects.
- // TODO(mknyszek): Delete this when Darwin 10.10 is no longer supported.
- t.Skip("disabled on Darwin with race mode since MAP_FIXED is used")
- }
testenv.MustHaveExec(t)
// Test that mheap.sysAlloc handles collisions with other
diff --git a/src/runtime/mem_aix.go b/src/runtime/mem_aix.go
index eeebfa73ad..7e145b072a 100644
--- a/src/runtime/mem_aix.go
+++ b/src/runtime/mem_aix.go
@@ -63,14 +63,15 @@ func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
mSysStatInc(sysStat, n)
// AIX does not allow mapping a range that is already mapped.
- // So always unmap first even if it is already unmapped.
- munmap(v, n)
- p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
-
+ // So, call mprotect to change permissions.
+ // Note that sysMap is always called with a non-nil pointer
+ // since it transitions a Reserved memory region to Prepared,
+ // so mprotect is always possible.
+ _, err := mprotect(v, n, _PROT_READ|_PROT_WRITE)
if err == _ENOMEM {
throw("runtime: out of memory")
}
- if p != v || err != 0 {
+ if err != 0 {
throw("runtime: cannot map pages in arena address space")
}
}
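
The AIX fix above works because the region handed to sysMap is already mapped PROT_NONE (Reserved), so raising its permissions with mprotect transitions it to Prepared without the unmap/remap dance. A user-space sketch of the same reserve-then-commit pattern using golang.org/x/sys/unix — an assumption for illustration; the runtime calls its own syscall wrappers, not this package:

    package main

    import "golang.org/x/sys/unix"

    func reserveThenCommit(n int) ([]byte, error) {
    	// Reserve: mapped but inaccessible (like the runtime's Reserved state).
    	b, err := unix.Mmap(-1, 0, n, unix.PROT_NONE,
    		unix.MAP_ANON|unix.MAP_PRIVATE)
    	if err != nil {
    		return nil, err
    	}
    	// Commit: raise permissions in place instead of remapping,
    	// which is the change made for AIX above.
    	if err := unix.Mprotect(b, unix.PROT_READ|unix.PROT_WRITE); err != nil {
    		return nil, err
    	}
    	return b, nil
    }
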
diff --git a/src/runtime/memclr_riscv64.s b/src/runtime/memclr_riscv64.s
new file mode 100755
index 0000000000..ba7704e805
--- /dev/null
+++ b/src/runtime/memclr_riscv64.s
@@ -0,0 +1,44 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// void runtime·memclrNoHeapPointers(void*, uintptr)
+TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
+ MOV ptr+0(FP), T1
+ MOV n+8(FP), T2
+ ADD T1, T2, T4
+
+ // If less than eight bytes, do one byte at a time.
+ SLTU $8, T2, T3
+ BNE T3, ZERO, outcheck
+
+ // Do one byte at a time until eight-aligned.
+ JMP aligncheck
+align:
+ MOVB ZERO, (T1)
+ ADD $1, T1
+aligncheck:
+ AND $7, T1, T3
+ BNE T3, ZERO, align
+
+ // Do eight bytes at a time as long as there is room.
+ ADD $-7, T4, T5
+ JMP wordscheck
+words:
+ MOV ZERO, (T1)
+ ADD $8, T1
+wordscheck:
+ SLTU T5, T1, T3
+ BNE T3, ZERO, words
+
+ JMP outcheck
+out:
+ MOVB ZERO, (T1)
+ ADD $1, T1
+outcheck:
+ BNE T1, T4, out
+
+done:
+ RET
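
The new routine clears memory in three phases: bytes until the pointer is 8-aligned, full 8-byte words while at least eight bytes remain, then the trailing bytes. The same structure in Go, as a sketch for illustration only:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // clear zeroes b with the same three phases as the assembly:
    // align / words / out.
    func clear(b []byte) {
    	i := 0
    	if len(b) >= 8 {
    		// align: one byte at a time until 8-aligned.
    		for uintptr(unsafe.Pointer(&b[i]))%8 != 0 {
    			b[i] = 0
    			i++
    		}
    		// words: 8 bytes at a time as long as there is room.
    		for ; i+8 <= len(b); i += 8 {
    			*(*uint64)(unsafe.Pointer(&b[i])) = 0
    		}
    	}
    	// out: remaining tail bytes.
    	for ; i < len(b); i++ {
    		b[i] = 0
    	}
    }

    func main() {
    	b := []byte("hello, riscv64!!!")
    	clear(b)
    	fmt.Println(b)
    }
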
diff --git a/src/runtime/memmove_386.s b/src/runtime/memmove_386.s
index 7b54070f59..ecadee39af 100644
--- a/src/runtime/memmove_386.s
+++ b/src/runtime/memmove_386.s
@@ -28,6 +28,8 @@
#include "go_asm.h"
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT, $0-12
MOVL to+0(FP), DI
diff --git a/src/runtime/memmove_amd64.s b/src/runtime/memmove_amd64.s
index b4243a833b..9458351fec 100644
--- a/src/runtime/memmove_amd64.s
+++ b/src/runtime/memmove_amd64.s
@@ -28,6 +28,8 @@
#include "go_asm.h"
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT, $0-24
diff --git a/src/runtime/memmove_arm.s b/src/runtime/memmove_arm.s
index 8352fb7860..7bad8d2249 100644
--- a/src/runtime/memmove_arm.s
+++ b/src/runtime/memmove_arm.s
@@ -58,6 +58,8 @@
#define FW3 R4
#define FR3 R8 /* shared with TE */
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT, $4-12
_memmove:
diff --git a/src/runtime/memmove_arm64.s b/src/runtime/memmove_arm64.s
index ac29f94c7b..dbb7e9a28a 100644
--- a/src/runtime/memmove_arm64.s
+++ b/src/runtime/memmove_arm64.s
@@ -4,6 +4,8 @@
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24
MOVD to+0(FP), R3
@@ -22,7 +24,7 @@ check:
CMP R3, R4
BLT backward
- // Copying forward proceeds by copying R7/8 words then copying R6 bytes.
+ // Copying forward proceeds by copying R7/32 quadwords then R6 <= 31 tail bytes.
// R3 and R4 are advanced as we copy.
// (There may be implementations of armv8 where copying by bytes until
@@ -30,11 +32,12 @@ check:
// optimization, but on the one tested so far (xgene) it did not
// make a significant difference.)
- CBZ R7, noforwardlarge // Do we need to do any doubleword-by-doubleword copying?
+ CBZ R7, noforwardlarge // Do we need to do any quadword copying?
ADD R3, R7, R9 // R9 points just past where we copy by word
forwardlargeloop:
+ // Copy 32 bytes at a time.
LDP.P 32(R4), (R8, R10)
STP.P (R8, R10), 32(R3)
LDP -16(R4), (R11, R12)
@@ -43,10 +46,26 @@ forwardlargeloop:
CBNZ R7, forwardlargeloop
noforwardlarge:
- CBNZ R6, forwardtail // Do we need to do any byte-by-byte copying?
+ CBNZ R6, forwardtail // Do we need to copy any tail bytes?
RET
forwardtail:
+ // There are R6 <= 31 bytes remaining to copy.
+ // This is large enough to still contain pointers,
+ // which must be copied atomically.
+ // Copy the next 16 bytes, then 8 bytes, then any remaining bytes.
+ TBZ $4, R6, 3(PC) // write 16 bytes if R6&16 != 0
+ LDP.P 16(R4), (R8, R10)
+ STP.P (R8, R10), 16(R3)
+
+ TBZ $3, R6, 3(PC) // write 8 bytes if R6&8 != 0
+ MOVD.P 8(R4), R8
+ MOVD.P R8, 8(R3)
+
+ AND $7, R6
+ CBNZ R6, 2(PC)
+ RET
+
ADD R3, R6, R9 // R9 points just past the destination memory
forwardtailloop:
@@ -90,7 +109,7 @@ copy1:
RET
backward:
- // Copying backwards proceeds by copying R6 bytes then copying R7/8 words.
+ // Copying backwards first copies R6 <= 31 tail bytes, then R7/32 quadwords.
// R3 and R4 are advanced to the end of the destination/source buffers
// respectively and moved back as we copy.
@@ -99,13 +118,28 @@ backward:
CBZ R6, nobackwardtail // Do we need to do any byte-by-byte copying?
- SUB R6, R3, R9 // R9 points at the lowest destination byte that should be copied by byte.
+ AND $7, R6, R12
+ CBZ R12, backwardtaillarge
+
+ SUB R12, R3, R9 // R9 points at the lowest destination byte that should be copied by byte.
backwardtailloop:
+ // Copy sub-pointer-size tail.
MOVBU.W -1(R4), R8
MOVBU.W R8, -1(R3)
CMP R9, R3
BNE backwardtailloop
+backwardtaillarge:
+ // Do 8/16-byte write if possible.
+ // See comment at forwardtail.
+ TBZ $3, R6, 3(PC)
+ MOVD.W -8(R4), R8
+ MOVD.W R8, -8(R3)
+
+ TBZ $4, R6, 3(PC)
+ LDP.W -16(R4), (R8, R10)
+ STP.W (R8, R10), -16(R3)
+
nobackwardtail:
CBNZ R7, backwardlarge // Do we need to do any doubleword-by-doubleword copying?
RET
diff --git a/src/runtime/memmove_mips64x.s b/src/runtime/memmove_mips64x.s
index a4cb7dc81e..8a1b88afba 100644
--- a/src/runtime/memmove_mips64x.s
+++ b/src/runtime/memmove_mips64x.s
@@ -6,6 +6,8 @@
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24
MOVV to+0(FP), R1
diff --git a/src/runtime/memmove_mipsx.s b/src/runtime/memmove_mipsx.s
index 13544a3598..6c86558f8d 100644
--- a/src/runtime/memmove_mipsx.s
+++ b/src/runtime/memmove_mipsx.s
@@ -14,6 +14,8 @@
#define MOVWLO MOVWL
#endif
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB),NOSPLIT,$-0-12
MOVW n+8(FP), R3
diff --git a/src/runtime/memmove_plan9_386.s b/src/runtime/memmove_plan9_386.s
index 65dec93f6b..1b2f8470ae 100644
--- a/src/runtime/memmove_plan9_386.s
+++ b/src/runtime/memmove_plan9_386.s
@@ -25,6 +25,8 @@
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT, $0-12
MOVL to+0(FP), DI
diff --git a/src/runtime/memmove_plan9_amd64.s b/src/runtime/memmove_plan9_amd64.s
index b729c7c0e7..68e11d59fd 100644
--- a/src/runtime/memmove_plan9_amd64.s
+++ b/src/runtime/memmove_plan9_amd64.s
@@ -25,6 +25,8 @@
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT, $0-24
diff --git a/src/runtime/memmove_ppc64x.s b/src/runtime/memmove_ppc64x.s
index 60cbcc41ec..dbb3b90fcf 100644
--- a/src/runtime/memmove_ppc64x.s
+++ b/src/runtime/memmove_ppc64x.s
@@ -6,6 +6,8 @@
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24
MOVD to+0(FP), R3
diff --git a/src/runtime/memmove_riscv64.s b/src/runtime/memmove_riscv64.s
new file mode 100755
index 0000000000..5dec8d0a33
--- /dev/null
+++ b/src/runtime/memmove_riscv64.s
@@ -0,0 +1,98 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// See memmove Go doc for important implementation constraints.
+
+// void runtime·memmove(void*, void*, uintptr)
+TEXT runtime·memmove(SB),NOSPLIT,$-0-24
+ MOV to+0(FP), T0
+ MOV from+8(FP), T1
+ MOV n+16(FP), T2
+ ADD T1, T2, T5
+
+ // If the destination is ahead of the source, start at the end of the
+ // buffer and go backward.
+ BLTU T1, T0, b
+
+ // If less than eight bytes, do one byte at a time.
+ SLTU $8, T2, T3
+ BNE T3, ZERO, f_outcheck
+
+ // Do one byte at a time until from is eight-aligned.
+ JMP f_aligncheck
+f_align:
+ MOVB (T1), T3
+ MOVB T3, (T0)
+ ADD $1, T0
+ ADD $1, T1
+f_aligncheck:
+ AND $7, T1, T3
+ BNE T3, ZERO, f_align
+
+ // Do eight bytes at a time as long as there is room.
+ ADD $-7, T5, T6
+ JMP f_wordscheck
+f_words:
+ MOV (T1), T3
+ MOV T3, (T0)
+ ADD $8, T0
+ ADD $8, T1
+f_wordscheck:
+ SLTU T6, T1, T3
+ BNE T3, ZERO, f_words
+
+ // Finish off the remaining partial word.
+ JMP f_outcheck
+f_out:
+ MOVB (T1), T3
+ MOVB T3, (T0)
+ ADD $1, T0
+ ADD $1, T1
+f_outcheck:
+ BNE T1, T5, f_out
+
+ RET
+
+b:
+ ADD T0, T2, T4
+ // If less than eight bytes, do one byte at a time.
+ SLTU $8, T2, T3
+ BNE T3, ZERO, b_outcheck
+
+ // Do one byte at a time until from+n is eight-aligned.
+ JMP b_aligncheck
+b_align:
+ ADD $-1, T4
+ ADD $-1, T5
+ MOVB (T5), T3
+ MOVB T3, (T4)
+b_aligncheck:
+ AND $7, T5, T3
+ BNE T3, ZERO, b_align
+
+ // Do eight bytes at a time as long as there is room.
+ ADD $7, T1, T6
+ JMP b_wordscheck
+b_words:
+ ADD $-8, T4
+ ADD $-8, T5
+ MOV (T5), T3
+ MOV T3, (T4)
+b_wordscheck:
+ SLTU T5, T6, T3
+ BNE T3, ZERO, b_words
+
+ // Finish off the remaining partial word.
+ JMP b_outcheck
+b_out:
+ ADD $-1, T4
+ ADD $-1, T5
+ MOVB (T5), T3
+ MOVB T3, (T4)
+b_outcheck:
+ BNE T5, T1, b_out
+
+ RET
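
The BLTU T1, T0, b at the top selects the backward loop exactly when the destination starts above the source, so overlapping bytes are read before they are overwritten. The direction choice in Go, as a sketch (assumes len(dst) >= len(src)):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func move(dst, src []byte) {
    	n := len(src)
    	if n == 0 {
    		return
    	}
    	if uintptr(unsafe.Pointer(&dst[0])) > uintptr(unsafe.Pointer(&src[0])) {
    		for i := n - 1; i >= 0; i-- { // backward, like label b: above
    			dst[i] = src[i]
    		}
    		return
    	}
    	for i := 0; i < n; i++ { // forward path
    		dst[i] = src[i]
    	}
    }

    func main() {
    	b := []byte("abcdef")
    	move(b[1:], b[:5]) // overlapping, dst above src: backward copy
    	fmt.Println(string(b)) // "aabcde"
    }
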
diff --git a/src/runtime/memmove_s390x.s b/src/runtime/memmove_s390x.s
index 4ce98b0a95..f4c2b87d92 100644
--- a/src/runtime/memmove_s390x.s
+++ b/src/runtime/memmove_s390x.s
@@ -4,6 +4,8 @@
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB),NOSPLIT|NOFRAME,$0-24
MOVD to+0(FP), R6
diff --git a/src/runtime/memmove_test.go b/src/runtime/memmove_test.go
index 0b2e19123d..396c1304c5 100644
--- a/src/runtime/memmove_test.go
+++ b/src/runtime/memmove_test.go
@@ -11,7 +11,9 @@ import (
"internal/race"
"internal/testenv"
. "runtime"
+ "sync/atomic"
"testing"
+ "unsafe"
)
func TestMemmove(t *testing.T) {
@@ -206,6 +208,71 @@ func cmpb(a, b []byte) int {
return l
}
+// Ensure that memmove writes pointers atomically, so the GC won't
+// observe a partially updated pointer.
+func TestMemmoveAtomicity(t *testing.T) {
+ if race.Enabled {
+ t.Skip("skip under the race detector -- this test is intentionally racy")
+ }
+
+ var x int
+
+ for _, backward := range []bool{true, false} {
+ for _, n := range []int{3, 4, 5, 6, 7, 8, 9, 10, 15, 25, 49} {
+ n := n
+
+ // test copying [N]*int.
+ sz := uintptr(n * PtrSize)
+ name := fmt.Sprint(sz)
+ if backward {
+ name += "-backward"
+ } else {
+ name += "-forward"
+ }
+ t.Run(name, func(t *testing.T) {
+ // Use overlapping src and dst to force forward/backward copy.
+ var s [100]*int
+ src := s[n-1 : 2*n-1]
+ dst := s[:n]
+ if backward {
+ src, dst = dst, src
+ }
+ for i := range src {
+ src[i] = &x
+ }
+ for i := range dst {
+ dst[i] = nil
+ }
+
+ var ready uint32
+ go func() {
+ sp := unsafe.Pointer(&src[0])
+ dp := unsafe.Pointer(&dst[0])
+ atomic.StoreUint32(&ready, 1)
+ for i := 0; i < 10000; i++ {
+ Memmove(dp, sp, sz)
+ MemclrNoHeapPointers(dp, sz)
+ }
+ atomic.StoreUint32(&ready, 2)
+ }()
+
+ for atomic.LoadUint32(&ready) == 0 {
+ Gosched()
+ }
+
+ for atomic.LoadUint32(&ready) != 2 {
+ for i := range dst {
+ p := dst[i]
+ if p != nil && p != &x {
+ t.Fatalf("got partially updated pointer %p at dst[%d], want either nil or %p", p, i, &x)
+ }
+ }
+ }
+ })
+ }
+ }
+}
+
func benchmarkSizes(b *testing.B, sizes []int, fn func(b *testing.B, n int)) {
for _, n := range sizes {
b.Run(fmt.Sprint(n), func(b *testing.B) {
diff --git a/src/runtime/memmove_wasm.s b/src/runtime/memmove_wasm.s
index d5e2016930..8525fea35e 100644
--- a/src/runtime/memmove_wasm.s
+++ b/src/runtime/memmove_wasm.s
@@ -4,6 +4,8 @@
#include "textflag.h"
+// See memmove Go doc for important implementation constraints.
+
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT, $0-24
MOVD to+0(FP), R0
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index bda8eadc9d..604d7d09b4 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -1269,7 +1269,6 @@ func gcStart(trigger gcTrigger) {
}
// Ok, we're doing it! Stop everybody else
- semacquire(&gcsema)
semacquire(&worldsema)
if trace.enabled {
@@ -1368,13 +1367,6 @@ func gcStart(trigger gcTrigger) {
work.pauseNS += now - work.pauseStart
work.tMark = now
})
-
- // Release the world sema before Gosched() in STW mode
- // because we will need to reacquire it later but before
- // this goroutine becomes runnable again, and we could
- // self-deadlock otherwise.
- semrelease(&worldsema)
-
// In STW mode, we could block the instant systemstack
// returns, so don't do anything important here. Make sure we
// block rather than returning to user code.
@@ -1444,10 +1436,6 @@ top:
return
}
- // forEachP needs worldsema to execute, and we'll need it to
- // stop the world later, so acquire worldsema now.
- semacquire(&worldsema)
-
// Flush all local buffers and collect flushedWork flags.
gcMarkDoneFlushed = 0
systemstack(func() {
@@ -1508,7 +1496,6 @@ top:
// work to do. Keep going. It's possible the
// transition condition became true again during the
// ragged barrier, so re-check it.
- semrelease(&worldsema)
goto top
}
@@ -1585,7 +1572,6 @@ top:
now := startTheWorldWithSema(true)
work.pauseNS += now - work.pauseStart
})
- semrelease(&worldsema)
goto top
}
}
@@ -1803,7 +1789,6 @@ func gcMarkTermination(nextTriggerRatio float64) {
}
semrelease(&worldsema)
- semrelease(&gcsema)
// Careful: another GC cycle may start now.
releasem(mp)
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index e02c874f66..54f988a902 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -46,8 +46,6 @@ const (
// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
-// The caller must have call gcCopySpans().
-//
// The world must be stopped.
//
//go:nowritebarrier
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index c7bab59fb7..c2625095f6 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -80,6 +80,17 @@ const (
// maxPagesPerPhysPage is the maximum number of supported runtime pages per
// physical page, based on maxPhysPageSize.
maxPagesPerPhysPage = maxPhysPageSize / pageSize
+
+ // scavengeCostRatio is the approximate ratio between the costs of using previously
+ // scavenged memory and scavenging memory.
+ //
+ // For most systems the cost of scavenging greatly outweighs the costs
+ // associated with using scavenged memory, making this constant 0. On other systems
+ // (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial.
+ //
+ // This ratio is used as part of multiplicative factor to help the scavenger account
+ // for the additional costs of using scavenged memory in its pacing.
+ scavengeCostRatio = 0.7 * sys.GoosDarwin
)
// heapRetained returns an estimate of the current heap RSS.
@@ -246,7 +257,7 @@ func bgscavenge(c chan int) {
released := uintptr(0)
// Time in scavenging critical section.
- crit := int64(0)
+ crit := float64(0)
// Run on the system stack since we grab the heap lock,
// and a stack growth with the heap lock means a deadlock.
@@ -264,16 +275,10 @@ func bgscavenge(c chan int) {
// Scavenge one page, and measure the amount of time spent scavenging.
start := nanotime()
released = mheap_.pages.scavengeOne(physPageSize, false)
- crit = nanotime() - start
+ atomic.Xadduintptr(&mheap_.pages.scavReleased, released)
+ crit = float64(nanotime() - start)
})
- if debug.gctrace > 0 {
- if released > 0 {
- print("scvg: ", released>>10, " KB released\n")
- }
- print("scvg: inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
- }
-
if released == 0 {
lock(&scavenge.lock)
scavenge.parked = true
@@ -281,6 +286,14 @@ func bgscavenge(c chan int) {
continue
}
+ // Multiply the critical time by 1 + the ratio of the costs of using
+ // scavenged memory vs. scavenging memory. This forces us to pay down
+ // the cost of reusing this memory eagerly by sleeping for a longer period
+ // of time and scavenging less frequently. More concretely, we avoid situations
+ // where we end up scavenging so often that we hurt allocation performance
+ // because of the additional overheads of using scavenged memory.
+ crit *= 1 + scavengeCostRatio
+
// If we spent more than 10 ms (for example, if the OS scheduled us away, or someone
// put their machine to sleep) in the critical section, bound the time we use to
// calculate at 10 ms to avoid letting the sleep time get arbitrarily high.
@@ -296,13 +309,13 @@ func bgscavenge(c chan int) {
// much, then scavengeEMWA < idealFraction, so we'll adjust the sleep time
// down.
adjust := scavengeEWMA / idealFraction
- sleepTime := int64(adjust * float64(crit) / (scavengePercent / 100.0))
+ sleepTime := int64(adjust * crit / (scavengePercent / 100.0))
// Go to sleep.
slept := scavengeSleep(sleepTime)
// Compute the new ratio.
- fraction := float64(crit) / float64(crit+slept)
+ fraction := crit / (crit + float64(slept))
// Set a lower bound on the fraction.
// Due to OS-related anomalies we may "sleep" for an inordinate amount
@@ -346,12 +359,39 @@ func (s *pageAlloc) scavenge(nbytes uintptr, locked bool) uintptr {
return released
}
+// printScavTrace prints a scavenge trace line to standard error.
+//
+// released should be the amount of memory released since the last time this
+// was called, and forced indicates whether the scavenge was forced by the
+// application.
+func printScavTrace(released uintptr, forced bool) {
+ printlock()
+ print("scav ",
+ released>>10, " KiB work, ",
+ atomic.Load64(&memstats.heap_released)>>10, " KiB total, ",
+ (atomic.Load64(&memstats.heap_inuse)*100)/heapRetained(), "% util",
+ )
+ if forced {
+ print(" (forced)")
+ }
+ println()
+ printunlock()
+}
+
// resetScavengeAddr sets the scavenge start address to the top of the heap's
// address space. This should be called each time the scavenger's pacing
// changes.
//
// s.mheapLock must be held.
func (s *pageAlloc) resetScavengeAddr() {
+ released := atomic.Loaduintptr(&s.scavReleased)
+ if debug.scavtrace > 0 {
+ printScavTrace(released, false)
+ }
+ // Subtract from scavReleased instead of just setting it to zero because
+ // the scavenger could have increased scavReleased concurrently with the
+ // load above, and we may miss an update by just blindly zeroing the field.
+ atomic.Xadduintptr(&s.scavReleased, -released)
s.scavAddr = chunkBase(s.end) - 1
}
@@ -405,22 +445,24 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
}
lockHeap()
- top := chunkIndex(s.scavAddr)
- if top < s.start {
+ ci := chunkIndex(s.scavAddr)
+ if ci < s.start {
unlockHeap()
return 0
}
// Check the chunk containing the scav addr, starting at the addr
// and see if there are any free and unscavenged pages.
- ci := chunkIndex(s.scavAddr)
- if s.summary[len(s.summary)-1][ci].max() >= uint(minPages) {
+ //
+ // Only check this if s.scavAddr is covered by any address range
+ // in s.inUse, so that we know our check of the summary is safe.
+ if s.inUse.contains(s.scavAddr) && s.summary[len(s.summary)-1][ci].max() >= uint(minPages) {
// We only bother looking for a candidate if there at least
// minPages free pages at all. It's important that we only
// continue if the summary says we can because that's how
// we can tell if parts of the address space are unused.
// See the comment on s.chunks in mpagealloc.go.
- base, npages := s.chunks[ci].findScavengeCandidate(chunkPageIndex(s.scavAddr), minPages, maxPages)
+ base, npages := s.chunkOf(ci).findScavengeCandidate(chunkPageIndex(s.scavAddr), minPages, maxPages)
// If we found something, scavenge it and return!
if npages != 0 {
@@ -429,55 +471,97 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
return uintptr(npages) * pageSize
}
}
- unlockHeap()
- // Slow path: iterate optimistically looking for any free and unscavenged page.
- // If we think we see something, stop and verify it!
- for i := top - 1; i >= s.start; i-- {
- // If this chunk is totally in-use or has no unscavenged pages, don't bother
- // doing a more sophisticated check.
- //
- // Note we're accessing the summary and the chunks without a lock, but
- // that's fine. We're being optimistic anyway.
-
- // Check if there are enough free pages at all. It's imperative that we
- // check this before the chunk itself so that we quickly skip over
- // unused parts of the address space, which may have a cleared bitmap
- // but a zero'd summary which indicates not to allocate from there.
- if s.summary[len(s.summary)-1][i].max() < uint(minPages) {
- continue
+ // getInUseRange returns the highest range in the
+ // intersection of [0, addr] and s.inUse.
+ //
+ // s.mheapLock must be held.
+ getInUseRange := func(addr uintptr) addrRange {
+ top := s.inUse.findSucc(addr)
+ if top == 0 {
+ return addrRange{}
+ }
+ r := s.inUse.ranges[top-1]
+ // addr is inclusive, so treat it as such when
+ // updating the limit, which is exclusive.
+ if r.limit > addr+1 {
+ r.limit = addr + 1
}
+ return r
+ }
- // Run over the chunk looking harder for a candidate. Again, we could
- // race with a lot of different pieces of code, but we're just being
- // optimistic.
- if !s.chunks[i].hasScavengeCandidate(minPages) {
- continue
+ // Slow path: iterate optimistically over the in-use address space
+ // looking for any free and unscavenged page. If we think we see something,
+ // lock and verify it!
+ //
+ // We iterate over the address space by taking ranges from inUse.
+newRange:
+ for {
+ r := getInUseRange(s.scavAddr)
+ if r.size() == 0 {
+ break
}
+ unlockHeap()
- // We found a candidate, so let's lock and verify it.
- lockHeap()
+ // Iterate over all of the chunks described by r.
+ // Note that r.limit is the exclusive upper bound, but what
+ // we want is the top chunk instead, inclusive, so subtract 1.
+ bot, top := chunkIndex(r.base), chunkIndex(r.limit-1)
+ for i := top; i >= bot; i-- {
+ // If this chunk is totally in-use or has no unscavenged pages, don't bother
+ // doing a more sophisticated check.
+ //
+ // Note we're accessing the summary and the chunks without a lock, but
+ // that's fine. We're being optimistic anyway.
- // Find, verify, and scavenge if we can.
- chunk := &s.chunks[i]
- base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
- if npages > 0 {
- // We found memory to scavenge! Mark the bits and report that up.
- s.scavengeRangeLocked(i, base, npages)
- unlockHeap()
- return uintptr(npages) * pageSize
+ // Check quickly if there are enough free pages at all.
+ if s.summary[len(s.summary)-1][i].max() < uint(minPages) {
+ continue
+ }
+
+ // Run over the chunk looking harder for a candidate. Again, we could
+ // race with a lot of different pieces of code, but we're just being
+ // optimistic. Make sure we load the l2 pointer atomically though, to
+ // avoid races with heap growth. It may or may not be possible to also
+ // see a nil pointer in this case if we do race with heap growth, but
+ // just defensively ignore the nils. This operation is optimistic anyway.
+ l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&s.chunks[i.l1()])))
+ if l2 == nil || !l2[i.l2()].hasScavengeCandidate(minPages) {
+ continue
+ }
+
+ // We found a candidate, so let's lock and verify it.
+ lockHeap()
+
+ // Find, verify, and scavenge if we can.
+ chunk := s.chunkOf(i)
+ base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
+ if npages > 0 {
+ // We found memory to scavenge! Mark the bits and report that up.
+ // scavengeRangeLocked will also update scavAddr for us.
+ s.scavengeRangeLocked(i, base, npages)
+ unlockHeap()
+ return uintptr(npages) * pageSize
+ }
+
+ // We were fooled, let's take this opportunity to move the scavAddr
+ // all the way down to where we searched as scavenged for future calls
+ // and keep iterating. Then, go get a new range.
+ s.scavAddr = chunkBase(i-1) + pallocChunkPages*pageSize - 1
+ continue newRange
}
+ lockHeap()
- // We were fooled, let's take this opportunity to move the scavAddr
- // all the way down to where we searched as scavenged for future calls
- // and keep iterating.
- s.scavAddr = chunkBase(i-1) + pallocChunkPages*pageSize - 1
- unlockHeap()
+ // Move the scavenger down the heap, past everything we just searched.
+ // Since we don't check if scavAddr moved while we let go of the heap lock,
+ // it's possible that it moved down and we're moving it up here. This
+ // raciness could result in us searching parts of the heap unnecessarily.
+ // TODO(mknyszek): Remove this racy behavior through explicit address
+ // space reservations, which are difficult to do with just scavAddr.
+ s.scavAddr = r.base - 1
}
-
- lockHeap()
- // We couldn't find anything, so signal that there's nothing left
- // to scavenge.
+ // We reached the end of the in-use address space and couldn't find anything,
+ // so signal that there's nothing left to scavenge.
s.scavAddr = minScavAddr
unlockHeap()
@@ -488,7 +572,7 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
//
// s.mheapLock must be held.
func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) {
- s.chunks[ci].scavenged.setRange(base, npages)
+ s.chunkOf(ci).scavenged.setRange(base, npages)
// Compute the full address for the start of the range.
addr := chunkBase(ci) + uintptr(base)*pageSize
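
The pacing in bgscavenge above boils down to: inflate the measured critical time by the cost ratio, then sleep long enough that the scavenger stays near its CPU budget. A worked example with invented numbers (scavengePercent is assumed here to be 1, i.e. a roughly 1% budget):

    package main

    import "fmt"

    func main() {
    	// All numbers are illustrative assumptions.
    	const scavengePercent = 1.0 // assumed ~1% CPU budget
    	crit := 100_000.0           // ns spent scavenging one page
    	crit *= 1 + 0.0             // scavengeCostRatio (0 off darwin)
    	adjust := 1.0               // scavengeEWMA / idealFraction, on target
    	sleepTime := adjust * crit / (scavengePercent / 100.0)
    	fmt.Println(sleepTime) // 1e7 ns = 10ms: sleeping ~100x the work
    	// time keeps crit/(crit+slept) near the ~1% target fraction.
    }
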
diff --git a/src/runtime/mgcscavenge_test.go b/src/runtime/mgcscavenge_test.go
index 518d5ab27a..58f9e3a80d 100644
--- a/src/runtime/mgcscavenge_test.go
+++ b/src/runtime/mgcscavenge_test.go
@@ -272,6 +272,9 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
// Tests end-to-end scavenging on a pageAlloc.
func TestPageAllocScavenge(t *testing.T) {
+ if GOOS == "openbsd" && testing.Short() {
+ t.Skip("skipping because virtual memory is limited; see #36210")
+ }
type test struct {
request, expect uintptr
}
@@ -279,12 +282,13 @@ func TestPageAllocScavenge(t *testing.T) {
if minPages < 1 {
minPages = 1
}
- tests := map[string]struct {
+ type setup struct {
beforeAlloc map[ChunkIdx][]BitRange
beforeScav map[ChunkIdx][]BitRange
expect []test
afterScav map[ChunkIdx][]BitRange
- }{
+ }
+ tests := map[string]setup{
"AllFreeUnscavExhaust": {
beforeAlloc: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
@@ -393,6 +397,26 @@ func TestPageAllocScavenge(t *testing.T) {
},
},
}
+ if PageAlloc64Bit != 0 {
+ tests["ScavAllVeryDiscontiguous"] = setup{
+ beforeAlloc: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {},
+ BaseChunkIdx + 0x1000: {},
+ },
+ beforeScav: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {},
+ BaseChunkIdx + 0x1000: {},
+ },
+ expect: []test{
+ {^uintptr(0), 2 * PallocChunkPages * PageSize},
+ {^uintptr(0), 0},
+ },
+ afterScav: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx + 0x1000: {{0, PallocChunkPages}},
+ },
+ }
+ }
for name, v := range tests {
v := v
runTest := func(t *testing.T, locked bool) {
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 114c97b874..5427d8839d 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -70,7 +70,7 @@ type mheap struct {
// on the swept stack.
sweepSpans [2]gcSweepBuf
- _ uint32 // align uint64 fields on 32-bit for atomics
+ // _ uint32 // align uint64 fields on 32-bit for atomics
// Proportional sweep
//
@@ -786,7 +786,9 @@ func (h *mheap) reclaim(npage uintptr) {
// reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
// It returns the number of pages returned to the heap.
//
-// h.lock must be held and the caller must be non-preemptible.
+// h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
+// temporarily unlocked and re-locked in order to do sweeping or if tracing is
+// enabled.
func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
// The heap lock must be held because this accesses the
// heapArena.spans arrays using potentially non-live pointers.
@@ -842,8 +844,10 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
n -= uintptr(len(inUse) * 8)
}
if trace.enabled {
+ unlock(&h.lock)
// Account for pages scanned but not reclaimed.
traceGCSweepSpan((n0 - nFreed) * pageSize)
+ lock(&h.lock)
}
return nFreed
}
@@ -1430,11 +1434,8 @@ func (h *mheap) scavengeAll() {
unlock(&h.lock)
gp.m.mallocing--
- if debug.gctrace > 0 {
- if released > 0 {
- print("forced scvg: ", released>>20, " MB released\n")
- }
- print("forced scvg: inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
+ if debug.scavtrace > 0 {
+ printScavTrace(released, true)
}
}
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 615ec1868c..64e220772e 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -83,6 +83,7 @@ var arches = map[string]func(){
"mips64x": func() { genMIPS(true) },
"mipsx": func() { genMIPS(false) },
"ppc64x": genPPC64,
+ "riscv64": genRISCV64,
"s390x": genS390X,
"wasm": genWasm,
}
@@ -478,6 +479,11 @@ func genPPC64() {
p("JMP (CTR)")
}
+func genRISCV64() {
+ p("// No async preemption on riscv64 - see issue 36711")
+ p("UNDEF")
+}
+
func genS390X() {
// Add integer registers R0-R12
// R13 (g), R14 (LR), R15 (SP) are special, and not saved here.
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 21ea6a8525..bb751f1f8e 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -9,9 +9,8 @@
//
// Pages are managed using a bitmap that is sharded into chunks.
// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
-// process's address space. Chunks are allocated using a SLAB allocator
-// and pointers to chunks are managed in one large array, which is mapped
-// in as needed.
+// process's address space. Chunks are managed in a sparse-array-style structure
+// similar to mheap.arenas, since the bitmap may be large on some systems.
//
// The bitmap is efficiently searched by using a radix tree in combination
// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
@@ -49,6 +48,7 @@
package runtime
import (
+ "runtime/internal/atomic"
"unsafe"
)
@@ -74,6 +74,14 @@ const (
summaryLevelBits = 3
summaryL0Bits = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits
+ // pallocChunksL2Bits is the number of bits of the chunk index number
+ // covered by the second level of the chunks map.
+ //
+ // See (*pageAlloc).chunks for more details. Update the documentation
+ // there should this change.
+ pallocChunksL2Bits = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
+ pallocChunksL1Shift = pallocChunksL2Bits
+
// Maximum searchAddr value, which indicates that the heap has no free space.
//
// We subtract arenaBaseOffset because we want this to represent the maximum
@@ -111,6 +119,26 @@ func chunkPageIndex(p uintptr) uint {
return uint(p % pallocChunkBytes / pageSize)
}
+// l1 returns the index into the first level of (*pageAlloc).chunks.
+func (i chunkIdx) l1() uint {
+ if pallocChunksL1Bits == 0 {
+ // Let the compiler optimize this away if there's no
+ // L1 map.
+ return 0
+ } else {
+ return uint(i) >> pallocChunksL1Shift
+ }
+}
+
+// l2 returns the index into the second level of (*pageAlloc).chunks.
+func (i chunkIdx) l2() uint {
+ if pallocChunksL1Bits == 0 {
+ return uint(i)
+ } else {
+ return uint(i) & (1<<pallocChunksL2Bits - 1)
+ }
+}
+
// addrsToSummaryRange converts base and limit pointers into a range
// of entries for the given summary level.
//
@@ -154,31 +182,52 @@ type pageAlloc struct {
// runtime segmentation fault, we get a much friendlier out-of-bounds
// error.
//
+ // To iterate over a summary level, use inUse to determine which ranges
+ // are currently available. Otherwise one might try to access
+ // memory which is only Reserved, which may result in a hard fault.
+ //
// We may still get segmentation faults < len since some of that
// memory may not be committed yet.
summary [summaryLevels][]pallocSum
// chunks is a slice of bitmap chunks.
//
- // The backing store for chunks is reserved in init and committed
- // by grow.
+ // The total size of chunks is quite large on most 64-bit platforms
+ // (O(GiB) or more) if flattened, so rather than making one large mapping
+ // (which has problems on some platforms, even when PROT_NONE) we use a
+ // two-level sparse array approach similar to the arena index in mheap.
//
// To find the chunk containing a memory address `a`, do:
- // chunks[chunkIndex(a)]
+ // chunkOf(chunkIndex(a))
+ //
+ // Below is a table describing the configuration for chunks for various
+ // heapAddrBits supported by the runtime.
+ //
+ // heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
+ // ------------------------------------------------
+ // 32 | 0 | 10 | 128 KiB
+ // 33 (iOS) | 0 | 11 | 256 KiB
+ // 48 | 13 | 13 | 1 MiB
//
- // summary[len(s.summary)-1][i] should always be checked, at least
- // for a zero max value, before accessing chunks[i]. It's possible the
- // bitmap at that index is mapped in and zeroed, indicating that it
- // contains free space, but in actuality it is unused since its
- // corresponding summary was never updated. Tests may ignore this
- // and assume the zero value (and that it is mapped).
+ // There's no reason to use the L1 part of chunks on 32-bit: the
+ // address space is small, so the L2 is small. For platforms with a
+ // 48-bit address space, we pick the L1 such that the L2 is 1 MiB
+ // in size, which strikes a good balance between low granularity and
+ // keeping the impact on BSS low (note the L1 is stored directly
+ // in pageAlloc).
+ //
+ // To iterate over the bitmap, use inUse to determine which ranges
+ // are currently available. Otherwise one might iterate over unused
+ // ranges.
//
// TODO(mknyszek): Consider changing the definition of the bitmap
// such that 1 means free and 0 means in-use so that summaries and
// the bitmaps align better on zero-values.
- chunks []pallocData
+ chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData
- // The address to start an allocation search with.
+ // The address to start an allocation search with. It must never
+ // point to any memory that is not contained in inUse, i.e.
+ // inUse.contains(searchAddr) must always be true.
//
// When added with arenaBaseOffset, we guarantee that
// all valid heap addresses (when also added with
@@ -190,15 +239,34 @@ type pageAlloc struct {
// space on architectures with segmented address spaces.
searchAddr uintptr
- // The address to start a scavenge candidate search with.
+ // The address to start a scavenge candidate search with. It
+ // need not point to memory contained in inUse.
scavAddr uintptr
+ // The amount of memory scavenged since the last scavtrace print.
+ //
+ // Read and updated atomically.
+ scavReleased uintptr
+
// start and end represent the chunk indices
// which pageAlloc knows about. It assumes
// chunks in the range [start, end) are
// currently ready to use.
start, end chunkIdx
+ // inUse is a slice of ranges of address space which are
+ // known by the page allocator to be currently in-use (passed
+ // to grow).
+ //
+ // This field is currently unused on 32-bit architectures but
+ // is harmless to track. We care much more about having a
+ // contiguous heap in these cases and take additional measures
+ // to ensure that, so in nearly all cases this should have just
+ // 1 element.
+ //
+ // All access is protected by the mheapLock.
+ inUse addrRanges
+
// mheap_.lock. This level of indirection makes it possible
// to test pageAlloc independently of the runtime allocator.
mheapLock *mutex
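
Using the 48-bit row of the table above, the chunk index splits into a 13-bit L1 and a 13-bit L2. A sketch of that index math — the constants are read off the table for the 48-bit case and are assumptions here (the real definitions live elsewhere in the runtime, per-heapAddrBits):

    package main

    import "fmt"

    const (
    	pallocChunksL1Bits  = 13 // assumed: 48-bit heapAddrBits row
    	pallocChunksL2Bits  = 13
    	pallocChunksL1Shift = pallocChunksL2Bits
    )

    func l1(i uint) uint { return i >> pallocChunksL1Shift }
    func l2(i uint) uint { return i & (1<<pallocChunksL2Bits - 1) }

    func main() {
    	ci := uint(0x12345) // arbitrary chunk index
    	fmt.Printf("l1=%#x l2=%#x\n", l1(ci), l2(ci)) // l1=0x9 l2=0x345
    }
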
@@ -222,6 +290,9 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
}
s.sysStat = sysStat
+ // Initialize s.inUse.
+ s.inUse.init(sysStat)
+
// System-dependent initialization.
s.sysInit()
@@ -231,67 +302,10 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
// Start with the scavAddr in a state indicating there's nothing more to do.
s.scavAddr = minScavAddr
- // Reserve space for the bitmap and put this reservation
- // into the chunks slice.
- const maxChunks = (1 << heapAddrBits) / pallocChunkBytes
- r := sysReserve(nil, maxChunks*unsafe.Sizeof(s.chunks[0]))
- if r == nil {
- throw("failed to reserve page bitmap memory")
- }
- sl := notInHeapSlice{(*notInHeap)(r), 0, maxChunks}
- s.chunks = *(*[]pallocData)(unsafe.Pointer(&sl))
-
// Set the mheapLock.
s.mheapLock = mheapLock
}
-// extendMappedRegion ensures that all the memory in the range
-// [base+nbase, base+nlimit) is in the Ready state.
-// base must refer to the beginning of a memory region in the
-// Reserved state. extendMappedRegion assumes that the region
-// [base+mbase, base+mlimit) is already mapped.
-//
-// Note that extendMappedRegion only supports extending
-// mappings in one direction. Therefore,
-// nbase < mbase && nlimit > mlimit is an invalid input
-// and this function will throw.
-func extendMappedRegion(base unsafe.Pointer, mbase, mlimit, nbase, nlimit uintptr, sysStat *uint64) {
- if uintptr(base)%physPageSize != 0 {
- print("runtime: base = ", base, "\n")
- throw("extendMappedRegion: base not page-aligned")
- }
- // Round the offsets to a physical page.
- mbase = alignDown(mbase, physPageSize)
- nbase = alignDown(nbase, physPageSize)
- mlimit = alignUp(mlimit, physPageSize)
- nlimit = alignUp(nlimit, physPageSize)
-
- // If none of the region is mapped, don't bother
- // trying to figure out which parts are.
- if mlimit-mbase != 0 {
- // Determine which part of the region actually needs
- // mapping.
- if nbase < mbase && nlimit > mlimit {
- // TODO(mknyszek): Consider supporting this case. It can't
- // ever happen currently in the page allocator, but may be
- // useful in the future. Also, it would make this function's
- // purpose simpler to explain.
- throw("mapped region extended in two directions")
- } else if nbase < mbase && nlimit <= mlimit {
- nlimit = mbase
- } else if nbase >= mbase && nlimit > mlimit {
- nbase = mlimit
- } else {
- return
- }
- }
-
- // Transition from Reserved to Ready.
- rbase := add(base, nbase)
- sysMap(rbase, nlimit-nbase, sysStat)
- sysUsed(rbase, nlimit-nbase)
-}
-
// compareSearchAddrTo compares an address against s.searchAddr in a linearized
// view of the address space on systems with discontinuous process address spaces.
// This linearized view is the same one generated by chunkIndex and arenaIndex,
@@ -315,6 +329,11 @@ func (s *pageAlloc) compareSearchAddrTo(addr uintptr) int {
return 0
}
+// chunkOf returns the chunk at the given chunk index.
+func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
+ return &s.chunks[ci.l1()][ci.l2()]
+}
+
// grow sets up the metadata for the address range [base, base+size).
// It may allocate metadata, in which case *s.sysStat will be updated.
//
@@ -332,7 +351,6 @@ func (s *pageAlloc) grow(base, size uintptr) {
// Update s.start and s.end.
// If no growth happened yet, start == 0. This is generally
// safe since the zero page is unmapped.
- oldStart, oldEnd := s.start, s.end
firstGrowth := s.start == 0
start, end := chunkIndex(base), chunkIndex(limit)
if firstGrowth || start < s.start {
@@ -340,22 +358,11 @@ func (s *pageAlloc) grow(base, size uintptr) {
}
if end > s.end {
s.end = end
-
- // s.end corresponds directly to the length of s.chunks,
- // so just update it here.
- s.chunks = s.chunks[:end]
}
-
- // Extend the mapped part of the chunk reservation.
- elemSize := unsafe.Sizeof(s.chunks[0])
- extendMappedRegion(
- unsafe.Pointer(&s.chunks[0]),
- uintptr(oldStart)*elemSize,
- uintptr(oldEnd)*elemSize,
- uintptr(s.start)*elemSize,
- uintptr(s.end)*elemSize,
- s.sysStat,
- )
+	// Note that [base, limit) will never overlap with any existing
+	// range in inUse because grow only ever adds never-used memory
+	// regions to the page allocator.
+ s.inUse.add(addrRange{base, limit})
// A grow operation is a lot like a free operation, so if our
// chunk ends up below the (linearized) s.searchAddr, update
@@ -364,11 +371,21 @@ func (s *pageAlloc) grow(base, size uintptr) {
s.searchAddr = base
}
- // Newly-grown memory is always considered scavenged.
+ // Add entries into chunks, which is sparse, if needed. Then,
+ // initialize the bitmap.
//
+ // Newly-grown memory is always considered scavenged.
// Set all the bits in the scavenged bitmaps high.
for c := chunkIndex(base); c < chunkIndex(limit); c++ {
- s.chunks[c].scavenged.setRange(0, pallocChunkPages)
+ if s.chunks[c.l1()] == nil {
+ // Create the necessary l2 entry.
+ //
+ // Store it atomically to avoid races with readers which
+ // don't acquire the heap lock.
+ r := sysAlloc(unsafe.Sizeof(*s.chunks[0]), s.sysStat)
+ atomic.StorepNoWB(unsafe.Pointer(&s.chunks[c.l1()]), r)
+ }
+ s.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
}
// Update summaries accordingly. The grow acts like a free, so
@@ -395,7 +412,7 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
// Fast path: the allocation doesn't span more than one chunk,
// so update this one and if the summary didn't change, return.
x := s.summary[len(s.summary)-1][sc]
- y := s.chunks[sc].summarize()
+ y := s.chunkOf(sc).summarize()
if x == y {
return
}
@@ -406,7 +423,7 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
summary := s.summary[len(s.summary)-1]
// Update the summary for chunk sc.
- summary[sc] = s.chunks[sc].summarize()
+ summary[sc] = s.chunkOf(sc).summarize()
// Update the summaries for chunks in between, which are
// either totally allocated or freed.
@@ -423,7 +440,7 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
}
// Update the summary for chunk ec.
- summary[ec] = s.chunks[ec].summarize()
+ summary[ec] = s.chunkOf(ec).summarize()
} else {
// Slow general path: the allocation spans more than one chunk
// and at least one summary is guaranteed to change.
@@ -432,7 +449,7 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
// every chunk in the range and manually recompute the summary.
summary := s.summary[len(s.summary)-1]
for c := sc; c <= ec; c++ {
- summary[c] = s.chunks[c].summarize()
+ summary[c] = s.chunkOf(c).summarize()
}
}
@@ -479,18 +496,22 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
scav := uint(0)
if sc == ec {
// The range doesn't cross any chunk boundaries.
- scav += s.chunks[sc].scavenged.popcntRange(si, ei+1-si)
- s.chunks[sc].allocRange(si, ei+1-si)
+ chunk := s.chunkOf(sc)
+ scav += chunk.scavenged.popcntRange(si, ei+1-si)
+ chunk.allocRange(si, ei+1-si)
} else {
// The range crosses at least one chunk boundary.
- scav += s.chunks[sc].scavenged.popcntRange(si, pallocChunkPages-si)
- s.chunks[sc].allocRange(si, pallocChunkPages-si)
+ chunk := s.chunkOf(sc)
+ scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
+ chunk.allocRange(si, pallocChunkPages-si)
for c := sc + 1; c < ec; c++ {
- scav += s.chunks[c].scavenged.popcntRange(0, pallocChunkPages)
- s.chunks[c].allocAll()
+ chunk := s.chunkOf(c)
+ scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
+ chunk.allocAll()
}
- scav += s.chunks[ec].scavenged.popcntRange(0, ei+1)
- s.chunks[ec].allocRange(0, ei+1)
+ chunk = s.chunkOf(ec)
+ scav += chunk.scavenged.popcntRange(0, ei+1)
+ chunk.allocRange(0, ei+1)
}
s.update(base, npages, true, true)
return uintptr(scav) * pageSize
@@ -702,7 +723,7 @@ nextLevel:
// After iterating over all levels, i must contain a chunk index which
// is what the final level represents.
ci := chunkIdx(i)
- j, searchIdx := s.chunks[ci].find(npages, 0)
+ j, searchIdx := s.chunkOf(ci).find(npages, 0)
if j < 0 {
// We couldn't find any space in this chunk despite the summaries telling
// us it should be there. There's likely a bug, so dump some state and throw.
@@ -744,7 +765,7 @@ func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
// npages is guaranteed to be no greater than pallocChunkPages here.
i := chunkIndex(s.searchAddr)
if max := s.summary[len(s.summary)-1][i].max(); max >= uint(npages) {
- j, searchIdx := s.chunks[i].find(npages, chunkPageIndex(s.searchAddr))
+ j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr))
if j < 0 {
print("runtime: max = ", max, ", npages = ", npages, "\n")
print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr), ", s.searchAddr = ", hex(s.searchAddr), "\n")
@@ -793,7 +814,8 @@ func (s *pageAlloc) free(base, npages uintptr) {
if npages == 1 {
// Fast path: we're clearing a single bit, and we know exactly
// where it is, so mark it directly.
- s.chunks[chunkIndex(base)].free1(chunkPageIndex(base))
+ i := chunkIndex(base)
+ s.chunkOf(i).free1(chunkPageIndex(base))
} else {
// Slow path: we're clearing more bits so we may need to iterate.
limit := base + npages*pageSize - 1
@@ -802,14 +824,14 @@ func (s *pageAlloc) free(base, npages uintptr) {
if sc == ec {
// The range doesn't cross any chunk boundaries.
- s.chunks[sc].free(si, ei+1-si)
+ s.chunkOf(sc).free(si, ei+1-si)
} else {
// The range crosses at least one chunk boundary.
- s.chunks[sc].free(si, pallocChunkPages-si)
+ s.chunkOf(sc).free(si, pallocChunkPages-si)
for c := sc + 1; c < ec; c++ {
- s.chunks[c].freeAll()
+ s.chunkOf(c).freeAll()
}
- s.chunks[ec].free(0, ei+1)
+ s.chunkOf(ec).free(0, ei+1)
}
}
s.update(base, npages, true, false)
diff --git a/src/runtime/mpagealloc_32bit.go b/src/runtime/mpagealloc_32bit.go
index 996228c046..6658a900ac 100644
--- a/src/runtime/mpagealloc_32bit.go
+++ b/src/runtime/mpagealloc_32bit.go
@@ -26,6 +26,13 @@ const (
// Constants for testing.
pageAlloc32Bit = 1
pageAlloc64Bit = 0
+
+ // Number of bits needed to represent all indices into the L1 of the
+ // chunks map.
+ //
+ // See (*pageAlloc).chunks for more details. Update the documentation
+ // there should this number change.
+ pallocChunksL1Bits = 0
)
// See comment in mpagealloc_64bit.go.
diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go
index dc9ae8c8d6..0b475ed206 100644
--- a/src/runtime/mpagealloc_64bit.go
+++ b/src/runtime/mpagealloc_64bit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 !darwin,arm64 mips64 mips64le ppc64 ppc64le s390x
+// +build amd64 !darwin,arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x
// See mpagealloc_32bit.go for why darwin/arm64 is excluded here.
@@ -17,6 +17,13 @@ const (
// Constants for testing.
pageAlloc32Bit = 0
pageAlloc64Bit = 1
+
+ // Number of bits needed to represent all indices into the L1 of the
+ // chunks map.
+ //
+ // See (*pageAlloc).chunks for more details. Update the documentation
+ // there should this number change.
+ pallocChunksL1Bits = 13
)
// levelBits is the number of bits in the radix for a given level in the super summary
@@ -95,42 +102,79 @@ func (s *pageAlloc) sysGrow(base, limit uintptr) {
throw("sysGrow bounds not aligned to pallocChunkBytes")
}
+ // addrRangeToSummaryRange converts a range of addresses into a range
+ // of summary indices which must be mapped to support those addresses
+ // in the summary range.
+ addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
+ sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base, r.limit)
+ return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
+ }
+
+ // summaryRangeToSumAddrRange converts a range of indices in any
+ // level of s.summary into page-aligned addresses which cover that
+ // range of indices.
+ summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
+ baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
+ limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
+ base := unsafe.Pointer(&s.summary[level][0])
+ return addrRange{
+ uintptr(add(base, baseOffset)),
+ uintptr(add(base, limitOffset)),
+ }
+ }
+
+	// addrRangeToSumAddrRange is a convenience function that converts
+ // an address range r to the address range of the given summary level
+ // that stores the summaries for r.
+ addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
+ sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
+ return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
+ }
+
+ // Find the first inUse index which is strictly greater than base.
+ //
+	// Because this function will never be asked to remap the same memory
+ // twice, this index is effectively the index at which we would insert
+ // this new growth, and base will never overlap/be contained within
+ // any existing range.
+ //
+ // This will be used to look at what memory in the summary array is already
+ // mapped before and after this new range.
+ inUseIndex := s.inUse.findSucc(base)
+
// Walk up the radix tree and map summaries in as needed.
- cbase, climit := chunkBase(s.start), chunkBase(s.end)
- for l := len(s.summary) - 1; l >= 0; l-- {
+ for l := range s.summary {
// Figure out what part of the summary array this new address space needs.
- // Note that we need to align the ranges to the block width (1<<levelBits[l])
- // at this level because the full block is needed to compute the summary for
- // the next level.
- lo, hi := addrsToSummaryRange(l, base, limit)
- lo, hi = blockAlignSummaryRange(l, lo, hi)
+ needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, addrRange{base, limit})
// Update the summary slices with a new upper-bound. This ensures
// we get tight bounds checks on at least the top bound.
//
- // We must do this regardless of whether we map new memory, because we
- // may be extending further into the mapped memory.
- if hi > len(s.summary[l]) {
- s.summary[l] = s.summary[l][:hi]
+ // We must do this regardless of whether we map new memory.
+ if needIdxLimit > len(s.summary[l]) {
+ s.summary[l] = s.summary[l][:needIdxLimit]
}
- // Figure out what part of the summary array is already mapped.
- // If we're doing our first growth, just pass zero.
- // addrsToSummaryRange won't accept cbase == climit.
- var mlo, mhi int
- if s.start != 0 {
- mlo, mhi = addrsToSummaryRange(l, cbase, climit)
- mlo, mhi = blockAlignSummaryRange(l, mlo, mhi)
+ // Compute the needed address range in the summary array for level l.
+ need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)
+
+		// Prune need down to what needs to be newly mapped. Some parts of it may
+		// already be mapped by what inUse describes due to page alignment requirements
+		// for mapping. subtract's invariants are guaranteed by the fact that this
+		// function will never be asked to remap the same memory twice.
+ if inUseIndex > 0 {
+ need = need.subtract(addrRangeToSumAddrRange(l, s.inUse.ranges[inUseIndex-1]))
+ }
+ if inUseIndex < len(s.inUse.ranges) {
+ need = need.subtract(addrRangeToSumAddrRange(l, s.inUse.ranges[inUseIndex]))
+ }
+ // It's possible that after our pruning above, there's nothing new to map.
+ if need.size() == 0 {
+ continue
}
- // Extend the mappings for this summary level.
- extendMappedRegion(
- unsafe.Pointer(&s.summary[l][0]),
- uintptr(mlo)*pallocSumBytes,
- uintptr(mhi)*pallocSumBytes,
- uintptr(lo)*pallocSumBytes,
- uintptr(hi)*pallocSumBytes,
- s.sysStat,
- )
+ // Map and commit need.
+ sysMap(unsafe.Pointer(need.base), need.size(), s.sysStat)
+ sysUsed(unsafe.Pointer(need.base), need.size())
}
}
diff --git a/src/runtime/mpagealloc_test.go b/src/runtime/mpagealloc_test.go
index 9034f63064..89a4a2502c 100644
--- a/src/runtime/mpagealloc_test.go
+++ b/src/runtime/mpagealloc_test.go
@@ -22,8 +22,14 @@ func checkPageAlloc(t *testing.T, want, got *PageAlloc) {
}
for i := gotStart; i < gotEnd; i++ {
- // Check the bitmaps.
+ // Check the bitmaps. Note that we may have nil data.
gb, wb := got.PallocData(i), want.PallocData(i)
+ if gb == nil && wb == nil {
+ continue
+ }
+		if (gb == nil && wb != nil) || (gb != nil && wb == nil) {
+			t.Errorf("chunk %d nilness mismatch", i)
+			continue // exactly one of gb, wb is nil; the checks below would dereference it
+		}
if !checkPallocBits(t, gb.PallocBits(), wb.PallocBits()) {
t.Logf("in chunk %d (mallocBits)", i)
}
@@ -34,16 +40,198 @@ func checkPageAlloc(t *testing.T, want, got *PageAlloc) {
// TODO(mknyszek): Verify summaries too?
}
+func TestPageAllocGrow(t *testing.T) {
+ if GOOS == "openbsd" && testing.Short() {
+ t.Skip("skipping because virtual memory is limited; see #36210")
+ }
+ type test struct {
+ chunks []ChunkIdx
+ inUse []AddrRange
+ }
+ tests := map[string]test{
+ "One": {
+ chunks: []ChunkIdx{
+ BaseChunkIdx,
+ },
+ inUse: []AddrRange{
+ {PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0)},
+ },
+ },
+ "Contiguous2": {
+ chunks: []ChunkIdx{
+ BaseChunkIdx,
+ BaseChunkIdx + 1,
+ },
+ inUse: []AddrRange{
+ {PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+2, 0)},
+ },
+ },
+ "Contiguous5": {
+ chunks: []ChunkIdx{
+ BaseChunkIdx,
+ BaseChunkIdx + 1,
+ BaseChunkIdx + 2,
+ BaseChunkIdx + 3,
+ BaseChunkIdx + 4,
+ },
+ inUse: []AddrRange{
+ {PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+5, 0)},
+ },
+ },
+ "Discontiguous": {
+ chunks: []ChunkIdx{
+ BaseChunkIdx,
+ BaseChunkIdx + 2,
+ BaseChunkIdx + 4,
+ },
+ inUse: []AddrRange{
+ {PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0)},
+ {PageBase(BaseChunkIdx+2, 0), PageBase(BaseChunkIdx+3, 0)},
+ {PageBase(BaseChunkIdx+4, 0), PageBase(BaseChunkIdx+5, 0)},
+ },
+ },
+ "Mixed": {
+ chunks: []ChunkIdx{
+ BaseChunkIdx,
+ BaseChunkIdx + 1,
+ BaseChunkIdx + 2,
+ BaseChunkIdx + 4,
+ },
+ inUse: []AddrRange{
+ {PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+3, 0)},
+ {PageBase(BaseChunkIdx+4, 0), PageBase(BaseChunkIdx+5, 0)},
+ },
+ },
+ "WildlyDiscontiguous": {
+ chunks: []ChunkIdx{
+ BaseChunkIdx,
+ BaseChunkIdx + 1,
+ BaseChunkIdx + 0x10,
+ BaseChunkIdx + 0x21,
+ },
+ inUse: []AddrRange{
+ {PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+2, 0)},
+ {PageBase(BaseChunkIdx+0x10, 0), PageBase(BaseChunkIdx+0x11, 0)},
+ {PageBase(BaseChunkIdx+0x21, 0), PageBase(BaseChunkIdx+0x22, 0)},
+ },
+ },
+ "ManyDiscontiguous": {
+ // The initial cap is 16. Test 33 ranges, to exercise the growth path (twice).
+ chunks: []ChunkIdx{
+ BaseChunkIdx, BaseChunkIdx + 2, BaseChunkIdx + 4, BaseChunkIdx + 6,
+ BaseChunkIdx + 8, BaseChunkIdx + 10, BaseChunkIdx + 12, BaseChunkIdx + 14,
+ BaseChunkIdx + 16, BaseChunkIdx + 18, BaseChunkIdx + 20, BaseChunkIdx + 22,
+ BaseChunkIdx + 24, BaseChunkIdx + 26, BaseChunkIdx + 28, BaseChunkIdx + 30,
+ BaseChunkIdx + 32, BaseChunkIdx + 34, BaseChunkIdx + 36, BaseChunkIdx + 38,
+ BaseChunkIdx + 40, BaseChunkIdx + 42, BaseChunkIdx + 44, BaseChunkIdx + 46,
+ BaseChunkIdx + 48, BaseChunkIdx + 50, BaseChunkIdx + 52, BaseChunkIdx + 54,
+ BaseChunkIdx + 56, BaseChunkIdx + 58, BaseChunkIdx + 60, BaseChunkIdx + 62,
+ BaseChunkIdx + 64,
+ },
+ inUse: []AddrRange{
+ {PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0)},
+ {PageBase(BaseChunkIdx+2, 0), PageBase(BaseChunkIdx+3, 0)},
+ {PageBase(BaseChunkIdx+4, 0), PageBase(BaseChunkIdx+5, 0)},
+ {PageBase(BaseChunkIdx+6, 0), PageBase(BaseChunkIdx+7, 0)},
+ {PageBase(BaseChunkIdx+8, 0), PageBase(BaseChunkIdx+9, 0)},
+ {PageBase(BaseChunkIdx+10, 0), PageBase(BaseChunkIdx+11, 0)},
+ {PageBase(BaseChunkIdx+12, 0), PageBase(BaseChunkIdx+13, 0)},
+ {PageBase(BaseChunkIdx+14, 0), PageBase(BaseChunkIdx+15, 0)},
+ {PageBase(BaseChunkIdx+16, 0), PageBase(BaseChunkIdx+17, 0)},
+ {PageBase(BaseChunkIdx+18, 0), PageBase(BaseChunkIdx+19, 0)},
+ {PageBase(BaseChunkIdx+20, 0), PageBase(BaseChunkIdx+21, 0)},
+ {PageBase(BaseChunkIdx+22, 0), PageBase(BaseChunkIdx+23, 0)},
+ {PageBase(BaseChunkIdx+24, 0), PageBase(BaseChunkIdx+25, 0)},
+ {PageBase(BaseChunkIdx+26, 0), PageBase(BaseChunkIdx+27, 0)},
+ {PageBase(BaseChunkIdx+28, 0), PageBase(BaseChunkIdx+29, 0)},
+ {PageBase(BaseChunkIdx+30, 0), PageBase(BaseChunkIdx+31, 0)},
+ {PageBase(BaseChunkIdx+32, 0), PageBase(BaseChunkIdx+33, 0)},
+ {PageBase(BaseChunkIdx+34, 0), PageBase(BaseChunkIdx+35, 0)},
+ {PageBase(BaseChunkIdx+36, 0), PageBase(BaseChunkIdx+37, 0)},
+ {PageBase(BaseChunkIdx+38, 0), PageBase(BaseChunkIdx+39, 0)},
+ {PageBase(BaseChunkIdx+40, 0), PageBase(BaseChunkIdx+41, 0)},
+ {PageBase(BaseChunkIdx+42, 0), PageBase(BaseChunkIdx+43, 0)},
+ {PageBase(BaseChunkIdx+44, 0), PageBase(BaseChunkIdx+45, 0)},
+ {PageBase(BaseChunkIdx+46, 0), PageBase(BaseChunkIdx+47, 0)},
+ {PageBase(BaseChunkIdx+48, 0), PageBase(BaseChunkIdx+49, 0)},
+ {PageBase(BaseChunkIdx+50, 0), PageBase(BaseChunkIdx+51, 0)},
+ {PageBase(BaseChunkIdx+52, 0), PageBase(BaseChunkIdx+53, 0)},
+ {PageBase(BaseChunkIdx+54, 0), PageBase(BaseChunkIdx+55, 0)},
+ {PageBase(BaseChunkIdx+56, 0), PageBase(BaseChunkIdx+57, 0)},
+ {PageBase(BaseChunkIdx+58, 0), PageBase(BaseChunkIdx+59, 0)},
+ {PageBase(BaseChunkIdx+60, 0), PageBase(BaseChunkIdx+61, 0)},
+ {PageBase(BaseChunkIdx+62, 0), PageBase(BaseChunkIdx+63, 0)},
+ {PageBase(BaseChunkIdx+64, 0), PageBase(BaseChunkIdx+65, 0)},
+ },
+ },
+ }
+ if PageAlloc64Bit != 0 {
+ tests["ExtremelyDiscontiguous"] = test{
+ chunks: []ChunkIdx{
+ BaseChunkIdx,
+ BaseChunkIdx + 0x100000, // constant translates to O(TiB)
+ },
+ inUse: []AddrRange{
+ {PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0)},
+ {PageBase(BaseChunkIdx+0x100000, 0), PageBase(BaseChunkIdx+0x100001, 0)},
+ },
+ }
+ }
+ for name, v := range tests {
+ v := v
+ t.Run(name, func(t *testing.T) {
+ // By creating a new pageAlloc, we will
+ // grow it for each chunk defined in x.
+ x := make(map[ChunkIdx][]BitRange)
+ for _, c := range v.chunks {
+ x[c] = []BitRange{}
+ }
+ b := NewPageAlloc(x, nil)
+ defer FreePageAlloc(b)
+
+ got := b.InUse()
+ want := v.inUse
+
+ // Check for mismatches.
+ if len(got) != len(want) {
+ t.Fail()
+ } else {
+ for i := range want {
+ if want[i] != got[i] {
+ t.Fail()
+ break
+ }
+ }
+ }
+ if t.Failed() {
+ t.Logf("found inUse mismatch")
+ t.Logf("got:")
+ for i, r := range got {
+ t.Logf("\t#%d [0x%x, 0x%x)", i, r.Base, r.Limit)
+ }
+ t.Logf("want:")
+ for i, r := range want {
+ t.Logf("\t#%d [0x%x, 0x%x)", i, r.Base, r.Limit)
+ }
+ }
+ })
+ }
+}
+
func TestPageAllocAlloc(t *testing.T) {
+ if GOOS == "openbsd" && testing.Short() {
+ t.Skip("skipping because virtual memory is limited; see #36210")
+ }
type hit struct {
npages, base, scav uintptr
}
- tests := map[string]struct {
+ type test struct {
scav map[ChunkIdx][]BitRange
before map[ChunkIdx][]BitRange
after map[ChunkIdx][]BitRange
hits []hit
- }{
+ }
+ tests := map[string]test{
"AllFree1": {
before: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
@@ -184,7 +372,6 @@ func TestPageAllocAlloc(t *testing.T) {
BaseChunkIdx: {{0, 195}},
},
},
- // TODO(mknyszek): Add tests close to the chunk size.
"ExhaustPallocChunkPages-3": {
before: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
@@ -384,6 +571,48 @@ func TestPageAllocAlloc(t *testing.T) {
},
},
}
+ if PageAlloc64Bit != 0 {
+ const chunkIdxBigJump = 0x100000 // chunk index offset which translates to O(TiB)
+
+		// This test attempts to trigger a bug wherein the allocator reads
+		// unmapped summary memory in a case other than heap exhaustion.
+ //
+ // It achieves this by placing a chunk such that its summary will be
+ // at the very end of a physical page. It then also places another chunk
+ // much further up in the address space, such that any allocations into the
+ // first chunk do not exhaust the heap and the second chunk's summary is not in the
+ // page immediately adjacent to the first chunk's summary's page.
+ // Allocating into this first chunk to exhaustion and then into the second
+ // chunk may then trigger a check in the allocator which erroneously looks at
+ // unmapped summary memory and crashes.
+
+ // Figure out how many chunks are in a physical page, then align BaseChunkIdx
+ // to a physical page in the chunk summary array. Here we only assume that
+ // each summary array is aligned to some physical page.
+ sumsPerPhysPage := ChunkIdx(PhysPageSize / PallocSumBytes)
+ baseChunkIdx := BaseChunkIdx &^ (sumsPerPhysPage - 1)
+ tests["DiscontiguousMappedSumBoundary"] = test{
+ before: map[ChunkIdx][]BitRange{
+ baseChunkIdx + sumsPerPhysPage - 1: {},
+ baseChunkIdx + chunkIdxBigJump: {},
+ },
+ scav: map[ChunkIdx][]BitRange{
+ baseChunkIdx + sumsPerPhysPage - 1: {},
+ baseChunkIdx + chunkIdxBigJump: {},
+ },
+ hits: []hit{
+ {PallocChunkPages - 1, PageBase(baseChunkIdx+sumsPerPhysPage-1, 0), 0},
+ {1, PageBase(baseChunkIdx+sumsPerPhysPage-1, PallocChunkPages-1), 0},
+ {1, PageBase(baseChunkIdx+chunkIdxBigJump, 0), 0},
+ {PallocChunkPages - 1, PageBase(baseChunkIdx+chunkIdxBigJump, 1), 0},
+ {1, 0, 0},
+ },
+ after: map[ChunkIdx][]BitRange{
+ baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages}},
+ baseChunkIdx + chunkIdxBigJump: {{0, PallocChunkPages}},
+ },
+ }
+ }
for name, v := range tests {
v := v
t.Run(name, func(t *testing.T) {
@@ -408,6 +637,9 @@ func TestPageAllocAlloc(t *testing.T) {
}
func TestPageAllocExhaust(t *testing.T) {
+ if GOOS == "openbsd" && testing.Short() {
+ t.Skip("skipping because virtual memory is limited; see #36210")
+ }
for _, npages := range []uintptr{1, 2, 3, 4, 5, 8, 16, 64, 1024, 1025, 2048, 2049} {
npages := npages
t.Run(fmt.Sprintf("%d", npages), func(t *testing.T) {
@@ -457,6 +689,9 @@ func TestPageAllocExhaust(t *testing.T) {
}
func TestPageAllocFree(t *testing.T) {
+ if GOOS == "openbsd" && testing.Short() {
+ t.Skip("skipping because virtual memory is limited; see #36210")
+ }
tests := map[string]struct {
before map[ChunkIdx][]BitRange
after map[ChunkIdx][]BitRange
@@ -686,6 +921,9 @@ func TestPageAllocFree(t *testing.T) {
}
func TestPageAllocAllocAndFree(t *testing.T) {
+ if GOOS == "openbsd" && testing.Short() {
+ t.Skip("skipping because virtual memory is limited; see #36210")
+ }
type hit struct {
alloc bool
npages uintptr
diff --git a/src/runtime/mpagecache.go b/src/runtime/mpagecache.go
index ec2f2d13ed..9fc338bd8e 100644
--- a/src/runtime/mpagecache.go
+++ b/src/runtime/mpagecache.go
@@ -83,10 +83,10 @@ func (c *pageCache) flush(s *pageAlloc) {
// slower, safer thing by iterating over each bit individually.
for i := uint(0); i < 64; i++ {
if c.cache&(1<<i) != 0 {
- s.chunks[ci].free1(pi + i)
+ s.chunkOf(ci).free1(pi + i)
}
if c.scav&(1<<i) != 0 {
- s.chunks[ci].scavenged.setRange(pi+i, 1)
+ s.chunkOf(ci).scavenged.setRange(pi+i, 1)
}
}
// Since this is a lot like a free, we need to make sure
@@ -113,14 +113,15 @@ func (s *pageAlloc) allocToCache() pageCache {
ci := chunkIndex(s.searchAddr) // chunk index
if s.summary[len(s.summary)-1][ci] != 0 {
// Fast path: there's free pages at or near the searchAddr address.
- j, _ := s.chunks[ci].find(1, chunkPageIndex(s.searchAddr))
+ chunk := s.chunkOf(ci)
+ j, _ := chunk.find(1, chunkPageIndex(s.searchAddr))
if j < 0 {
throw("bad summary data")
}
c = pageCache{
base: chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
- cache: ^s.chunks[ci].pages64(j),
- scav: s.chunks[ci].scavenged.block64(j),
+ cache: ^chunk.pages64(j),
+ scav: chunk.scavenged.block64(j),
}
} else {
// Slow path: the searchAddr address had nothing there, so go find
@@ -133,10 +134,11 @@ func (s *pageAlloc) allocToCache() pageCache {
return pageCache{}
}
ci := chunkIndex(addr)
+ chunk := s.chunkOf(ci)
c = pageCache{
base: alignDown(addr, 64*pageSize),
- cache: ^s.chunks[ci].pages64(chunkPageIndex(addr)),
- scav: s.chunks[ci].scavenged.block64(chunkPageIndex(addr)),
+ cache: ^chunk.pages64(chunkPageIndex(addr)),
+ scav: chunk.scavenged.block64(chunkPageIndex(addr)),
}
}
diff --git a/src/runtime/mpagecache_test.go b/src/runtime/mpagecache_test.go
index 6fdaa04d72..b8cc0bd965 100644
--- a/src/runtime/mpagecache_test.go
+++ b/src/runtime/mpagecache_test.go
@@ -180,6 +180,9 @@ func TestPageCacheAlloc(t *testing.T) {
}
func TestPageCacheFlush(t *testing.T) {
+ if GOOS == "openbsd" && testing.Short() {
+ t.Skip("skipping because virtual memory is limited; see #36210")
+ }
bits64ToBitRanges := func(bits uint64, base uint) []BitRange {
var ranges []BitRange
start, size := uint(0), uint(0)
@@ -254,6 +257,9 @@ func TestPageCacheFlush(t *testing.T) {
}
func TestPageAllocAllocToCache(t *testing.T) {
+ if GOOS == "openbsd" && testing.Short() {
+ t.Skip("skipping because virtual memory is limited; see #36210")
+ }
tests := map[string]struct {
before map[ChunkIdx][]BitRange
scav map[ChunkIdx][]BitRange
diff --git a/src/runtime/mpallocbits.go b/src/runtime/mpallocbits.go
index dd13337c22..a8011341bc 100644
--- a/src/runtime/mpallocbits.go
+++ b/src/runtime/mpallocbits.go
@@ -202,17 +202,11 @@ func (b *pallocBits) summarize() pallocSum {
// If find fails to find any free space, it returns an index of ^uint(0) and
// the new searchIdx should be ignored.
//
-// The returned searchIdx is always the index of the first free page found
-// in this bitmap during the search, except if npages == 1, in which
-// case it will be the index just after the first free page, because the
-// index returned as the first result is assumed to be allocated and so
-// represents a minor optimization for that case.
+// Note that if npages == 1, the two returned values will always be identical.
func (b *pallocBits) find(npages uintptr, searchIdx uint) (uint, uint) {
if npages == 1 {
addr := b.find1(searchIdx)
- // Return a searchIdx of addr + 1 since we assume addr will be
- // allocated.
- return addr, addr + 1
+ return addr, addr
} else if npages <= 64 {
return b.findSmallN(npages, searchIdx)
}
@@ -369,6 +363,9 @@ func findBitRange64(c uint64, n uint) uint {
// whether or not a given page is scavenged in a single
// structure. It's effectively a pallocBits with
// additional functionality.
+//
+// Update the comment on (*pageAlloc).chunks should this
+// structure change.
type pallocData struct {
pallocBits
scavenged pageBits
diff --git a/src/runtime/mranges.go b/src/runtime/mranges.go
new file mode 100644
index 0000000000..b13385165b
--- /dev/null
+++ b/src/runtime/mranges.go
@@ -0,0 +1,161 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Address range data structure.
+//
+// This file contains an implementation of a data structure which
+// manages ordered address ranges.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// addrRange represents a region of address space.
+type addrRange struct {
+ // base and limit together represent the region of address space
+ // [base, limit). That is, base is inclusive, limit is exclusive.
+ base, limit uintptr
+}
+
+// size returns the size of the range represented in bytes.
+func (a addrRange) size() uintptr {
+ if a.limit <= a.base {
+ return 0
+ }
+ return a.limit - a.base
+}
+
+// contains returns whether or not the range contains a given address.
+func (a addrRange) contains(addr uintptr) bool {
+ return addr >= a.base && addr < a.limit
+}
+
+// subtract removes any overlap between a and b from a and returns
+// the resulting range. subtract assumes that a and b either don't
+// overlap at all, only overlap on one side, or are equal.
+// If b is strictly contained in a, thus forcing a split, it will throw.
+func (a addrRange) subtract(b addrRange) addrRange {
+ if a.base >= b.base && a.limit <= b.limit {
+ return addrRange{}
+ } else if a.base < b.base && a.limit > b.limit {
+ throw("bad prune")
+ } else if a.limit > b.limit && a.base < b.limit {
+ a.base = b.limit
+ } else if a.base < b.base && a.limit > b.base {
+ a.limit = b.base
+ }
+ return a
+}
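
To illustrate the supported cases, a small self-contained sketch; addrRange is redefined locally because the real type is runtime-internal, and panic stands in for throw:

package main

import "fmt"

// addrRange mirrors the runtime type: the region [base, limit).
type addrRange struct{ base, limit uintptr }

// subtract reproduces the logic above with panic in place of throw.
func (a addrRange) subtract(b addrRange) addrRange {
	if a.base >= b.base && a.limit <= b.limit {
		return addrRange{} // a is contained in b: nothing remains.
	} else if a.base < b.base && a.limit > b.limit {
		panic("bad prune") // b would split a: unsupported.
	} else if a.limit > b.limit && a.base < b.limit {
		a.base = b.limit // overlap on a's low side.
	} else if a.base < b.base && a.limit > b.base {
		a.limit = b.base // overlap on a's high side.
	}
	return a
}

func main() {
	a := addrRange{0x1000, 0x3000}
	for _, b := range []addrRange{
		{0x0000, 0x2000}, // overlaps a's low side  -> [0x2000, 0x3000)
		{0x2000, 0x4000}, // overlaps a's high side -> [0x1000, 0x2000)
		{0x0000, 0x4000}, // covers a entirely      -> [0x0, 0x0)
	} {
		r := a.subtract(b)
		fmt.Printf("[%#x, %#x)\n", r.base, r.limit)
	}
}
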
+
+// addrRanges is a data structure holding a collection of ranges of
+// address space.
+//
+// The ranges are coalesced eagerly to reduce the
+// number of ranges it holds.
+//
+// The slice backing store for the ranges field is
+// persistentalloc'd and thus there is no way to free it.
+//
+// addrRanges is not thread-safe.
+type addrRanges struct {
+ // ranges is a slice of ranges sorted by base.
+ ranges []addrRange
+
+ // sysStat is the stat to track allocations by this type
+ sysStat *uint64
+}
+
+func (a *addrRanges) init(sysStat *uint64) {
+ ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
+ ranges.len = 0
+ ranges.cap = 16
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
+ a.sysStat = sysStat
+}
+
+// findSucc returns the first index in a such that base is
+// less than the base of the addrRange at that index.
+func (a *addrRanges) findSucc(base uintptr) int {
+ // TODO(mknyszek): Consider a binary search for large arrays.
+ // While iterating over these ranges is potentially expensive,
+ // the expected number of ranges is small, ideally just 1,
+ // since Go heaps are usually mostly contiguous.
+ for i := range a.ranges {
+ if base < a.ranges[i].base {
+ return i
+ }
+ }
+ return len(a.ranges)
+}
+
+// contains returns true if a covers the address addr.
+func (a *addrRanges) contains(addr uintptr) bool {
+ i := a.findSucc(addr)
+ if i == 0 {
+ return false
+ }
+ return a.ranges[i-1].contains(addr)
+}
+
+// add inserts a new address range to a.
+//
+// r must not overlap with any address range in a.
+func (a *addrRanges) add(r addrRange) {
+ // The copies in this function are potentially expensive, but this data
+ // structure is meant to represent the Go heap. At worst, copying this
+ // would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
+ // copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
+ // arenas which is completely discontiguous. ~160µs is still a lot, but in
+ // practice most platforms have 64 MiB arenas (which cuts this by a factor
+ // of 16) and Go heaps are usually mostly contiguous, so the chance that
+ // an addrRanges even grows to that size is extremely low.
+
+ // Because we assume r is not currently represented in a,
+ // findSucc gives us our insertion index.
+ i := a.findSucc(r.base)
+ coalescesDown := i > 0 && a.ranges[i-1].limit == r.base
+ coalescesUp := i < len(a.ranges) && r.limit == a.ranges[i].base
+ if coalescesUp && coalescesDown {
+ // We have neighbors and they both border us.
+ // Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
+ a.ranges[i-1].limit = a.ranges[i].limit
+
+ // Delete a.ranges[i].
+ copy(a.ranges[i:], a.ranges[i+1:])
+ a.ranges = a.ranges[:len(a.ranges)-1]
+ } else if coalescesDown {
+ // We have a neighbor at a lower address only and it borders us.
+ // Merge the new space into a.ranges[i-1].
+ a.ranges[i-1].limit = r.limit
+ } else if coalescesUp {
+ // We have a neighbor at a higher address only and it borders us.
+ // Merge the new space into a.ranges[i].
+ a.ranges[i].base = r.base
+ } else {
+ // We may or may not have neighbors which don't border us.
+ // Add the new range.
+ if len(a.ranges)+1 > cap(a.ranges) {
+ // Grow the array. Note that this leaks the old array, but since
+ // we're doubling we have at most 2x waste. For a 1 TiB heap and
+ // 4 MiB arenas which are all discontiguous (both very conservative
+ // assumptions), this would waste at most 4 MiB of memory.
+ oldRanges := a.ranges
+ ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
+ ranges.len = len(oldRanges) + 1
+ ranges.cap = cap(oldRanges) * 2
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))
+
+ // Copy in the old array, but make space for the new range.
+ copy(a.ranges[:i], oldRanges[:i])
+ copy(a.ranges[i+1:], oldRanges[i:])
+ } else {
+ a.ranges = a.ranges[:len(a.ranges)+1]
+ copy(a.ranges[i+1:], a.ranges[i:])
+ }
+ a.ranges[i] = r
+ }
+}
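
As a rough end-to-end illustration of the eager coalescing, a toy version that uses an ordinary slice (and append) in place of the persistentalloc'd off-heap backing store, which is runtime-internal:

package main

import "fmt"

type addrRange struct{ base, limit uintptr }

type addrRanges struct{ ranges []addrRange }

// findSucc returns the first index whose range base is greater than
// base, mirroring the method above.
func (a *addrRanges) findSucc(base uintptr) int {
	for i := range a.ranges {
		if base < a.ranges[i].base {
			return i
		}
	}
	return len(a.ranges)
}

// add mirrors the coalescing logic above; r must not overlap any
// existing range.
func (a *addrRanges) add(r addrRange) {
	i := a.findSucc(r.base)
	coalescesDown := i > 0 && a.ranges[i-1].limit == r.base
	coalescesUp := i < len(a.ranges) && r.limit == a.ranges[i].base
	switch {
	case coalescesUp && coalescesDown:
		// Merge a.ranges[i-1], r, and a.ranges[i] into a.ranges[i-1].
		a.ranges[i-1].limit = a.ranges[i].limit
		a.ranges = append(a.ranges[:i], a.ranges[i+1:]...)
	case coalescesDown:
		a.ranges[i-1].limit = r.limit
	case coalescesUp:
		a.ranges[i].base = r.base
	default:
		// Insert r at i, shifting the tail up by one.
		a.ranges = append(a.ranges, addrRange{})
		copy(a.ranges[i+1:], a.ranges[i:])
		a.ranges[i] = r
	}
}

func main() {
	var a addrRanges
	a.add(addrRange{0x1000, 0x2000})
	a.add(addrRange{0x3000, 0x4000})
	a.add(addrRange{0x2000, 0x3000}) // bridges the gap: all three coalesce
	for _, r := range a.ranges {
		fmt.Printf("[%#x, %#x)\n", r.base, r.limit) // [0x1000, 0x4000)
	}
}
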
diff --git a/src/runtime/nbpipe_fcntl_libc_test.go b/src/runtime/nbpipe_fcntl_libc_test.go
index 70f4b8348b..b38c58399b 100644
--- a/src/runtime/nbpipe_fcntl_libc_test.go
+++ b/src/runtime/nbpipe_fcntl_libc_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build aix solaris
+// +build aix darwin solaris
package runtime_test
diff --git a/src/runtime/nbpipe_fcntl_unix_test.go b/src/runtime/nbpipe_fcntl_unix_test.go
index 06b3275f06..75acdb62dd 100644
--- a/src/runtime/nbpipe_fcntl_unix_test.go
+++ b/src/runtime/nbpipe_fcntl_unix_test.go
@@ -2,13 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd
+// +build dragonfly freebsd linux netbsd openbsd
package runtime_test
-import "syscall"
+import (
+ "internal/syscall/unix"
+ "syscall"
+)
func fcntl(fd uintptr, cmd int, arg uintptr) (uintptr, syscall.Errno) {
- res, _, err := syscall.Syscall(syscall.SYS_FCNTL, fd, uintptr(cmd), arg)
+ res, _, err := syscall.Syscall(unix.FcntlSyscall, fd, uintptr(cmd), arg)
return res, err
}
diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go
index 7c3cb27223..31ac6ddf79 100644
--- a/src/runtime/os2_aix.go
+++ b/src/runtime/os2_aix.go
@@ -38,6 +38,7 @@ var (
//go:cgo_import_dynamic libc_madvise madvise "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_malloc malloc "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_mmap mmap "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_mprotect mprotect "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_open open "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_pipe pipe "libc.a/shr_64.o"
@@ -77,6 +78,7 @@ var (
//go:linkname libc_madvise libc_madvise
//go:linkname libc_malloc libc_malloc
//go:linkname libc_mmap libc_mmap
+//go:linkname libc_mprotect libc_mprotect
//go:linkname libc_munmap libc_munmap
//go:linkname libc_open libc_open
//go:linkname libc_pipe libc_pipe
@@ -118,6 +120,7 @@ var (
libc_madvise,
libc_malloc,
libc_mmap,
+ libc_mprotect,
libc_munmap,
libc_open,
libc_pipe,
@@ -459,6 +462,15 @@ func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (un
}
//go:nosplit
+func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (unsafe.Pointer, int) {
+ r, err0 := syscall3(&libc_mprotect, uintptr(addr), uintptr(n), uintptr(prot))
+ if r == ^uintptr(0) {
+ return nil, int(err0)
+ }
+ return unsafe.Pointer(r), int(err0)
+}
+
+//go:nosplit
func munmap(addr unsafe.Pointer, n uintptr) {
r, err := syscall2(&libc_munmap, uintptr(addr), uintptr(n))
if int32(r) == -1 {
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 27c66f7449..e0e3f4e341 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -116,6 +116,13 @@ const (
_CLONE_NEWUTS = 0x4000000
_CLONE_NEWIPC = 0x8000000
+ // As of QEMU 2.8.0 (5ea2fc84d), user emulation requires all six of these
+ // flags to be set when creating a thread; attempts to share the other
+ // five but leave SYSVSEM unshared will fail with -EINVAL.
+ //
+ // In non-QEMU environments CLONE_SYSVSEM is inconsequential as we do not
+ // use System V semaphores.
+
cloneFlags = _CLONE_VM | /* share memory */
_CLONE_FS | /* share cwd, etc */
_CLONE_FILES | /* share fd table */
@@ -289,6 +296,7 @@ func getHugePageSize() uintptr {
func osinit() {
ncpu = getproccount()
physHugePageSize = getHugePageSize()
+ osArchInit()
}
var urandom_dev = []byte("/dev/urandom\x00")
@@ -318,11 +326,20 @@ func libpreinit() {
initsig(true)
}
+// gsignalInitQuirk, if non-nil, is called for every allocated gsignal G.
+//
+// TODO(austin): Remove this after Go 1.15 when we remove the
+// mlockGsignal workaround.
+var gsignalInitQuirk func(gsignal *g)
+
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
mp.gsignal = malg(32 * 1024) // Linux wants >= 2K
mp.gsignal.m = mp
+ if gsignalInitQuirk != nil {
+ gsignalInitQuirk(mp.gsignal)
+ }
}
func gettid() uint32
@@ -333,7 +350,7 @@ func minit() {
minitSignals()
// Cgo-created threads and the bootstrap m are missing a
- // procid. We need this for asynchronous preemption and its
+ // procid. We need this for asynchronous preemption and it's
// useful in debuggers.
getg().m.procid = uint64(gettid())
}
diff --git a/src/runtime/os_linux_arm.go b/src/runtime/os_linux_arm.go
index 5f89c30f7a..b590da750f 100644
--- a/src/runtime/os_linux_arm.go
+++ b/src/runtime/os_linux_arm.go
@@ -39,6 +39,8 @@ func archauxv(tag, val uintptr) {
}
}
+func osArchInit() {}
+
//go:nosplit
func cputicks() int64 {
// Currently cputicks() is used in blocking profiler and to seed fastrand().
diff --git a/src/runtime/os_linux_arm64.go b/src/runtime/os_linux_arm64.go
index b51bc88820..19968dc164 100644
--- a/src/runtime/os_linux_arm64.go
+++ b/src/runtime/os_linux_arm64.go
@@ -27,6 +27,8 @@ func archauxv(tag, val uintptr) {
}
}
+func osArchInit() {}
+
//go:nosplit
func cputicks() int64 {
// Currently cputicks() is used in blocking profiler and to seed fastrand().
diff --git a/src/runtime/os_linux_mips64x.go b/src/runtime/os_linux_mips64x.go
index 59d2a8f2c6..464a26a8a4 100644
--- a/src/runtime/os_linux_mips64x.go
+++ b/src/runtime/os_linux_mips64x.go
@@ -10,6 +10,8 @@ package runtime
func archauxv(tag, val uintptr) {
}
+func osArchInit() {}
+
//go:nosplit
func cputicks() int64 {
// Currently cputicks() is used in blocking profiler and to seed fastrand().
diff --git a/src/runtime/os_linux_mipsx.go b/src/runtime/os_linux_mipsx.go
index ccdc3a7fe5..87962ed982 100644
--- a/src/runtime/os_linux_mipsx.go
+++ b/src/runtime/os_linux_mipsx.go
@@ -10,6 +10,8 @@ package runtime
func archauxv(tag, val uintptr) {
}
+func osArchInit() {}
+
//go:nosplit
func cputicks() int64 {
// Currently cputicks() is used in blocking profiler and to seed fastrand().
diff --git a/src/runtime/os_linux_ppc64x.go b/src/runtime/os_linux_ppc64x.go
index cc79cc4a66..3aedc23ef9 100644
--- a/src/runtime/os_linux_ppc64x.go
+++ b/src/runtime/os_linux_ppc64x.go
@@ -20,3 +20,5 @@ func archauxv(tag, val uintptr) {
cpu.HWCap2 = uint(val)
}
}
+
+func osArchInit() {}
diff --git a/src/runtime/os_linux_riscv64.go b/src/runtime/os_linux_riscv64.go
new file mode 100644
index 0000000000..9be88a5ad2
--- /dev/null
+++ b/src/runtime/os_linux_riscv64.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+func osArchInit() {}
diff --git a/src/runtime/os_linux_s390x.go b/src/runtime/os_linux_s390x.go
index 55d35c7cff..ee18fd1dc2 100644
--- a/src/runtime/os_linux_s390x.go
+++ b/src/runtime/os_linux_s390x.go
@@ -17,3 +17,5 @@ func archauxv(tag, val uintptr) {
cpu.S390X.HasVX = val&_HWCAP_S390_VX != 0
}
}
+
+func osArchInit() {}
diff --git a/src/runtime/os_linux_x86.go b/src/runtime/os_linux_x86.go
new file mode 100644
index 0000000000..0e1c9185b1
--- /dev/null
+++ b/src/runtime/os_linux_x86.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build 386 amd64
+
+package runtime
+
+//go:noescape
+func uname(utsname *new_utsname) int
+
+func mlock(addr, len uintptr) int
+
+func osArchInit() {
+ // Linux 5.2 introduced a bug that can corrupt vector
+ // registers on return from a signal if the signal stack isn't
+ // faulted in:
+ // https://bugzilla.kernel.org/show_bug.cgi?id=205663
+ //
+ // It was fixed in 5.3.15, 5.4.2, and all 5.5 and later
+ // kernels.
+ //
+ // If we're on an affected kernel, work around this issue by
+ // mlocking the top page of every signal stack. This doesn't
+ // help for signal stacks created in C, but there's not much
+ // we can do about that.
+ //
+ // TODO(austin): Remove this in Go 1.15, at which point it
+ // will be unlikely to encounter any of the affected kernels
+ // in the wild.
+
+ var uts new_utsname
+ if uname(&uts) < 0 {
+ throw("uname failed")
+ }
+ // Check for null terminator to ensure gostringnocopy doesn't
+ // walk off the end of the release string.
+ found := false
+ for _, b := range uts.release {
+ if b == 0 {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return
+ }
+ rel := gostringnocopy(&uts.release[0])
+
+ major, minor, patch, ok := parseRelease(rel)
+ if !ok {
+ return
+ }
+
+ if major == 5 && (minor == 2 || minor == 3 && patch < 15 || minor == 4 && patch < 2) {
+ gsignalInitQuirk = mlockGsignal
+ if m0.gsignal != nil {
+ throw("gsignal quirk too late")
+ }
+ }
+}
+
+func mlockGsignal(gsignal *g) {
+ if err := mlock(gsignal.stack.hi-physPageSize, physPageSize); err < 0 {
+ printlock()
+ println("runtime: mlock of signal stack failed:", -err)
+ if err == -_ENOMEM {
+ println("runtime: increase the mlock limit (ulimit -l) or")
+ }
+ println("runtime: update your kernel to 5.3.15+, 5.4.2+, or 5.5+")
+ throw("mlock failed")
+ }
+}
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index cf5837c1f0..bddc25729a 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -151,6 +151,29 @@ type mOS struct {
waitsema uintptr // semaphore for parking on locks
resumesema uintptr // semaphore to indicate suspend/resume
+
+ // preemptExtLock synchronizes preemptM with entry/exit from
+ // external C code.
+ //
+ // This protects against races between preemptM calling
+ // SuspendThread and external code on this thread calling
+ // ExitProcess. If these happen concurrently, it's possible to
+ // exit the suspending thread and suspend the exiting thread,
+ // leading to deadlock.
+ //
+ // 0 indicates this M is not being preempted or in external
+ // code. Entering external code CASes this from 0 to 1. If
+ // this fails, a preemption is in progress, so the thread must
+ // wait for the preemption. preemptM also CASes this from 0 to
+ // 1. If this fails, the preemption fails (as it would if the
+ // PC weren't in Go code). The value is reset to 0 when
+ // returning from external code or after a preemption is
+ // complete.
+ //
+ // TODO(austin): We may not need this if preemption were more
+ // tightly synchronized on the G/P status and preemption
+ // blocked transition into _Gsyscall/_Psyscall.
+ preemptExtLock uint32
}
//go:linkname os_sigpipe os.sigpipe
@@ -270,7 +293,11 @@ func loadOptionalSyscalls() {
}
func monitorSuspendResume() {
- const _DEVICE_NOTIFY_CALLBACK = 2
+ const (
+ _DEVICE_NOTIFY_CALLBACK = 2
+ _ERROR_FILE_NOT_FOUND = 2
+ _ERROR_INVALID_PARAMETERS = 87
+ )
type _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS struct {
callback uintptr
context uintptr
@@ -296,10 +323,24 @@ func monitorSuspendResume() {
callback: compileCallback(*efaceOf(&fn), true),
}
handle := uintptr(0)
- if stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
- uintptr(unsafe.Pointer(&params)),
- uintptr(unsafe.Pointer(&handle))) != 0 {
- throw("PowerRegisterSuspendResumeNotification failure")
+ ret := stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
+ uintptr(unsafe.Pointer(&params)), uintptr(unsafe.Pointer(&handle)))
+ // This function doesn't use GetLastError(), so we use the return value directly.
+ switch ret {
+ case 0:
+ return // Successful, nothing more to do.
+ case _ERROR_FILE_NOT_FOUND:
+ // Systems without access to the suspend/resume notifier
+ // also have their clock on "program time", and therefore
+ // don't want or need this anyway.
+ return
+ case _ERROR_INVALID_PARAMETERS:
+ // This is seen when running in Windows Docker.
+ // See issue 36557.
+ return
+ default:
+ println("runtime: PowerRegisterSuspendResumeNotification failed with errno=", ret)
+ throw("runtime: PowerRegisterSuspendResumeNotification failure")
}
}
@@ -1108,11 +1149,20 @@ func preemptM(mp *m) {
throw("self-preempt")
}
+ // Synchronize with external code that may try to ExitProcess.
+ if !atomic.Cas(&mp.preemptExtLock, 0, 1) {
+ // External code is running. Fail the preemption
+ // attempt.
+ atomic.Xadd(&mp.preemptGen, 1)
+ return
+ }
+
// Acquire our own handle to mp's thread.
lock(&mp.threadLock)
if mp.thread == 0 {
// The M hasn't been minit'd yet (or was just unminit'd).
unlock(&mp.threadLock)
+ atomic.Store(&mp.preemptExtLock, 0)
atomic.Xadd(&mp.preemptGen, 1)
return
}
@@ -1138,6 +1188,7 @@ func preemptM(mp *m) {
if int32(stdcall1(_SuspendThread, thread)) == -1 {
unlock(&suspendLock)
stdcall1(_CloseHandle, thread)
+ atomic.Store(&mp.preemptExtLock, 0)
// The thread no longer exists. This shouldn't be
// possible, but just acknowledge the request.
atomic.Xadd(&mp.preemptGen, 1)
@@ -1178,9 +1229,43 @@ func preemptM(mp *m) {
stdcall2(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
}
+ atomic.Store(&mp.preemptExtLock, 0)
+
// Acknowledge the preemption.
atomic.Xadd(&mp.preemptGen, 1)
stdcall1(_ResumeThread, thread)
stdcall1(_CloseHandle, thread)
}
+
+// osPreemptExtEnter is called before entering external code that may
+// call ExitProcess.
+//
+// This must be nosplit because it may be called from a syscall with
+// untyped stack slots, so the stack must not be grown or scanned.
+//
+//go:nosplit
+func osPreemptExtEnter(mp *m) {
+ for !atomic.Cas(&mp.preemptExtLock, 0, 1) {
+ // An asynchronous preemption is in progress. It's not
+ // safe to enter external code because it may call
+ // ExitProcess and deadlock with SuspendThread.
+ // Ideally we would do the preemption ourselves, but
+ // can't since there may be untyped syscall arguments
+ // on the stack. Instead, just wait and encourage the
+ // SuspendThread APC to run. The preemption should be
+ // done shortly.
+ osyield()
+ }
+ // Asynchronous preemption is now blocked.
+}
+
+// osPreemptExtExit is called after returning from external code that
+// may call ExitProcess.
+//
+// See osPreemptExtEnter for why this is nosplit.
+//
+//go:nosplit
+func osPreemptExtExit(mp *m) {
+ atomic.Store(&mp.preemptExtLock, 0)
+}
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 0823f11e98..4cb6c8a360 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -561,6 +561,12 @@ func deferreturn(arg0 uintptr) {
d.fn = nil
gp._defer = d.link
freedefer(d)
+ // If the defer function pointer is nil, force the seg fault to happen
+ // here rather than in jmpdefer. gentraceback() throws an error if it is
+ // called with a callback on an LR architecture and jmpdefer is on the
+	// stack, because the stack trace can be incorrect in that case
+	// (see issue #8153).
+ _ = fn.fn
jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index 2c38ac02f5..420a7f96e0 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -411,9 +411,17 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) bool {
// locals pointer map, like empty frame functions?
return false
}
- if hasPrefix(funcname(f), "runtime.") ||
- hasPrefix(funcname(f), "runtime/internal/") ||
- hasPrefix(funcname(f), "reflect.") {
+ name := funcname(f)
+ if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
+ inltree := (*[1 << 20]inlinedCall)(inldata)
+ ix := pcdatavalue(f, _PCDATA_InlTreeIndex, pc, nil)
+ if ix >= 0 {
+ name = funcnameFromNameoff(f, inltree[ix].func_)
+ }
+ }
+ if hasPrefix(name, "runtime.") ||
+ hasPrefix(name, "runtime/internal/") ||
+ hasPrefix(name, "reflect.") {
// For now we never async preempt the runtime or
// anything closely tied to the runtime. Known issues
// include: various points in the scheduler ("don't
diff --git a/src/runtime/preempt_nonwindows.go b/src/runtime/preempt_nonwindows.go
new file mode 100644
index 0000000000..3066a1521e
--- /dev/null
+++ b/src/runtime/preempt_nonwindows.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package runtime
+
+//go:nosplit
+func osPreemptExtEnter(mp *m) {}
+
+//go:nosplit
+func osPreemptExtExit(mp *m) {}
diff --git a/src/runtime/preempt_riscv64.s b/src/runtime/preempt_riscv64.s
new file mode 100644
index 0000000000..80c0636c7a
--- /dev/null
+++ b/src/runtime/preempt_riscv64.s
@@ -0,0 +1,8 @@
+// Code generated by mkpreempt.go; DO NOT EDIT.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
+ // No async preemption on riscv64 - see issue 36711
+ UNDEF
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index d264e1d120..2a91e82185 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -857,23 +857,8 @@ func casGFromPreempted(gp *g, old, new uint32) bool {
// goroutines.
func stopTheWorld(reason string) {
semacquire(&worldsema)
- gp := getg()
- gp.m.preemptoff = reason
- systemstack(func() {
- // Mark the goroutine which called stopTheWorld preemptible so its
- // stack may be scanned.
- // This lets a mark worker scan us while we try to stop the world
- // since otherwise we could get in a mutual preemption deadlock.
- // We must not modify anything on the G stack because a stack shrink
- // may occur. A stack shrink is otherwise OK though because in order
- // to return from this function (and to leave the system stack) we
- // must have preempted all goroutines, including any attempting
- // to scan our stack, in which case, any stack shrinking will
- // have already completed by the time we exit.
- casgstatus(gp, _Grunning, _Gwaiting)
- stopTheWorldWithSema()
- casgstatus(gp, _Gwaiting, _Grunning)
- })
+ getg().m.preemptoff = reason
+ systemstack(stopTheWorldWithSema)
}
// startTheWorld undoes the effects of stopTheWorld.
@@ -885,31 +870,10 @@ func startTheWorld() {
getg().m.preemptoff = ""
}
-// stopTheWorldGC has the same effect as stopTheWorld, but blocks
-// until the GC is not running. It also blocks a GC from starting
-// until startTheWorldGC is called.
-func stopTheWorldGC(reason string) {
- semacquire(&gcsema)
- stopTheWorld(reason)
-}
-
-// startTheWorldGC undoes the effects of stopTheWorldGC.
-func startTheWorldGC() {
- startTheWorld()
- semrelease(&gcsema)
-}
-
-// Holding worldsema grants an M the right to try to stop the world.
+// Holding worldsema grants an M the right to try to stop the world
+// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1
-// Holding gcsema grants the M the right to block a GC, and blocks
-// until the current GC is done. In particular, it prevents gomaxprocs
-// from changing concurrently.
-//
-// TODO(mknyszek): Once gomaxprocs and the execution tracer can handle
-// being changed/enabled during a GC, remove this.
-var gcsema uint32 = 1
-
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should stopTheWorldWithSema on the system
@@ -2621,6 +2585,27 @@ func dropg() {
// We pass now in and out to avoid extra calls of nanotime.
//go:yeswritebarrierrec
func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
+ // If there are no timers to adjust, and the first timer on
+ // the heap is not yet ready to run, then there is nothing to do.
+ if atomic.Load(&pp.adjustTimers) == 0 {
+ next := int64(atomic.Load64(&pp.timer0When))
+ if next == 0 {
+ return now, 0, false
+ }
+ if now == 0 {
+ now = nanotime()
+ }
+ if now < next {
+ // Next timer is not ready to run.
+ // But keep going if we would clear deleted timers.
+ // This corresponds to the condition below where
+ // we decide whether to call clearDeletedTimers.
+ if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
+ return now, next, false
+ }
+ }
+ }
+
lock(&pp.timersLock)
adjusttimers(pp)
@@ -2643,6 +2628,13 @@ func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
}
}
+ // If this is the local P, and there are a lot of deleted timers,
+ // clear them out. We only do this for the local P to reduce
+ // lock contention on timersLock.
+ if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
+ clearDeletedTimers(pp)
+ }
+
unlock(&pp.timersLock)
return rnow, pollUntil, ran
@@ -2767,7 +2759,7 @@ func preemptPark(gp *g) {
}
// goyield is like Gosched, but it:
-// - does not emit a GoSched trace event
+// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq
func goyield() {
checkTimeouts()
@@ -2775,6 +2767,9 @@ func goyield() {
}
func goyield_m(gp *g) {
+ if trace.enabled {
+ traceGoPreempt()
+ }
pp := gp.m.p.ptr()
casgstatus(gp, _Grunning, _Grunnable)
dropg()
@@ -4083,7 +4078,10 @@ func (pp *p) destroy() {
lock(&pp.timersLock)
moveTimers(plocal, pp.timers)
pp.timers = nil
+ pp.numTimers = 0
pp.adjustTimers = 0
+ pp.deletedTimers = 0
+ atomic.Store64(&pp.timer0When, 0)
unlock(&pp.timersLock)
unlock(&plocal.timersLock)
}
@@ -4410,23 +4408,26 @@ func checkdead() {
}
// Maybe jump time forward for playground.
- _p_ := timejump()
- if _p_ != nil {
- for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
- if (*pp).ptr() == _p_ {
- *pp = _p_.link
- break
+ if faketime != 0 {
+ when, _p_ := timeSleepUntil()
+ if _p_ != nil {
+ faketime = when
+ for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
+ if (*pp).ptr() == _p_ {
+ *pp = _p_.link
+ break
+ }
}
+ mp := mget()
+ if mp == nil {
+ // There should always be a free M since
+ // nothing is running.
+ throw("checkdead: no m for timer")
+ }
+ mp.nextp.set(_p_)
+ notewakeup(&mp.park)
+ return
}
- mp := mget()
- if mp == nil {
- // There should always be a free M since
- // nothing is running.
- throw("checkdead: no m for timer")
- }
- mp.nextp.set(_p_)
- notewakeup(&mp.park)
- return
}
// There are no goroutines running, so we can look at the P's.
@@ -4471,7 +4472,7 @@ func sysmon() {
}
usleep(delay)
now := nanotime()
- next := timeSleepUntil()
+ next, _ := timeSleepUntil()
if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
lock(&sched.lock)
if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
@@ -4493,7 +4494,7 @@ func sysmon() {
osRelax(false)
}
now = nanotime()
- next = timeSleepUntil()
+ next, _ = timeSleepUntil()
lock(&sched.lock)
atomic.Store(&sched.sysmonwait, 0)
noteclear(&sched.sysmonnote)
diff --git a/src/runtime/race.go b/src/runtime/race.go
index 52c9bd8201..53910f991c 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -403,6 +403,9 @@ func racefini() {
// already held it's assumed that the first caller exits the program
// so other calls can hang forever without an issue.
lock(&raceFiniLock)
+ // We're entering external code that may call ExitProcess on
+ // Windows.
+ osPreemptExtEnter(getg().m)
racecall(&__tsan_fini, 0, 0, 0, 0)
}
diff --git a/src/runtime/race/race.go b/src/runtime/race/race.go
index d298e805cf..c894de5f72 100644
--- a/src/runtime/race/race.go
+++ b/src/runtime/race/race.go
@@ -7,7 +7,7 @@
package race
// This file merely ensures that we link in runtime/cgo in race build,
-// this is turn ensures that runtime uses pthread_create to create threads.
+// this in turn ensures that runtime uses pthread_create to create threads.
// The prebuilt race runtime lives in race_GOOS_GOARCH.syso.
// Calls to the runtime are done directly from src/runtime/race.go.
diff --git a/src/runtime/rt0_linux_riscv64.s b/src/runtime/rt0_linux_riscv64.s
new file mode 100644
index 0000000000..f31f7f75e5
--- /dev/null
+++ b/src/runtime/rt0_linux_riscv64.s
@@ -0,0 +1,14 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT _rt0_riscv64_linux(SB),NOSPLIT|NOFRAME,$0
+ MOV 0(X2), A0 // argc
+ ADD $8, X2, A1 // argv
+ JMP main(SB)
+
+TEXT main(SB),NOSPLIT|NOFRAME,$0
+ MOV $runtime·rt0_go(SB), T0
+ JALR ZERO, T0
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 180dd7c7e4..88a99fc08b 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -312,6 +312,7 @@ var debug struct {
madvdontneed int32 // for Linux; issue 28466
sbrk int32
scavenge int32
+ scavtrace int32
scheddetail int32
schedtrace int32
tracebackancestors int32
@@ -332,6 +333,7 @@ var dbgvars = []dbgVar{
{"madvdontneed", &debug.madvdontneed},
{"sbrk", &debug.sbrk},
{"scavenge", &debug.scavenge},
+ {"scavtrace", &debug.scavtrace},
{"scheddetail", &debug.scheddetail},
{"schedtrace", &debug.schedtrace},
{"tracebackancestors", &debug.tracebackancestors},
@@ -486,7 +488,7 @@ func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}
-// reflect_resolveTextOff resolves an function pointer offset from a base type.
+// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
return (*_type)(rtype).textOff(textOff(off))
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 3f9e51c528..99eb19eb0c 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -613,6 +613,11 @@ type p struct {
_ uint32 // Alignment for atomic fields below
+ // The when field of the first entry on the timer heap.
+ // This is updated using atomic functions.
+ // This is 0 if the timer heap is empty.
+ timer0When uint64
+
// Per-P GC state
gcAssistTime int64 // Nanoseconds in assistAlloc
gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
@@ -644,12 +649,20 @@ type p struct {
// Must hold timersLock to access.
timers []*timer
+ // Number of timers in P's heap.
+ // Modified using atomic instructions.
+ numTimers uint32
+
// Number of timerModifiedEarlier timers on P's heap.
// This should only be modified while holding timersLock,
// or while the timer status is in a transient state
// such as timerModifying.
adjustTimers uint32
+ // Number of timerDeleted timers in P's heap.
+ // Modified using atomic instructions.
+ deletedTimers uint32
+
// Race context used while executing timer functions.
timerRaceCtx uintptr
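
numTimers and deletedTimers are maintained with atomic adds precisely so that other code, such as the checkTimers fast path above, can consult them without timersLock. A rough self-contained approximation of that counter discipline, including the quarter-deleted cleanup heuristic (heapStats, added, deleted, and shouldClean are invented names):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type heapStats struct {
        numTimers     uint32 // timers currently on the heap
        deletedTimers uint32 // of those, how many are marked deleted
    }

    func (s *heapStats) added()   { atomic.AddUint32(&s.numTimers, 1) }
    func (s *heapStats) deleted() { atomic.AddUint32(&s.deletedTimers, 1) }

    // shouldClean mirrors the "more than a quarter deleted" test that
    // decides when clearDeletedTimers is worth running.
    func (s *heapStats) shouldClean() bool {
        return int(atomic.LoadUint32(&s.deletedTimers)) > int(atomic.LoadUint32(&s.numTimers))/4
    }

    func main() {
        s := &heapStats{}
        for i := 0; i < 8; i++ {
            s.added()
        }
        s.deleted()
        s.deleted()
        s.deleted()
        fmt.Println(s.shouldClean()) // true: 3 of 8 are deleted, 3 > 8/4
    }
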
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index 7bbf871caa..9bfd4f96d5 100644
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -199,9 +199,9 @@ func semrelease1(addr *uint32, handoff bool, skipframes int) {
// the waiter G immediately.
// Note that waiter inherits our time slice: this is desirable
// to avoid having a highly contended semaphore hog the P
- // indefinitely. goyield is like Gosched, but it does not emit a
- // GoSched trace event and, more importantly, puts the current G
- // on the local runq instead of the global one.
+ // indefinitely. goyield is like Gosched, but it emits a
+ // "preempted" trace event instead and, more importantly, puts
+ // the current G on the local runq instead of the global one.
// We only do this in the starving regime (handoff=true), as in
// the non-starving case it is possible for a different waiter
// to acquire the semaphore while we are yielding/scheduling,
diff --git a/src/runtime/signal_linux_riscv64.go b/src/runtime/signal_linux_riscv64.go
new file mode 100644
index 0000000000..9f68e5c548
--- /dev/null
+++ b/src/runtime/signal_linux_riscv64.go
@@ -0,0 +1,68 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) regs() *sigcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
+
+func (c *sigctxt) ra() uint64 { return c.regs().sc_regs.ra }
+func (c *sigctxt) sp() uint64 { return c.regs().sc_regs.sp }
+func (c *sigctxt) gp() uint64 { return c.regs().sc_regs.gp }
+func (c *sigctxt) tp() uint64 { return c.regs().sc_regs.tp }
+func (c *sigctxt) t0() uint64 { return c.regs().sc_regs.t0 }
+func (c *sigctxt) t1() uint64 { return c.regs().sc_regs.t1 }
+func (c *sigctxt) t2() uint64 { return c.regs().sc_regs.t2 }
+func (c *sigctxt) s0() uint64 { return c.regs().sc_regs.s0 }
+func (c *sigctxt) s1() uint64 { return c.regs().sc_regs.s1 }
+func (c *sigctxt) a0() uint64 { return c.regs().sc_regs.a0 }
+func (c *sigctxt) a1() uint64 { return c.regs().sc_regs.a1 }
+func (c *sigctxt) a2() uint64 { return c.regs().sc_regs.a2 }
+func (c *sigctxt) a3() uint64 { return c.regs().sc_regs.a3 }
+func (c *sigctxt) a4() uint64 { return c.regs().sc_regs.a4 }
+func (c *sigctxt) a5() uint64 { return c.regs().sc_regs.a5 }
+func (c *sigctxt) a6() uint64 { return c.regs().sc_regs.a6 }
+func (c *sigctxt) a7() uint64 { return c.regs().sc_regs.a7 }
+func (c *sigctxt) s2() uint64 { return c.regs().sc_regs.s2 }
+func (c *sigctxt) s3() uint64 { return c.regs().sc_regs.s3 }
+func (c *sigctxt) s4() uint64 { return c.regs().sc_regs.s4 }
+func (c *sigctxt) s5() uint64 { return c.regs().sc_regs.s5 }
+func (c *sigctxt) s6() uint64 { return c.regs().sc_regs.s6 }
+func (c *sigctxt) s7() uint64 { return c.regs().sc_regs.s7 }
+func (c *sigctxt) s8() uint64 { return c.regs().sc_regs.s8 }
+func (c *sigctxt) s9() uint64 { return c.regs().sc_regs.s9 }
+func (c *sigctxt) s10() uint64 { return c.regs().sc_regs.s10 }
+func (c *sigctxt) s11() uint64 { return c.regs().sc_regs.s11 }
+func (c *sigctxt) t3() uint64 { return c.regs().sc_regs.t3 }
+func (c *sigctxt) t4() uint64 { return c.regs().sc_regs.t4 }
+func (c *sigctxt) t5() uint64 { return c.regs().sc_regs.t5 }
+func (c *sigctxt) t6() uint64 { return c.regs().sc_regs.t6 }
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) pc() uint64 { return c.regs().sc_regs.pc }
+
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
+
+func (c *sigctxt) set_pc(x uint64) { c.regs().sc_regs.pc = x }
+func (c *sigctxt) set_ra(x uint64) { c.regs().sc_regs.ra = x }
+func (c *sigctxt) set_sp(x uint64) { c.regs().sc_regs.sp = x }
+func (c *sigctxt) set_gp(x uint64) { c.regs().sc_regs.gp = x }
+
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+}
diff --git a/src/runtime/signal_riscv64.go b/src/runtime/signal_riscv64.go
new file mode 100644
index 0000000000..cd0c393886
--- /dev/null
+++ b/src/runtime/signal_riscv64.go
@@ -0,0 +1,85 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,riscv64
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+func dumpregs(c *sigctxt) {
+ print("ra ", hex(c.ra()), "\t")
+ print("sp ", hex(c.sp()), "\n")
+ print("gp ", hex(c.gp()), "\t")
+ print("tp ", hex(c.tp()), "\n")
+ print("t0 ", hex(c.t0()), "\t")
+ print("t1 ", hex(c.t1()), "\n")
+ print("t2 ", hex(c.t2()), "\t")
+ print("s0 ", hex(c.s0()), "\n")
+ print("s1 ", hex(c.s1()), "\t")
+ print("a0 ", hex(c.a0()), "\n")
+ print("a1 ", hex(c.a1()), "\t")
+ print("a2 ", hex(c.a2()), "\n")
+ print("a3 ", hex(c.a3()), "\t")
+ print("a4 ", hex(c.a4()), "\n")
+ print("a5 ", hex(c.a5()), "\t")
+ print("a6 ", hex(c.a6()), "\n")
+ print("a7 ", hex(c.a7()), "\t")
+ print("s2 ", hex(c.s2()), "\n")
+ print("s3 ", hex(c.s3()), "\t")
+ print("s4 ", hex(c.s4()), "\n")
+ print("s5 ", hex(c.s5()), "\t")
+ print("s6 ", hex(c.s6()), "\n")
+ print("s7 ", hex(c.s7()), "\t")
+ print("s8 ", hex(c.s8()), "\n")
+ print("s9 ", hex(c.s9()), "\t")
+ print("s10 ", hex(c.s10()), "\n")
+ print("s11 ", hex(c.s11()), "\t")
+ print("t3 ", hex(c.t3()), "\n")
+ print("t4 ", hex(c.t4()), "\t")
+ print("t5 ", hex(c.t5()), "\n")
+ print("t6 ", hex(c.t6()), "\t")
+ print("pc ", hex(c.pc()), "\n")
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) sigpc() uintptr { return uintptr(c.pc()) }
+
+func (c *sigctxt) sigsp() uintptr { return uintptr(c.sp()) }
+func (c *sigctxt) siglr() uintptr { return uintptr(c.ra()) }
+func (c *sigctxt) fault() uintptr { return uintptr(c.sigaddr()) }
+
+// preparePanic sets up the stack to look like a call to sigpanic.
+func (c *sigctxt) preparePanic(sig uint32, gp *g) {
+ // We arrange RA and PC to pretend the panicking
+ // function calls sigpanic directly.
+ // Always save RA to stack so that panics in leaf
+ // functions are correctly handled. This smashes
+ // the stack frame but we're not going back there
+ // anyway.
+ sp := c.sp() - sys.PtrSize
+ c.set_sp(sp)
+ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
+
+ pc := gp.sigpc
+
+ if shouldPushSigpanic(gp, pc, uintptr(c.ra())) {
+ // Make it look like the faulting PC called sigpanic.
+ c.set_ra(uint64(pc))
+ }
+
+ // In case we are panicking from external C code
+ c.set_gp(uint64(uintptr(unsafe.Pointer(gp))))
+ c.set_pc(uint64(funcPC(sigpanic)))
+}
+
+const pushCallSupported = false
+
+func (c *sigctxt) pushCall(targetPC uintptr) {
+ throw("unimplemented")
+}
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index 3b2c06b39c..d123276d3e 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -129,7 +129,14 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
// make the trace look like a call to runtime·sigpanic instead.
// (Otherwise the trace will end at runtime·sigpanic and we
// won't get to see who faulted.)
- if r.ip() != 0 {
+ // Also don't push a sigpanic frame if the faulting PC
+ // is the entry of asyncPreempt. In this case, we suspended
+ // the thread right between the fault and the exception handler
+ // starting to run, and we have pushed an asyncPreempt call.
+ // The exception is not from asyncPreempt, so don't push a
+ // sigpanic call to make it look like it is. Instead, just
+ // overwrite the PC. (See issue #35773)
+ if r.ip() != 0 && r.ip() != funcPC(asyncPreempt) {
sp := unsafe.Pointer(r.sp())
sp = add(sp, ^(unsafe.Sizeof(uintptr(0)) - 1)) // sp--
r.set_sp(uintptr(sp))
diff --git a/src/runtime/string.go b/src/runtime/string.go
index d198f73756..184245b105 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -495,3 +495,37 @@ func gostringw(strw *uint16) string {
b[n2] = 0 // for luck
return s[:n2]
}
+
+// parseRelease parses a dot-separated version number. It follows the
+// semver syntax, but allows the minor and patch versions to be
+// elided.
+func parseRelease(rel string) (major, minor, patch int, ok bool) {
+ // Strip anything after a dash or plus.
+ for i := 0; i < len(rel); i++ {
+ if rel[i] == '-' || rel[i] == '+' {
+ rel = rel[:i]
+ break
+ }
+ }
+
+ next := func() (int, bool) {
+ for i := 0; i < len(rel); i++ {
+ if rel[i] == '.' {
+ ver, ok := atoi(rel[:i])
+ rel = rel[i+1:]
+ return ver, ok
+ }
+ }
+ ver, ok := atoi(rel)
+ rel = ""
+ return ver, ok
+ }
+ if major, ok = next(); !ok || rel == "" {
+ return
+ }
+ if minor, ok = next(); !ok || rel == "" {
+ return
+ }
+ patch, ok = next()
+ return
+}
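
parseRelease leans on the runtime-internal atoi, so it cannot be used directly outside the runtime. An approximate user-space equivalent built on strconv (behavior at odd corners such as a trailing dot may differ slightly from the runtime version):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func parseRelease(rel string) (major, minor, patch int, ok bool) {
        // Strip anything after a dash or plus, as in the runtime version.
        if i := strings.IndexAny(rel, "-+"); i >= 0 {
            rel = rel[:i]
        }
        parts := strings.SplitN(rel, ".", 4)
        nums := make([]int, 0, 3)
        for i, s := range parts {
            if i == 3 {
                break // ignore anything past the patch version
            }
            n, err := strconv.Atoi(s)
            if err != nil {
                return 0, 0, 0, false
            }
            nums = append(nums, n)
        }
        nums = append(nums, 0, 0) // elided minor/patch default to 0
        return nums[0], nums[1], nums[2], true
    }

    func main() {
        fmt.Println(parseRelease("5.12-arch1")) // 5 12 0 true
    }

Per the test table below, "5.12" parses as (5, 12, 0) and an oversized component such as "5.20496382327982653440" fails outright.
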
diff --git a/src/runtime/string_test.go b/src/runtime/string_test.go
index a1716fa32f..80c5fa6406 100644
--- a/src/runtime/string_test.go
+++ b/src/runtime/string_test.go
@@ -454,3 +454,34 @@ func TestAtoi32(t *testing.T) {
}
}
}
+
+type parseReleaseTest struct {
+ in string
+ major, minor, patch int
+}
+
+var parseReleaseTests = []parseReleaseTest{
+ {"", -1, -1, -1},
+ {"x", -1, -1, -1},
+ {"5", 5, 0, 0},
+ {"5.12", 5, 12, 0},
+ {"5.12-x", 5, 12, 0},
+ {"5.12.1", 5, 12, 1},
+ {"5.12.1-x", 5, 12, 1},
+ {"5.12.1.0", 5, 12, 1},
+ {"5.20496382327982653440", -1, -1, -1},
+}
+
+func TestParseRelease(t *testing.T) {
+ for _, test := range parseReleaseTests {
+ major, minor, patch, ok := runtime.ParseRelease(test.in)
+ if !ok {
+ major, minor, patch = -1, -1, -1
+ }
+ if test.major != major || test.minor != minor || test.patch != patch {
+ t.Errorf("parseRelease(%q) = (%v, %v, %v) want (%v, %v, %v)",
+ test.in, major, minor, patch,
+ test.major, test.minor, test.patch)
+ }
+ }
+}
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index a58f267e7f..b8d4d6b30a 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -83,7 +83,17 @@ func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
}
// memmove copies n bytes from "from" to "to".
-// in memmove_*.s
+//
+// memmove ensures that any pointer in "from" is written to "to" with
+// an indivisible write, so that racy reads cannot observe a
+// half-written pointer. This is necessary to prevent the garbage
+// collector from observing invalid pointers, and differs from memmove
+// in unmanaged languages. However, memmove is only required to do
+// this if "from" and "to" may contain pointers, which can only be the
+// case if "from", "to", and "n" are all be word-aligned.
+//
+// Implementations are in memmove_*.s.
+//
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)
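
The guarantee being documented: an aligned, pointer-sized word is copied with a single word write, so a racing reader observes either the old pointer or the new one, never a byte-level mix. The same property made explicit with sync/atomic in ordinary Go (illustrative only; memmove provides it implicitly for aligned pointer words):

    package main

    import (
        "fmt"
        "sync/atomic"
        "unsafe"
    )

    func main() {
        a, b := 1, 2
        var p unsafe.Pointer = unsafe.Pointer(&a)

        // Writer: replaces the pointer with one indivisible word write.
        atomic.StorePointer(&p, unsafe.Pointer(&b))

        // Reader: even if racing, it loads a whole word, so it sees
        // either &a or &b — never an invalid half-updated pointer.
        fmt.Println(*(*int)(atomic.LoadPointer(&p))) // 2
    }
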
diff --git a/src/runtime/sys_freebsd_arm64.s b/src/runtime/sys_freebsd_arm64.s
index e0ef2f679d..2330f2ffe2 100644
--- a/src/runtime/sys_freebsd_arm64.s
+++ b/src/runtime/sys_freebsd_arm64.s
@@ -129,7 +129,7 @@ ok:
// func pipe() (r, w int32, errno int32)
TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- ADD $8, RSP, R0
+ MOVD $r+0(FP), R0
MOVW $0, R1
MOVD $SYS_pipe2, R8
SVC
@@ -141,7 +141,7 @@ ok:
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
- ADD $16, RSP, R0
+ MOVD $r+8(FP), R0
MOVW flags+0(FP), R1
MOVD $SYS_pipe2, R8
SVC
@@ -506,38 +506,33 @@ TEXT runtime·getCntxct(SB),NOSPLIT,$0
CMP $0, R0
BEQ 3(PC)
- // get CNTPCT (Physical Count Register) into x0
- // mrs x0, cntpct_el0 = d53be020
- WORD $0xd53be020 // SIGILL
+ // get CNTPCT (Physical Count Register) into R0
+ MRS CNTPCT_EL0, R0 // SIGILL
B 2(PC)
- // get CNTVCT (Virtual Count Register) into x0
- // mrs x0, cntvct_el0 = d53be040
- WORD $0xd53be040
+ // get CNTVCT (Virtual Count Register) into R0
+ MRS CNTVCT_EL0, R0
MOVW R0, ret+8(FP)
RET
// func getisar0() uint64
TEXT runtime·getisar0(SB),NOSPLIT,$0
- // get Instruction Set Attributes 0 into x0
- // mrs x0, ID_AA64ISAR0_EL1 = d5380600
- WORD $0xd5380600
+ // get Instruction Set Attributes 0 into R0
+ MRS ID_AA64ISAR0_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getisar1() uint64
TEXT runtime·getisar1(SB),NOSPLIT,$0
- // get Instruction Set Attributes 1 into x0
- // mrs x0, ID_AA64ISAR1_EL1 = d5380620
- WORD $0xd5380620
+ // get Instruction Set Attributes 1 into R0
+ MRS ID_AA64ISAR1_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getpfr0() uint64
TEXT runtime·getpfr0(SB),NOSPLIT,$0
- // get Processor Feature Register 0 into x0
- // mrs x0, ID_AA64PFR0_EL1 = d5380400
- WORD $0xd5380400
+ // get Processor Feature Register 0 into R0
+ MRS ID_AA64PFR0_EL1, R0
MOVD R0, ret+0(FP)
RET
diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s
index 373d9d3bc2..8e05acf894 100644
--- a/src/runtime/sys_linux_386.s
+++ b/src/runtime/sys_linux_386.s
@@ -39,6 +39,8 @@
#define SYS_socketcall 102
#define SYS_setittimer 104
#define SYS_clone 120
+#define SYS_uname 122
+#define SYS_mlock 150
#define SYS_sched_yield 158
#define SYS_nanosleep 162
#define SYS_rt_sigreturn 173
@@ -776,3 +778,20 @@ TEXT runtime·sbrk0(SB),NOSPLIT,$0-4
INVOKE_SYSCALL
MOVL AX, ret+0(FP)
RET
+
+// func uname(utsname *new_utsname) int
+TEXT ·uname(SB),NOSPLIT,$0-8
+ MOVL $SYS_uname, AX
+ MOVL utsname+0(FP), BX
+ INVOKE_SYSCALL
+ MOVL AX, ret+4(FP)
+ RET
+
+// func mlock(addr, len uintptr) int
+TEXT ·mlock(SB),NOSPLIT,$0-12
+ MOVL $SYS_mlock, AX
+ MOVL addr+0(FP), BX
+ MOVL len+4(FP), CX
+ INVOKE_SYSCALL
+ MOVL AX, ret+8(FP)
+ RET
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index d16060f6fa..9493101460 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -33,8 +33,10 @@
#define SYS_clone 56
#define SYS_exit 60
#define SYS_kill 62
+#define SYS_uname 63
#define SYS_fcntl 72
#define SYS_sigaltstack 131
+#define SYS_mlock 149
#define SYS_arch_prctl 158
#define SYS_gettid 186
#define SYS_futex 202
@@ -203,7 +205,8 @@ TEXT runtime·mincore(SB),NOSPLIT,$0-28
RET
// func walltime1() (sec int64, nsec int32)
-TEXT runtime·walltime1(SB),NOSPLIT,$0-12
+// non-zero frame-size means bp is saved and restored
+TEXT runtime·walltime1(SB),NOSPLIT,$8-12
// We don't know how much stack space the VDSO code will need,
// so switch to g0.
// In particular, a kernel configured with CONFIG_OPTIMIZE_INLINING=n
@@ -260,7 +263,9 @@ fallback:
MOVL DX, nsec+8(FP)
RET
-TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
+// func nanotime1() int64
+// non-zero frame-size means bp is saved and restored
+TEXT runtime·nanotime1(SB),NOSPLIT,$8-8
// Switch to g0 stack. See comment above in runtime·walltime.
MOVQ SP, BP // Save old SP; BP unchanged by C code.
@@ -764,3 +769,20 @@ TEXT runtime·sbrk0(SB),NOSPLIT,$0-8
SYSCALL
MOVQ AX, ret+0(FP)
RET
+
+// func uname(utsname *new_utsname) int
+TEXT ·uname(SB),NOSPLIT,$0-16
+ MOVQ utsname+0(FP), DI
+ MOVL $SYS_uname, AX
+ SYSCALL
+ MOVQ AX, ret+8(FP)
+ RET
+
+// func mlock(addr, len uintptr) int
+TEXT ·mlock(SB),NOSPLIT,$0-24
+ MOVQ addr+0(FP), DI
+ MOVQ len+8(FP), SI
+ MOVL $SYS_mlock, AX
+ SYSCALL
+ MOVQ AX, ret+16(FP)
+ RET
diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s
index 8908b1bf23..e103da56dc 100644
--- a/src/runtime/sys_linux_arm.s
+++ b/src/runtime/sys_linux_arm.s
@@ -269,8 +269,8 @@ noswitch:
MOVW $CLOCK_REALTIME, R0
MOVW $8(R13), R1 // timespec
- MOVW runtime·vdsoClockgettimeSym(SB), R11
- CMP $0, R11
+ MOVW runtime·vdsoClockgettimeSym(SB), R2
+ CMP $0, R2
B.EQ fallback
// Store g on gsignal's stack, so if we receive a signal
@@ -292,7 +292,7 @@ noswitch:
MOVW (g_stack+stack_lo)(R6), R6 // g.m.gsignal.stack.lo
MOVW g, (R6)
- BL (R11)
+ BL (R2)
MOVW $0, R1
MOVW R1, (R6) // clear g slot, R6 is unchanged by C code
@@ -300,7 +300,7 @@ noswitch:
JMP finish
nosaveg:
- BL (R11)
+ BL (R2)
JMP finish
fallback:
@@ -347,8 +347,8 @@ noswitch:
MOVW $CLOCK_MONOTONIC, R0
MOVW $8(R13), R1 // timespec
- MOVW runtime·vdsoClockgettimeSym(SB), R11
- CMP $0, R11
+ MOVW runtime·vdsoClockgettimeSym(SB), R2
+ CMP $0, R2
B.EQ fallback
// Store g on gsignal's stack, so if we receive a signal
@@ -370,7 +370,7 @@ noswitch:
MOVW (g_stack+stack_lo)(R6), R6 // g.m.gsignal.stack.lo
MOVW g, (R6)
- BL (R11)
+ BL (R2)
MOVW $0, R1
MOVW R1, (R6) // clear g slot, R6 is unchanged by C code
@@ -378,7 +378,7 @@ noswitch:
JMP finish
nosaveg:
- BL (R11)
+ BL (R2)
JMP finish
fallback:
diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s
index 8a0f06f206..b9588cec30 100644
--- a/src/runtime/sys_linux_arm64.s
+++ b/src/runtime/sys_linux_arm64.s
@@ -112,7 +112,7 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
// func pipe() (r, w int32, errno int32)
TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- ADD $8, RSP, R0
+ MOVD $r+0(FP), R0
MOVW $0, R1
MOVW $SYS_pipe2, R8
SVC
@@ -121,7 +121,7 @@ TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
- ADD $16, RSP, R0
+ MOVD $r+8(FP), R0
MOVW flags+0(FP), R1
MOVW $SYS_pipe2, R8
SVC
@@ -688,7 +688,7 @@ TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
MOVD $SYS_fcntl, R8
SVC
MOVD $0x800, R2 // O_NONBLOCK
- EOR R0, R2
+ ORR R0, R2
MOVW fd+0(FP), R0 // fd
MOVD $4, R1 // F_SETFL
MOVD $SYS_fcntl, R8
diff --git a/src/runtime/sys_linux_ppc64x.s b/src/runtime/sys_linux_ppc64x.s
index de14418338..8629fe3233 100644
--- a/src/runtime/sys_linux_ppc64x.s
+++ b/src/runtime/sys_linux_ppc64x.s
@@ -280,7 +280,7 @@ fallback:
ADD $32, R1, R4
SYSCALL $SYS_clock_gettime
MOVD 32(R1), R3
- MOVD 48(R1), R5
+ MOVD 40(R1), R5
JMP finish
TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28
diff --git a/src/runtime/sys_linux_riscv64.s b/src/runtime/sys_linux_riscv64.s
new file mode 100644
index 0000000000..9db8e3d068
--- /dev/null
+++ b/src/runtime/sys_linux_riscv64.s
@@ -0,0 +1,517 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls and other sys.stuff for riscv64, Linux
+//
+
+#include "textflag.h"
+#include "go_asm.h"
+
+#define AT_FDCWD -100
+
+#define SYS_brk 214
+#define SYS_clock_gettime 113
+#define SYS_clone 220
+#define SYS_close 57
+#define SYS_connect 203
+#define SYS_epoll_create1 20
+#define SYS_epoll_ctl 21
+#define SYS_epoll_pwait 22
+#define SYS_exit 93
+#define SYS_exit_group 94
+#define SYS_faccessat 48
+#define SYS_fcntl 25
+#define SYS_futex 98
+#define SYS_getpid 172
+#define SYS_getrlimit 163
+#define SYS_gettid 178
+#define SYS_gettimeofday 169
+#define SYS_kill 129
+#define SYS_madvise 233
+#define SYS_mincore 232
+#define SYS_mmap 222
+#define SYS_munmap 215
+#define SYS_nanosleep 101
+#define SYS_openat 56
+#define SYS_pipe2 59
+#define SYS_pselect6 72
+#define SYS_read 63
+#define SYS_rt_sigaction 134
+#define SYS_rt_sigprocmask 135
+#define SYS_rt_sigreturn 139
+#define SYS_sched_getaffinity 123
+#define SYS_sched_yield 124
+#define SYS_setitimer 103
+#define SYS_sigaltstack 132
+#define SYS_socket 198
+#define SYS_tgkill 131
+#define SYS_tkill 130
+#define SYS_write 64
+
+#define FENCE WORD $0x0ff0000f
+
+// func exit(code int32)
+TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
+ MOVW code+0(FP), A0
+ MOV $SYS_exit_group, A7
+ ECALL
+ RET
+
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
+ MOV wait+0(FP), A0
+ // We're done using the stack.
+ FENCE
+ MOVW ZERO, (A0)
+ FENCE
+ MOV $0, A0 // exit code
+ MOV $SYS_exit, A7
+ ECALL
+ JMP 0(PC)
+
+// func open(name *byte, mode, perm int32) int32
+TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
+ MOV $AT_FDCWD, A0
+ MOV name+0(FP), A1
+ MOVW mode+8(FP), A2
+ MOVW perm+12(FP), A3
+ MOV $SYS_openat, A7
+ ECALL
+ MOV $-4096, T0
+ BGEU T0, A0, 2(PC)
+ MOV $-1, A0
+ MOVW A0, ret+16(FP)
+ RET
+
+// func closefd(fd int32) int32
+TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12
+ MOVW fd+0(FP), A0
+ MOV $SYS_close, A7
+ ECALL
+ MOV $-4096, T0
+ BGEU T0, A0, 2(PC)
+ MOV $-1, A0
+ MOVW A0, ret+8(FP)
+ RET
+
+// func write1(fd uintptr, p unsafe.Pointer, n int32) int32
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28
+ MOV fd+0(FP), A0
+ MOV p+8(FP), A1
+ MOVW n+16(FP), A2
+ MOV $SYS_write, A7
+ ECALL
+ MOVW A0, ret+24(FP)
+ RET
+
+// func read(fd int32, p unsafe.Pointer, n int32) int32
+TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
+ MOVW fd+0(FP), A0
+ MOV p+8(FP), A1
+ MOVW n+16(FP), A2
+ MOV $SYS_read, A7
+ ECALL
+ MOVW A0, ret+24(FP)
+ RET
+
+// func pipe() (r, w int32, errno int32)
+TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
+ MOV $r+0(FP), A0
+ MOV ZERO, A1
+ MOV $SYS_pipe2, A7
+ ECALL
+ MOVW A0, errno+8(FP)
+ RET
+
+// func pipe2(flags int32) (r, w int32, errno int32)
+TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
+ MOV $r+8(FP), A0
+ MOVW flags+0(FP), A1
+ MOV $SYS_pipe2, A7
+ ECALL
+ MOVW A0, errno+16(FP)
+ RET
+
+// func getrlimit(kind int32, limit unsafe.Pointer) int32
+TEXT runtime·getrlimit(SB),NOSPLIT|NOFRAME,$0-20
+ MOVW kind+0(FP), A0
+ MOV limit+8(FP), A1
+ MOV $SYS_getrlimit, A7
+ ECALL
+ MOVW A0, ret+16(FP)
+ RET
+
+// func usleep(usec uint32)
+TEXT runtime·usleep(SB),NOSPLIT,$24-4
+ MOVWU usec+0(FP), A0
+ MOV $1000, A1
+ MUL A1, A0, A0
+ MOV $1000000000, A1
+ DIV A1, A0, A2
+ MOV A2, 8(X2)
+ REM A1, A0, A3
+ MOV A3, 16(X2)
+ ADD $8, X2, A0
+ MOV ZERO, A1
+ MOV $SYS_nanosleep, A7
+ ECALL
+ RET
+
+// func gettid() uint32
+TEXT runtime·gettid(SB),NOSPLIT,$0-4
+ MOV $SYS_gettid, A7
+ ECALL
+ MOVW A0, ret+0(FP)
+ RET
+
+// func raise(sig uint32)
+TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0
+ MOV $SYS_gettid, A7
+ ECALL
+ // arg 1 tid - already in A0
+ MOVW sig+0(FP), A1 // arg 2
+ MOV $SYS_tkill, A7
+ ECALL
+ RET
+
+// func raiseproc(sig uint32)
+TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
+ MOV $SYS_getpid, A7
+ ECALL
+ // arg 1 pid - already in A0
+ MOVW sig+0(FP), A1 // arg 2
+ MOV $SYS_kill, A7
+ ECALL
+ RET
+
+// func getpid() int
+TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8
+ MOV $SYS_getpid, A7
+ ECALL
+ MOV A0, ret+0(FP)
+ RET
+
+// func tgkill(tgid, tid, sig int)
+TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24
+ MOV tgid+0(FP), A0
+ MOV tid+8(FP), A1
+ MOV sig+16(FP), A2
+ MOV $SYS_tgkill, A7
+ ECALL
+ RET
+
+// func setitimer(mode int32, new, old *itimerval)
+TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
+ MOVW mode+0(FP), A0
+ MOV new+8(FP), A1
+ MOV old+16(FP), A2
+ MOV $SYS_setitimer, A7
+ ECALL
+ RET
+
+// func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
+TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28
+ MOV addr+0(FP), A0
+ MOV n+8(FP), A1
+ MOV dst+16(FP), A2
+ MOV $SYS_mincore, A7
+ ECALL
+ MOVW A0, ret+24(FP)
+ RET
+
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$24-12
+ MOV $0, A0 // CLOCK_REALTIME
+ MOV $8(X2), A1
+ MOV $SYS_clock_gettime, A7
+ ECALL
+ MOV 8(X2), T0 // sec
+ MOV 16(X2), T1 // nsec
+ MOV T0, sec+0(FP)
+ MOVW T1, nsec+8(FP)
+ RET
+
+// func nanotime1() int64
+TEXT runtime·nanotime1(SB),NOSPLIT,$24-8
+ MOV $1, A0 // CLOCK_MONOTONIC
+ MOV $8(X2), A1
+ MOV $SYS_clock_gettime, A7
+ ECALL
+ MOV 8(X2), T0 // sec
+ MOV 16(X2), T1 // nsec
+ // sec is in T0, nsec in T1
+ // return nsec in T0
+ MOV $1000000000, T2
+ MUL T2, T0
+ ADD T1, T0
+ MOV T0, ret+0(FP)
+ RET
+
+// func rtsigprocmask(how int32, new, old *sigset, size int32)
+TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28
+ MOVW how+0(FP), A0
+ MOV new+8(FP), A1
+ MOV old+16(FP), A2
+ MOVW size+24(FP), A3
+ MOV $SYS_rt_sigprocmask, A7
+ ECALL
+ MOV $-4096, T0
+ BLTU A0, T0, 2(PC)
+ WORD $0 // crash
+ RET
+
+// func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
+TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36
+ MOV sig+0(FP), A0
+ MOV new+8(FP), A1
+ MOV old+16(FP), A2
+ MOV size+24(FP), A3
+ MOV $SYS_rt_sigaction, A7
+ ECALL
+ MOVW A0, ret+32(FP)
+ RET
+
+// func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
+TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
+ MOVW sig+8(FP), A0
+ MOV info+16(FP), A1
+ MOV ctx+24(FP), A2
+ MOV fn+0(FP), T1
+ JALR RA, T1
+ RET
+
+// func sigtramp(signo, ureg, ctxt unsafe.Pointer)
+TEXT runtime·sigtramp(SB),NOSPLIT,$64
+ MOVW A0, 8(X2)
+ MOV A1, 16(X2)
+ MOV A2, 24(X2)
+
+ // this might be called in external code context,
+ // where g is not set.
+ MOVBU runtime·iscgo(SB), A0
+ BEQ A0, ZERO, 2(PC)
+ CALL runtime·load_g(SB)
+
+ MOV $runtime·sigtrampgo(SB), A0
+ JALR RA, A0
+ RET
+
+// func cgoSigtramp()
+TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
+ MOV $runtime·sigtramp(SB), T1
+ JALR ZERO, T1
+
+// func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
+TEXT runtime·mmap(SB),NOSPLIT|NOFRAME,$0
+ MOV addr+0(FP), A0
+ MOV n+8(FP), A1
+ MOVW prot+16(FP), A2
+ MOVW flags+20(FP), A3
+ MOVW fd+24(FP), A4
+ MOVW off+28(FP), A5
+ MOV $SYS_mmap, A7
+ ECALL
+ MOV $-4096, T0
+ BGEU T0, A0, 5(PC)
+ SUB A0, ZERO, A0
+ MOV ZERO, p+32(FP)
+ MOV A0, err+40(FP)
+ RET
+ok:
+ MOV A0, p+32(FP)
+ MOV ZERO, err+40(FP)
+ RET
+
+// func munmap(addr unsafe.Pointer, n uintptr)
+TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0
+ MOV addr+0(FP), A0
+ MOV n+8(FP), A1
+ MOV $SYS_munmap, A7
+ ECALL
+ MOV $-4096, T0
+ BLTU A0, T0, 2(PC)
+ WORD $0 // crash
+ RET
+
+// func madvise(addr unsafe.Pointer, n uintptr, flags int32)
+TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0
+ MOV addr+0(FP), A0
+ MOV n+8(FP), A1
+ MOVW flags+16(FP), A2
+ MOV $SYS_madvise, A7
+ ECALL
+ MOVW A0, ret+24(FP)
+ RET
+
+// func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32
+TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0
+ MOV addr+0(FP), A0
+ MOVW op+8(FP), A1
+ MOVW val+12(FP), A2
+ MOV ts+16(FP), A3
+ MOV addr2+24(FP), A4
+ MOVW val3+32(FP), A5
+ MOV $SYS_futex, A7
+ ECALL
+ MOVW A0, ret+40(FP)
+ RET
+
+// func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32
+TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0
+ MOVW flags+0(FP), A0
+ MOV stk+8(FP), A1
+
+ // Copy mp, gp, fn off parent stack for use by child.
+ MOV mp+16(FP), T0
+ MOV gp+24(FP), T1
+ MOV fn+32(FP), T2
+
+ MOV T0, -8(A1)
+ MOV T1, -16(A1)
+ MOV T2, -24(A1)
+ MOV $1234, T0
+ MOV T0, -32(A1)
+
+ MOV $SYS_clone, A7
+ ECALL
+
+ // In parent, return.
+ BEQ ZERO, A0, child
+ MOVW ZERO, ret+40(FP)
+ RET
+
+child:
+ // In child, on new stack.
+ MOV -32(X2), T0
+ MOV $1234, A0
+ BEQ A0, T0, good
+ WORD $0 // crash
+
+good:
+ // Initialize m->procid to Linux tid
+ MOV $SYS_gettid, A7
+ ECALL
+
+ MOV -24(X2), T2 // fn
+ MOV -16(X2), T1 // g
+ MOV -8(X2), T0 // m
+
+ BEQ ZERO, T0, nog
+ BEQ ZERO, T1, nog
+
+ MOV A0, m_procid(T0)
+
+ // In child, set up new stack
+ MOV T0, g_m(T1)
+ MOV T1, g
+
+nog:
+ // Call fn
+ JALR RA, T2
+
+ // It shouldn't return. If it does, exit this thread.
+ MOV $111, A0
+ MOV $SYS_exit, A7
+ ECALL
+ JMP -3(PC) // keep exiting
+
+// func sigaltstack(new, old *stackt)
+TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0
+ MOV new+0(FP), A0
+ MOV old+8(FP), A1
+ MOV $SYS_sigaltstack, A7
+ ECALL
+ MOV $-4096, T0
+ BLTU A0, T0, 2(PC)
+ WORD $0 // crash
+ RET
+
+// func osyield()
+TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0
+ MOV $SYS_sched_yield, A7
+ ECALL
+ RET
+
+// func sched_getaffinity(pid, len uintptr, buf *uintptr) int32
+TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0
+ MOV pid+0(FP), A0
+ MOV len+8(FP), A1
+ MOV buf+16(FP), A2
+ MOV $SYS_sched_getaffinity, A7
+ ECALL
+ MOV A0, ret+24(FP)
+ RET
+
+// func epollcreate(size int32) int32
+TEXT runtime·epollcreate(SB),NOSPLIT|NOFRAME,$0
+ MOV $0, A0
+ MOV $SYS_epoll_create1, A7
+ ECALL
+ MOVW A0, ret+8(FP)
+ RET
+
+// func epollcreate1(flags int32) int32
+TEXT runtime·epollcreate1(SB),NOSPLIT|NOFRAME,$0
+ MOVW flags+0(FP), A0
+ MOV $SYS_epoll_create1, A7
+ ECALL
+ MOVW A0, ret+8(FP)
+ RET
+
+// func epollctl(epfd, op, fd int32, ev *epollevent) int32
+TEXT runtime·epollctl(SB),NOSPLIT|NOFRAME,$0
+ MOVW epfd+0(FP), A0
+ MOVW op+4(FP), A1
+ MOVW fd+8(FP), A2
+ MOV ev+16(FP), A3
+ MOV $SYS_epoll_ctl, A7
+ ECALL
+ MOVW A0, ret+24(FP)
+ RET
+
+// func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32
+TEXT runtime·epollwait(SB),NOSPLIT|NOFRAME,$0
+ MOVW epfd+0(FP), A0
+ MOV ev+8(FP), A1
+ MOVW nev+16(FP), A2
+ MOVW timeout+20(FP), A3
+ MOV $0, A4
+ MOV $SYS_epoll_pwait, A7
+ ECALL
+ MOVW A0, ret+24(FP)
+ RET
+
+// func closeonexec(int32)
+TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
+ MOVW fd+0(FP), A0 // fd
+ MOV $2, A1 // F_SETFD
+ MOV $1, A2 // FD_CLOEXEC
+ MOV $SYS_fcntl, A7
+ ECALL
+ RET
+
+// func runtime·setNonblock(int32 fd)
+TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
+ MOVW fd+0(FP), A0 // fd
+ MOV $3, A1 // F_GETFL
+ MOV $0, A2
+ MOV $SYS_fcntl, A7
+ ECALL
+ MOV $0x800, A2 // O_NONBLOCK
+ OR A0, A2
+ MOVW fd+0(FP), A0 // fd
+ MOV $4, A1 // F_SETFL
+ MOV $SYS_fcntl, A7
+ ECALL
+ RET
+
+// func sbrk0() uintptr
+TEXT runtime·sbrk0(SB),NOSPLIT,$0-8
+ // Implemented as brk(NULL).
+ MOV $0, A0
+ MOV $SYS_brk, A7
+ ECALL
+ MOVW A0, ret+0(FP)
+ RET
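
A pattern repeated throughout this file: Linux system calls return -errno in A0, and the stubs treat any result in the last 4096 values (the BGEU against $-4096) as an error, negating it with SUB A0, ZERO, A0 to recover the errno. The same decode expressed in Go (decode is an invented helper, not runtime API):

    package main

    import "fmt"

    // decode splits a raw Linux syscall return value into a result
    // and an errno, mirroring the BGEU/SUB sequence in the stubs.
    func decode(a0 int64) (ret, errno int64) {
        if a0 < 0 && a0 > -4096 { // error range is -4095..-1
            return -1, -a0
        }
        return a0, 0
    }

    func main() {
        fmt.Println(decode(3))   // 3 0   (e.g. a valid fd)
        fmt.Println(decode(-13)) // -1 13 (13 = EACCES)
    }
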
diff --git a/src/runtime/sys_openbsd_arm64.s b/src/runtime/sys_openbsd_arm64.s
index 8e1a5bc542..839aa57062 100644
--- a/src/runtime/sys_openbsd_arm64.s
+++ b/src/runtime/sys_openbsd_arm64.s
@@ -65,7 +65,7 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
// func pipe() (r, w int32, errno int32)
TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVD RSP, R0
+ MOVD $r+0(FP), R0
MOVW $0, R1
MOVD $101, R8 // sys_pipe2
SVC
@@ -76,7 +76,7 @@ TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
- ADD $8, RSP, R0
+ MOVD $r+8(FP), R0
MOVW flags+0(FP), R1
MOVD $101, R8 // sys_pipe2
SVC
@@ -86,7 +86,7 @@ TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
RET
TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0
- MOVW fd+0(FP), R0 // arg 1 - fd
+ MOVD fd+0(FP), R0 // arg 1 - fd
MOVD p+8(FP), R1 // arg 2 - buf
MOVW n+16(FP), R2 // arg 3 - nbyte
MOVD $4, R8 // sys_write
@@ -428,8 +428,8 @@ TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
MOVD $0, R2 // arg 3
MOVD $92, R8 // sys_fcntl
SVC
- MOVD $0x800, R2 // O_NONBLOCK
- EOR R0, R2 // arg 3 - flags
+ MOVD $4, R2 // O_NONBLOCK
+ ORR R0, R2 // arg 3 - flags
MOVW fd+0(FP), R0 // arg 1 - fd
MOVD $4, R1 // arg 2 - cmd (F_SETFL)
MOVD $92, R8 // sys_fcntl
diff --git a/src/runtime/sys_riscv64.go b/src/runtime/sys_riscv64.go
new file mode 100644
index 0000000000..e710840819
--- /dev/null
+++ b/src/runtime/sys_riscv64.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// adjust Gobuf as if it executed a call to fn with context ctxt
+// and then did an immediate Gosave.
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+ if buf.lr != 0 {
+ throw("invalid use of gostartcall")
+ }
+ buf.lr = buf.pc
+ buf.pc = uintptr(fn)
+ buf.ctxt = ctxt
+}
diff --git a/src/runtime/syscall_solaris.go b/src/runtime/syscall_solaris.go
index 76db54d274..094516927f 100644
--- a/src/runtime/syscall_solaris.go
+++ b/src/runtime/syscall_solaris.go
@@ -142,6 +142,9 @@ func syscall_forkx(flags uintptr) (pid uintptr, err uintptr) {
args: uintptr(unsafe.Pointer(&flags)),
}
asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&call))
+ if int(call.r1) != -1 {
+ call.err = 0
+ }
return call.r1, call.err
}
diff --git a/src/runtime/testdata/testprog/checkptr.go b/src/runtime/testdata/testprog/checkptr.go
new file mode 100644
index 0000000000..177db38e5a
--- /dev/null
+++ b/src/runtime/testdata/testprog/checkptr.go
@@ -0,0 +1,36 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "unsafe"
+
+func init() {
+ register("CheckPtrAlignment", CheckPtrAlignment)
+ register("CheckPtrArithmetic", CheckPtrArithmetic)
+ register("CheckPtrSize", CheckPtrSize)
+ register("CheckPtrSmall", CheckPtrSmall)
+}
+
+func CheckPtrAlignment() {
+ var x [2]int64
+ p := unsafe.Pointer(&x[0])
+ sink2 = (*int64)(unsafe.Pointer(uintptr(p) + 1))
+}
+
+func CheckPtrArithmetic() {
+ var x int
+ i := uintptr(unsafe.Pointer(&x))
+ sink2 = (*int)(unsafe.Pointer(i))
+}
+
+func CheckPtrSize() {
+ p := new(int64)
+ sink2 = p
+ sink2 = (*[100]int64)(unsafe.Pointer(p))
+}
+
+func CheckPtrSmall() {
+ sink2 = unsafe.Pointer(uintptr(1))
+}
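
These helpers are registered for the runtime's test harness rather than run standalone. A self-contained version of the alignment case, runnable on its own; it should panic only when checkptr instrumentation is active (implied by -race in this release; the compiler also has a -d=checkptr debug flag, though the exact invocation is an assumption here):

    package main

    import "unsafe"

    var sink *int64

    func main() {
        var x [2]int64
        p := unsafe.Pointer(&x[0])
        // Converting to *int64 at a misaligned address is exactly
        // what CheckPtrAlignment above exercises.
        sink = (*int64)(unsafe.Pointer(uintptr(p) + 1))
    }
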
diff --git a/src/runtime/testdata/testprog/preempt.go b/src/runtime/testdata/testprog/preempt.go
index 1454095cde..1c74d0e435 100644
--- a/src/runtime/testdata/testprog/preempt.go
+++ b/src/runtime/testdata/testprog/preempt.go
@@ -34,13 +34,19 @@ func AsyncPreempt() {
// This is an especially interesting case for
// LR machines.
go func() {
- atomic.StoreUint32(&ready2, 1)
+ atomic.AddUint32(&ready2, 1)
frameless()
}()
+ // Also test empty infinite loop.
+ go func() {
+ atomic.AddUint32(&ready2, 1)
+ for {
+ }
+ }()
// Wait for the goroutine to stop passing through sync
// safe-points.
- for atomic.LoadUint32(&ready) == 0 || atomic.LoadUint32(&ready2) == 0 {
+ for atomic.LoadUint32(&ready) == 0 || atomic.LoadUint32(&ready2) < 2 {
runtime.Gosched()
}
diff --git a/src/runtime/time.go b/src/runtime/time.go
index d64bea814f..af5db4cc58 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -74,14 +74,15 @@ type timer struct {
// timerNoStatus -> timerWaiting
// anything else -> panic: invalid value
// deltimer:
-// timerWaiting -> timerDeleted
-// timerModifiedXX -> timerDeleted
-// timerNoStatus -> do nothing
-// timerDeleted -> do nothing
-// timerRemoving -> do nothing
-// timerRemoved -> do nothing
-// timerRunning -> wait until status changes
-// timerMoving -> wait until status changes
+// timerWaiting -> timerDeleted
+// timerModifiedEarlier -> timerModifying -> timerDeleted
+// timerModifiedLater -> timerDeleted
+// timerNoStatus -> do nothing
+// timerDeleted -> do nothing
+// timerRemoving -> do nothing
+// timerRemoved -> do nothing
+// timerRunning -> wait until status changes
+// timerMoving -> wait until status changes
// timerModifying -> panic: concurrent deltimer/modtimer calls
// modtimer:
// timerWaiting -> timerModifying -> timerModifiedXX
@@ -169,6 +170,10 @@ const (
// maxWhen is the maximum value for timer's when field.
const maxWhen = 1<<63 - 1
+// verifyTimers can be set to true to add debugging checks that the
+// timer heaps are valid.
+const verifyTimers = false
+
// Package time APIs.
// Godoc uses the comments in package time, not these.
@@ -284,7 +289,12 @@ func doaddtimer(pp *p, t *timer) bool {
t.pp.set(pp)
i := len(pp.timers)
pp.timers = append(pp.timers, t)
- return siftupTimer(pp.timers, i)
+ ok := siftupTimer(pp.timers, i)
+ if t == pp.timers[0] {
+ atomic.Store64(&pp.timer0When, uint64(t.when))
+ }
+ atomic.Xadd(&pp.numTimers, 1)
+ return ok
}
// deltimer deletes the timer t. It may be on some other P, so we can't
@@ -295,7 +305,9 @@ func deltimer(t *timer) bool {
for {
switch s := atomic.Load(&t.status); s {
case timerWaiting, timerModifiedLater:
+ tpp := t.pp.ptr()
if atomic.Cas(&t.status, s, timerDeleted) {
+ atomic.Xadd(&tpp.deletedTimers, 1)
// Timer was not yet run.
return true
}
@@ -306,6 +318,7 @@ func deltimer(t *timer) bool {
if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
badTimer()
}
+ atomic.Xadd(&tpp.deletedTimers, 1)
// Timer was not yet run.
return true
}
@@ -356,6 +369,10 @@ func dodeltimer(pp *p, i int) bool {
ok = false
}
}
+ if i == 0 {
+ updateTimer0When(pp)
+ }
+ atomic.Xadd(&pp.numTimers, -1)
return ok
}
@@ -379,6 +396,8 @@ func dodeltimer0(pp *p) bool {
if last > 0 {
ok = siftdownTimer(pp.timers, 0)
}
+ updateTimer0When(pp)
+ atomic.Xadd(&pp.numTimers, -1)
return ok
}
@@ -486,6 +505,7 @@ func resettimer(t *timer, when int64) {
return
}
case timerDeleted:
+ tpp := t.pp.ptr()
if atomic.Cas(&t.status, s, timerModifying) {
t.nextwhen = when
newStatus := uint32(timerModifiedLater)
@@ -496,6 +516,7 @@ func resettimer(t *timer, when int64) {
if !atomic.Cas(&t.status, timerModifying, newStatus) {
badTimer()
}
+ atomic.Xadd(&tpp.deletedTimers, -1)
if newStatus == timerModifiedEarlier {
wakeNetPoller(when)
}
@@ -543,6 +564,7 @@ func cleantimers(pp *p) bool {
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
return false
}
+ atomic.Xadd(&pp.deletedTimers, -1)
case timerModifiedEarlier, timerModifiedLater:
if !atomic.Cas(&t.status, s, timerMoving) {
continue
@@ -631,9 +653,13 @@ func adjusttimers(pp *p) {
return
}
if atomic.Load(&pp.adjustTimers) == 0 {
+ if verifyTimers {
+ verifyTimerHeap(pp)
+ }
return
}
var moved []*timer
+loop:
for i := 0; i < len(pp.timers); i++ {
t := pp.timers[i]
if t.pp.ptr() != pp {
@@ -648,6 +674,7 @@ func adjusttimers(pp *p) {
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
badTimer()
}
+ atomic.Xadd(&pp.deletedTimers, -1)
// Look at this heap position again.
i--
}
@@ -665,10 +692,11 @@ func adjusttimers(pp *p) {
moved = append(moved, t)
if s == timerModifiedEarlier {
if n := atomic.Xadd(&pp.adjustTimers, -1); int32(n) <= 0 {
- addAdjustedTimers(pp, moved)
- return
+ break loop
}
}
+ // Look at this heap position again.
+ i--
}
case timerNoStatus, timerRunning, timerRemoving, timerRemoved, timerMoving:
badTimer()
@@ -686,6 +714,10 @@ func adjusttimers(pp *p) {
if len(moved) > 0 {
addAdjustedTimers(pp, moved)
}
+
+ if verifyTimers {
+ verifyTimerHeap(pp)
+ }
}
// addAdjustedTimers adds any timers we adjusted in adjusttimers
@@ -709,17 +741,11 @@ func addAdjustedTimers(pp *p, moved []*timer) {
// The netpoller M will wake up and adjust timers before sleeping again.
//go:nowritebarrierrec
func nobarrierWakeTime(pp *p) int64 {
- lock(&pp.timersLock)
- ret := int64(0)
- if len(pp.timers) > 0 {
- if atomic.Load(&pp.adjustTimers) > 0 {
- ret = nanotime()
- } else {
- ret = pp.timers[0].when
- }
+ if atomic.Load(&pp.adjustTimers) > 0 {
+ return nanotime()
+ } else {
+ return int64(atomic.Load64(&pp.timer0When))
}
- unlock(&pp.timersLock)
- return ret
}
// runtimer examines the first timer in timers. If it is ready based on now,
@@ -760,6 +786,7 @@ func runtimer(pp *p, now int64) int64 {
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
badTimer()
}
+ atomic.Xadd(&pp.deletedTimers, -1)
if len(pp.timers) == 0 {
return -1
}
@@ -826,6 +853,7 @@ func runOneTimer(pp *p, t *timer, now int64) {
if !atomic.Cas(&t.status, timerRunning, timerWaiting) {
badTimer()
}
+ updateTimer0When(pp)
} else {
// Remove from heap.
if !dodeltimer0(pp) {
@@ -857,69 +885,131 @@ func runOneTimer(pp *p, t *timer, now int64) {
}
}
-func timejump() *p {
- if faketime == 0 {
- return nil
- }
-
- // Nothing is running, so we can look at all the P's.
- // Determine a timer bucket with minimum when.
- var (
- minT *timer
- minWhen int64
- minP *p
- )
- for _, pp := range allp {
- if pp.status != _Pidle && pp.status != _Pdead {
- throw("non-idle P in timejump")
- }
- if len(pp.timers) == 0 {
- continue
- }
- c := pp.adjustTimers
- for _, t := range pp.timers {
+// clearDeletedTimers removes all deleted timers from the P's timer heap.
+// This is used to avoid clogging up the heap if the program
+// starts a lot of long-running timers and then stops them.
+// For example, this can happen via context.WithTimeout.
+//
+// This is the only function that walks through the entire timer heap,
+// other than moveTimers which only runs when the world is stopped.
+//
+// The caller must have locked the timers for pp.
+func clearDeletedTimers(pp *p) {
+ cdel := int32(0)
+ cearlier := int32(0)
+ to := 0
+ changedHeap := false
+ timers := pp.timers
+nextTimer:
+ for _, t := range timers {
+ for {
switch s := atomic.Load(&t.status); s {
case timerWaiting:
- if minT == nil || t.when < minWhen {
- minT = t
- minWhen = t.when
- minP = pp
+ if changedHeap {
+ timers[to] = t
+ siftupTimer(timers, to)
}
+ to++
+ continue nextTimer
case timerModifiedEarlier, timerModifiedLater:
- if minT == nil || t.nextwhen < minWhen {
- minT = t
- minWhen = t.nextwhen
- minP = pp
+ if atomic.Cas(&t.status, s, timerMoving) {
+ t.when = t.nextwhen
+ timers[to] = t
+ siftupTimer(timers, to)
+ to++
+ changedHeap = true
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
+ if s == timerModifiedEarlier {
+ cearlier++
+ }
+ continue nextTimer
}
- if s == timerModifiedEarlier {
- c--
+ case timerDeleted:
+ if atomic.Cas(&t.status, s, timerRemoving) {
+ t.pp = 0
+ cdel++
+ if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
+ badTimer()
+ }
+ changedHeap = true
+ continue nextTimer
}
- case timerRunning, timerModifying, timerMoving:
+ case timerModifying:
+ // Loop until modification complete.
+ osyield()
+ case timerNoStatus, timerRemoved:
+ // We should not see these status values in a timer heap.
+ badTimer()
+ case timerRunning, timerRemoving, timerMoving:
+ // Some other P thinks it owns this timer,
+ // which should not happen.
+ badTimer()
+ default:
badTimer()
- }
- // The timers are sorted, so we only have to check
- // the first timer for each P, unless there are
- // some timerModifiedEarlier timers. The number
- // of timerModifiedEarlier timers is in the adjustTimers
- // field, used to initialize c, above.
- if c == 0 {
- break
}
}
}
- if minT == nil || minWhen <= faketime {
- return nil
+ // Set remaining slots in timers slice to nil,
+ // so that the timer values can be garbage collected.
+ for i := to; i < len(timers); i++ {
+ timers[i] = nil
+ }
+
+ atomic.Xadd(&pp.deletedTimers, -cdel)
+ atomic.Xadd(&pp.numTimers, -cdel)
+ atomic.Xadd(&pp.adjustTimers, -cearlier)
+
+ timers = timers[:to]
+ pp.timers = timers
+ updateTimer0When(pp)
+
+ if verifyTimers {
+ verifyTimerHeap(pp)
+ }
+}
+
+// verifyTimerHeap verifies that the timer heap is in a valid state.
+// This is only for debugging, and is only called if verifyTimers is true.
+// The caller must have locked the timers.
+func verifyTimerHeap(pp *p) {
+ for i, t := range pp.timers {
+ if i == 0 {
+ // First timer has no parent.
+ continue
+ }
+
+ // The heap is 4-ary. See siftupTimer and siftdownTimer.
+ p := (i - 1) / 4
+ if t.when < pp.timers[p].when {
+ print("bad timer heap at ", i, ": ", p, ": ", pp.timers[p].when, ", ", i, ": ", t.when, "\n")
+ throw("bad timer heap")
+ }
+ }
+ if numTimers := int(atomic.Load(&pp.numTimers)); len(pp.timers) != numTimers {
+ println("timer heap len", len(pp.timers), "!= numTimers", numTimers)
+ throw("bad timer heap len")
}
+}
- faketime = minWhen
- return minP
+// updateTimer0When sets the P's timer0When field.
+// The caller must have locked the timers for pp.
+func updateTimer0When(pp *p) {
+ if len(pp.timers) == 0 {
+ atomic.Store64(&pp.timer0When, 0)
+ } else {
+ atomic.Store64(&pp.timer0When, uint64(pp.timers[0].when))
+ }
}
-// timeSleepUntil returns the time when the next timer should fire.
-// This is only called by sysmon.
-func timeSleepUntil() int64 {
+// timeSleepUntil returns the time when the next timer should fire,
+// and the P holding the timer heap that timer is on.
+// This is only called by sysmon and checkdead.
+func timeSleepUntil() (int64, *p) {
next := int64(maxWhen)
+ var pret *p
// Prevent allp slice changes. This is like retake.
lock(&allpLock)
@@ -930,8 +1020,17 @@ func timeSleepUntil() int64 {
continue
}
- lock(&pp.timersLock)
c := atomic.Load(&pp.adjustTimers)
+ if c == 0 {
+ w := int64(atomic.Load64(&pp.timer0When))
+ if w != 0 && w < next {
+ next = w
+ pret = pp
+ }
+ continue
+ }
+
+ lock(&pp.timersLock)
for _, t := range pp.timers {
switch s := atomic.Load(&t.status); s {
case timerWaiting:
@@ -966,7 +1065,7 @@ func timeSleepUntil() int64 {
}
unlock(&allpLock)
- return next
+ return next, pret
}
// Heap maintenance algorithms.
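
verifyTimerHeap depends on the heap being 4-ary: the parent of node i (for i > 0) sits at (i-1)/4, and in a valid heap every node's when is no smaller than its parent's. A quick standalone check of that invariant on a hand-built heap:

    package main

    import "fmt"

    func main() {
        // A valid 4-ary min-heap of "when" values: children of node 0
        // occupy indices 1..4, children of node 1 occupy 5..8, etc.
        when := []int64{10, 20, 30, 40, 50, 21, 22, 23, 24}
        for i := 1; i < len(when); i++ {
            p := (i - 1) / 4
            if when[i] < when[p] {
                fmt.Println("bad heap at", i)
                return
            }
        }
        fmt.Println("heap ok")
    }
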
diff --git a/src/runtime/tls_riscv64.s b/src/runtime/tls_riscv64.s
new file mode 100644
index 0000000000..8386980421
--- /dev/null
+++ b/src/runtime/tls_riscv64.s
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+// If !iscgo, this is a no-op.
+//
+// NOTE: mcall() assumes this clobbers only R23 (REGTMP).
+// FIXME: cgo
+TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
+ RET
+
+TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0
+ RET
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 9aa9facabe..67a84425a8 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -180,12 +180,9 @@ func traceBufPtrOf(b *traceBuf) traceBufPtr {
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
- // Stop the world so that we can take a consistent snapshot
+ // Stop the world, so that we can take a consistent snapshot
// of all goroutines at the beginning of the trace.
- // Do not stop the world during GC so we ensure we always see
- // a consistent view of GC-related events (e.g. a start is always
- // paired with an end).
- stopTheWorldGC("start tracing")
+ stopTheWorld("start tracing")
// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
// Exitsyscall could check trace.enabled long before and then suddenly wake up
@@ -196,7 +193,7 @@ func StartTrace() error {
if trace.enabled || trace.shutdown {
unlock(&trace.bufLock)
- startTheWorldGC()
+ startTheWorld()
return errorString("tracing is already enabled")
}
@@ -267,7 +264,7 @@ func StartTrace() error {
unlock(&trace.bufLock)
- startTheWorldGC()
+ startTheWorld()
return nil
}
@@ -276,14 +273,14 @@ func StartTrace() error {
func StopTrace() {
// Stop the world so that we can collect the trace buffers from all p's below,
// and also to avoid races with traceEvent.
- stopTheWorldGC("stop tracing")
+ stopTheWorld("stop tracing")
// See the comment in StartTrace.
lock(&trace.bufLock)
if !trace.enabled {
unlock(&trace.bufLock)
- startTheWorldGC()
+ startTheWorld()
return
}
@@ -320,7 +317,7 @@ func StopTrace() {
trace.shutdown = true
unlock(&trace.bufLock)
- startTheWorldGC()
+ startTheWorld()
// The world is started but we've set trace.shutdown, so new tracing can't start.
// Wait for the trace reader to flush pending buffers and stop.
diff --git a/src/runtime/trace/trace_stack_test.go b/src/runtime/trace/trace_stack_test.go
index e3608c687f..62c06e67d9 100644
--- a/src/runtime/trace/trace_stack_test.go
+++ b/src/runtime/trace/trace_stack_test.go
@@ -233,7 +233,6 @@ func TestTraceSymbolize(t *testing.T) {
}},
{trace.EvGomaxprocs, []frame{
{"runtime.startTheWorld", 0}, // this is when the current gomaxprocs is logged.
- {"runtime.startTheWorldGC", 0},
{"runtime.GOMAXPROCS", 0},
{"runtime/trace_test.TestTraceSymbolize", 0},
{"testing.tRunner", 0},
diff --git a/src/runtime/utf8.go b/src/runtime/utf8.go
index 6bf596581d..52b757662d 100644
--- a/src/runtime/utf8.go
+++ b/src/runtime/utf8.go
@@ -7,7 +7,7 @@ package runtime
// Numbers fundamental to the encoding.
const (
runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
- runeSelf = 0x80 // characters below Runeself are represented as themselves in a single byte.
+ runeSelf = 0x80 // characters below runeSelf are represented as themselves in a single byte.
maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
)
diff --git a/src/strconv/quote.go b/src/strconv/quote.go
index b50496a0ff..bcbdbc514d 100644
--- a/src/strconv/quote.go
+++ b/src/strconv/quote.go
@@ -145,8 +145,9 @@ func AppendQuoteToASCII(dst []byte, s string) []byte {
}
// QuoteToGraphic returns a double-quoted Go string literal representing s.
-// The returned string uses Go escape sequences (\t, \n, \xFF, \u0100) for
-// non-ASCII characters and non-printable characters as defined by IsGraphic.
+// The returned string leaves Unicode graphic characters, as defined by
+// IsGraphic, unchanged and uses Go escape sequences (\t, \n, \xFF, \u0100)
+// for non-graphic characters.
func QuoteToGraphic(s string) string {
return quoteWith(s, '"', false, true)
}
@@ -185,9 +186,9 @@ func AppendQuoteRuneToASCII(dst []byte, r rune) []byte {
}
// QuoteRuneToGraphic returns a single-quoted Go character literal representing
-// the rune. The returned string uses Go escape sequences (\t, \n, \xFF,
-// \u0100) for non-ASCII characters and non-printable characters as defined
-// by IsGraphic.
+// the rune. If the rune is not a Unicode graphic character,
+// as defined by IsGraphic, the returned string will use a Go escape sequence
+// (\t, \n, \xFF, \u0100).
func QuoteRuneToGraphic(r rune) string {
return quoteRuneWith(r, '\'', false, true)
}
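
The reworded doc comments can be checked directly: graphic runes pass through untouched, while non-graphic runes come back as escape sequences. For instance:

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        // U+263A is graphic and survives; U+0007 (BEL) is not and is escaped.
        fmt.Println(strconv.QuoteToGraphic("☺ \u0007"))   // "☺ \a"
        fmt.Println(strconv.QuoteRuneToGraphic('\u000a')) // '\n'
    }
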
diff --git a/src/strings/strings.go b/src/strings/strings.go
index 869cdcdcef..238d657f61 100644
--- a/src/strings/strings.go
+++ b/src/strings/strings.go
@@ -420,24 +420,24 @@ func FieldsFunc(s string, f func(rune) bool) []string {
return a
}
-// Join concatenates the elements of a to create a single string. The separator string
-// sep is placed between elements in the resulting string.
-func Join(a []string, sep string) string {
- switch len(a) {
+// Join concatenates the elements of its first argument to create a single string. The separator
+// string sep is placed between elements in the resulting string.
+func Join(elems []string, sep string) string {
+ switch len(elems) {
case 0:
return ""
case 1:
- return a[0]
+ return elems[0]
}
- n := len(sep) * (len(a) - 1)
- for i := 0; i < len(a); i++ {
- n += len(a[i])
+ n := len(sep) * (len(elems) - 1)
+ for i := 0; i < len(elems); i++ {
+ n += len(elems[i])
}
var b Builder
b.Grow(n)
- b.WriteString(a[0])
- for _, s := range a[1:] {
+ b.WriteString(elems[0])
+ for _, s := range elems[1:] {
b.WriteString(sep)
b.WriteString(s)
}
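
Join's behavior is unchanged by the rename; it still preallocates exactly the bytes it needs — here len(", ")*2 + len("a")+len("b")+len("c") = 4 + 3 = 7 — before writing through the Builder:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        fmt.Println(strings.Join([]string{"a", "b", "c"}, ", ")) // a, b, c
    }
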
@@ -1094,7 +1094,7 @@ func Index(s, substr string) int {
i++
fails++
if fails >= 4+i>>4 && i < t {
- // See comment in ../bytes/bytes_generic.go.
+ // See comment in ../bytes/bytes.go.
j := indexRabinKarp(s[i:], substr)
if j < 0 {
return -1
diff --git a/src/sync/waitgroup_test.go b/src/sync/waitgroup_test.go
index 4ab438cbab..c569e0faa2 100644
--- a/src/sync/waitgroup_test.go
+++ b/src/sync/waitgroup_test.go
@@ -147,7 +147,7 @@ func TestWaitGroupMisuse3(t *testing.T) {
}
}()
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
- done := make(chan interface{}, 2)
+ done := make(chan interface{}, 3)
// The detection is opportunistic, so we want it to panic
// at least in one run out of a million.
for i := 0; i < 1e6; i++ {
@@ -171,8 +171,13 @@ func TestWaitGroupMisuse3(t *testing.T) {
}()
wg.Wait()
}()
- wg.Wait()
- for j := 0; j < 2; j++ {
+ go func() {
+ defer func() {
+ done <- recover()
+ }()
+ wg.Wait()
+ }()
+ for j := 0; j < 3; j++ {
if err := <-done; err != nil {
panic(err)
}
diff --git a/src/syscall/asm_linux_riscv64.s b/src/syscall/asm_linux_riscv64.s
index 5700d4d005..ad0b6b17d9 100644
--- a/src/syscall/asm_linux_riscv64.s
+++ b/src/syscall/asm_linux_riscv64.s
@@ -20,14 +20,14 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
BLTU T0, A0, err
MOV A0, r1+32(FP) // r1
MOV A1, r2+40(FP) // r2
- MOV $0, err+48(FP) // errno
+ MOV ZERO, err+48(FP) // errno
CALL runtime·exitsyscall(SB)
RET
err:
MOV $-1, T0
MOV T0, r1+32(FP) // r1
- MOV $0, r2+40(FP) // r2
- SUB A0, $0, A0
+ MOV ZERO, r2+40(FP) // r2
+ SUB A0, ZERO, A0
MOV A0, err+48(FP) // errno
CALL runtime·exitsyscall(SB)
RET
@@ -47,14 +47,14 @@ TEXT ·Syscall6(SB),NOSPLIT,$0-80
BLTU T0, A0, err
MOV A0, r1+56(FP) // r1
MOV A1, r2+64(FP) // r2
- MOV $0, err+72(FP) // errno
+ MOV ZERO, err+72(FP) // errno
CALL runtime·exitsyscall(SB)
RET
err:
MOV $-1, T0
MOV T0, r1+56(FP) // r1
- MOV $0, r2+64(FP) // r2
- SUB A0, $0, A0
+ MOV ZERO, r2+64(FP) // r2
+ SUB A0, ZERO, A0
MOV A0, err+72(FP) // errno
CALL runtime·exitsyscall(SB)
RET
@@ -70,13 +70,13 @@ TEXT ·RawSyscall(SB),NOSPLIT,$0-56
BLTU T0, A0, err
MOV A0, r1+32(FP) // r1
MOV A1, r2+40(FP) // r2
- MOV $0, err+48(FP) // errno
+ MOV ZERO, err+48(FP) // errno
RET
err:
MOV $-1, T0
MOV T0, r1+32(FP) // r1
- MOV $0, r2+40(FP) // r2
- SUB A0, $0, A0
+ MOV ZERO, r2+40(FP) // r2
+ SUB A0, ZERO, A0
MOV A0, err+48(FP) // errno
RET
@@ -94,13 +94,13 @@ TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
BLTU T0, A0, err
MOV A0, r1+56(FP) // r1
MOV A1, r2+64(FP) // r2
- MOV $0, err+72(FP) // errno
+ MOV ZERO, err+72(FP) // errno
RET
err:
MOV $-1, T0
MOV T0, r1+56(FP) // r1
- MOV $0, r2+64(FP) // r2
- SUB A0, $0, A0
+ MOV ZERO, r2+64(FP) // r2
+ SUB A0, ZERO, A0
MOV A0, err+72(FP) // errno
RET
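
In the assembly above, ZERO is the RISC-V hard-wired zero register, so MOV ZERO, err+48(FP) stores zero without first materializing a $0 immediate in a scratch register, and SUB A0, ZERO, A0 computes 0-A0, turning the kernel's negative-errno return into a positive value. A hedged, Linux-only illustration of that convention from the Go side:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// read(2) on fd -1: the kernel returns -EBADF, and the syscall
	// package hands it back negated as a positive Errno.
	_, _, errno := syscall.Syscall(syscall.SYS_READ, ^uintptr(0), 0, 0)
	fmt.Println(errno == syscall.EBADF) // true on Linux
}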
diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go
index 3540d511bf..d639565b75 100644
--- a/src/syscall/exec_linux.go
+++ b/src/syscall/exec_linux.go
@@ -438,7 +438,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att
if err1 != 0 {
goto childerror
}
- RawSyscall(SYS_FCNTL, uintptr(nextfd), F_SETFD, FD_CLOEXEC)
+ RawSyscall(fcntl64Syscall, uintptr(nextfd), F_SETFD, FD_CLOEXEC)
pipe = nextfd
nextfd++
}
@@ -451,7 +451,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att
if err1 != 0 {
goto childerror
}
- RawSyscall(SYS_FCNTL, uintptr(nextfd), F_SETFD, FD_CLOEXEC)
+ RawSyscall(fcntl64Syscall, uintptr(nextfd), F_SETFD, FD_CLOEXEC)
fd[i] = nextfd
nextfd++
}
@@ -466,7 +466,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att
if fd[i] == int(i) {
// dup2(i, i) won't clear close-on-exec flag on Linux,
// probably not elsewhere either.
- _, _, err1 = RawSyscall(SYS_FCNTL, uintptr(fd[i]), F_SETFD, 0)
+ _, _, err1 = RawSyscall(fcntl64Syscall, uintptr(fd[i]), F_SETFD, 0)
if err1 != 0 {
goto childerror
}
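
fcntl64Syscall defaults to SYS_FCNTL and is bumped to SYS_FCNTL64 by an init function on the 32-bit Linux ports, which is why the flock_linux_32bit.go change below cross-references the mirrored file in internal/syscall/unix. A self-contained sketch of the pattern, with placeholder numbers standing in for the real syscall IDs:

package main

import "fmt"

const (
	sysFcntl   = 72  // placeholder, not the real syscall number
	sysFcntl64 = 221 // placeholder, not the real syscall number
)

// Call sites use the variable, so they need no per-arch conditionals.
var fcntl64Syscall uintptr = sysFcntl

func init() {
	// In the syscall package this init sits behind the
	// linux,386 linux,arm linux,mips linux,mipsle build tags.
	fcntl64Syscall = sysFcntl64
}

func main() {
	fmt.Println("fcntl variant in use:", fcntl64Syscall)
}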
diff --git a/src/syscall/flock_linux_32bit.go b/src/syscall/flock_linux_32bit.go
index e1548995b2..e11aed6ed1 100644
--- a/src/syscall/flock_linux_32bit.go
+++ b/src/syscall/flock_linux_32bit.go
@@ -1,9 +1,12 @@
-// +build linux,386 linux,arm linux,mips linux,mipsle
-
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// If you change the build tags here, see
+// internal/syscall/unix/fcntl_linux_32bit.go.
+
+// +build linux,386 linux,arm linux,mips linux,mipsle
+
package syscall
func init() {
diff --git a/src/syscall/fs_js.go b/src/syscall/fs_js.go
index f7079e9d09..16d9f58b8c 100644
--- a/src/syscall/fs_js.go
+++ b/src/syscall/fs_js.go
@@ -34,6 +34,7 @@ var (
type jsFile struct {
path string
entries []string
+ dirIdx int // entries[:dirIdx] have already been returned in ReadDirent
pos int64
seeked bool
}
@@ -141,8 +142,8 @@ func ReadDirent(fd int, buf []byte) (int, error) {
}
n := 0
- for len(f.entries) > 0 {
- entry := f.entries[0]
+ for f.dirIdx < len(f.entries) {
+ entry := f.entries[f.dirIdx]
l := 2 + len(entry)
if l > len(buf) {
break
@@ -152,7 +153,7 @@ func ReadDirent(fd int, buf []byte) (int, error) {
copy(buf[2:], entry)
buf = buf[l:]
n += l
- f.entries = f.entries[1:]
+ f.dirIdx++
}
return n, nil
@@ -470,6 +471,7 @@ func Seek(fd int, offset int64, whence int) (int64, error) {
}
f.seeked = true
+ f.dirIdx = 0 // Reset directory read position. See issue 35767.
f.pos = newPos
return newPos, nil
}
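
Switching ReadDirent from consuming f.entries to advancing a dirIdx cursor is what makes the Seek fix possible: the listing stays intact, so resetting dirIdx rewinds the directory instead of finding it permanently empty. The pattern in isolation:

package main

import "fmt"

type dir struct {
	entries []string
	idx     int // entries[:idx] have already been returned
}

func (d *dir) next() (string, bool) {
	if d.idx >= len(d.entries) {
		return "", false
	}
	e := d.entries[d.idx]
	d.idx++
	return e, true
}

func main() {
	d := &dir{entries: []string{"a", "b"}}
	for e, ok := d.next(); ok; e, ok = d.next() {
		fmt.Println(e)
	}
	d.idx = 0 // what Seek now does for directories (issue 35767)
	e, _ := d.next()
	fmt.Println("after rewind:", e) // after rewind: a
}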
diff --git a/src/syscall/lsf_linux.go b/src/syscall/lsf_linux.go
index b89239eba8..28e96d54e6 100644
--- a/src/syscall/lsf_linux.go
+++ b/src/syscall/lsf_linux.go
@@ -23,6 +23,8 @@ func LsfJump(code, k, jt, jf int) *SockFilter {
// Deprecated: Use golang.org/x/net/bpf instead.
func LsfSocket(ifindex, proto int) (int, error) {
var lsall SockaddrLinklayer
+ // This is missing SOCK_CLOEXEC, but adding the flag
+ // could break callers.
s, e := Socket(AF_PACKET, SOCK_RAW, proto)
if e != nil {
return 0, e
@@ -46,7 +48,7 @@ type iflags struct {
// Deprecated: Use golang.org/x/net/bpf instead.
func SetLsfPromisc(name string, m bool) error {
- s, e := Socket(AF_INET, SOCK_DGRAM, 0)
+ s, e := cloexecSocket(AF_INET, SOCK_DGRAM, 0)
if e != nil {
return e
}
diff --git a/src/syscall/netlink_linux.go b/src/syscall/netlink_linux.go
index 1cda8c7704..0937ff797a 100644
--- a/src/syscall/netlink_linux.go
+++ b/src/syscall/netlink_linux.go
@@ -50,7 +50,7 @@ func newNetlinkRouteRequest(proto, seq, family int) []byte {
// NetlinkRIB returns the routing information base, also known as RIB, which
// consists of network facility information, states and parameters.
func NetlinkRIB(proto, family int) ([]byte, error) {
- s, err := Socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE)
+ s, err := cloexecSocket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE)
if err != nil {
return nil, err
}
diff --git a/src/syscall/security_windows.go b/src/syscall/security_windows.go
index db80d98a08..3a75759606 100644
--- a/src/syscall/security_windows.go
+++ b/src/syscall/security_windows.go
@@ -163,7 +163,7 @@ func (sid *SID) String() (string, error) {
return "", e
}
defer LocalFree((Handle)(unsafe.Pointer(s)))
- return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil
+ return utf16PtrToString(s, 256), nil
}
// Len returns the length, in bytes, of a valid security identifier sid.
diff --git a/src/syscall/sock_cloexec_linux.go b/src/syscall/sock_cloexec_linux.go
new file mode 100644
index 0000000000..600cf25c15
--- /dev/null
+++ b/src/syscall/sock_cloexec_linux.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+// This is a stripped down version of sysSocket from net/sock_cloexec.go.
+func cloexecSocket(family, sotype, proto int) (int, error) {
+ s, err := Socket(family, sotype|SOCK_CLOEXEC, proto)
+ switch err {
+ case nil:
+ return s, nil
+ default:
+ return -1, err
+ case EINVAL:
+ }
+
+ ForkLock.RLock()
+ s, err = Socket(family, sotype, proto)
+ if err == nil {
+ CloseOnExec(s)
+ }
+ ForkLock.RUnlock()
+ if err != nil {
+ Close(s)
+ return -1, err
+ }
+ return s, nil
+}
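
The switch in cloexecSocket exploits the fact that Go cases do not fall through and may appear in any order: nil takes the fast path, default returns any hard error, and the empty EINVAL case simply exits the switch into the ForkLock-guarded fallback for kernels without SOCK_CLOEXEC support. The control flow in isolation:

package main

import (
	"errors"
	"fmt"
)

var errEINVAL = errors.New("EINVAL") // stand-in for syscall.EINVAL

func attempt(flagOK bool) error {
	if !flagOK {
		return errEINVAL
	}
	return nil
}

func open(flagOK bool) string {
	switch err := attempt(flagOK); err {
	case nil:
		return "fast path"
	default:
		return "hard error: " + err.Error()
	case errEINVAL:
		// Empty case: leave the switch and take the fallback below.
	}
	return "fallback path"
}

func main() {
	fmt.Println(open(true))  // fast path
	fmt.Println(open(false)) // fallback path
}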
diff --git a/src/syscall/syscall_aix.go b/src/syscall/syscall_aix.go
index d8010d35ce..8bb5fa9ead 100644
--- a/src/syscall/syscall_aix.go
+++ b/src/syscall/syscall_aix.go
@@ -23,12 +23,12 @@ func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err
const (
_ = iota
TIOCSCTTY
- F_DUPFD_CLOEXEC
SYS_EXECVE
SYS_FCNTL
)
const (
+ F_DUPFD_CLOEXEC = 0
// AF_LOCAL doesn't exist on AIX
AF_LOCAL = AF_UNIX
)
diff --git a/src/syscall/syscall_linux_riscv64.go b/src/syscall/syscall_linux_riscv64.go
index 4635490b9c..61e9c60e70 100644
--- a/src/syscall/syscall_linux_riscv64.go
+++ b/src/syscall/syscall_linux_riscv64.go
@@ -33,7 +33,7 @@ func EpollCreate(size int) (fd int, err error) {
//sys Listen(s int, n int) (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
-//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
+//sys renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys Setfsgid(gid int) (err error)
@@ -46,6 +46,10 @@ func EpollCreate(size int) (fd int, err error) {
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ return renameat2(olddirfd, oldpath, newdirfd, newpath, 0)
+}
+
func Stat(path string, stat *Stat_t) (err error) {
return Fstatat(_AT_FDCWD, path, stat, 0)
}
diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go
index 992f6738ce..950c281e4d 100644
--- a/src/syscall/syscall_windows.go
+++ b/src/syscall/syscall_windows.go
@@ -57,6 +57,25 @@ func UTF16ToString(s []uint16) string {
return string(utf16.Decode(s))
}
+// utf16PtrToString is like UTF16ToString, but takes *uint16
+// as a parameter instead of []uint16.
+// max is how many times p can be advanced looking for the null terminator.
+// If max is hit, the string is truncated at that point.
+func utf16PtrToString(p *uint16, max int) string {
+ if p == nil {
+ return ""
+ }
+ // Find NUL terminator.
+ end := unsafe.Pointer(p)
+ n := 0
+ for *(*uint16)(end) != 0 && n < max {
+ end = unsafe.Pointer(uintptr(end) + unsafe.Sizeof(*p))
+ n++
+ }
+ s := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:n:n]
+ return string(utf16.Decode(s))
+}
+
// StringToUTF16Ptr returns pointer to the UTF-16 encoding of
// the UTF-8 string s, with a terminating NUL added. If s
// contains a NUL byte this function panics instead of
@@ -769,7 +788,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) {
for n < len(pp.Path) && pp.Path[n] != 0 {
n++
}
- bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n:n]
sa.Name = string(bytes)
return sa, nil
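
utf16PtrToString can be exercised without calling a Windows API by building a NUL-terminated UTF-16 buffer by hand; a self-contained copy of the helper, reproduced here only for illustration:

package main

import (
	"fmt"
	"unicode/utf16"
	"unsafe"
)

func utf16PtrToString(p *uint16, max int) string {
	if p == nil {
		return ""
	}
	// Find the NUL terminator, advancing at most max times.
	end := unsafe.Pointer(p)
	n := 0
	for *(*uint16)(end) != 0 && n < max {
		end = unsafe.Pointer(uintptr(end) + unsafe.Sizeof(*p))
		n++
	}
	s := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:n:n]
	return string(utf16.Decode(s))
}

func main() {
	buf := append(utf16.Encode([]rune("hello")), 0) // NUL-terminated
	fmt.Println(utf16PtrToString(&buf[0], 256))     // hello
}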
diff --git a/src/syscall/types_linux.go b/src/syscall/types_linux.go
index b47c3236c0..9de32d9c01 100644
--- a/src/syscall/types_linux.go
+++ b/src/syscall/types_linux.go
@@ -50,6 +50,7 @@ package syscall
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/icmpv6.h>
+#include <poll.h>
#include <termios.h>
#include <time.h>
#include <unistd.h>
diff --git a/src/syscall/zsyscall_linux_riscv64.go b/src/syscall/zsyscall_linux_riscv64.go
index afba4167f3..7bdb8046d1 100644
--- a/src/syscall/zsyscall_linux_riscv64.go
+++ b/src/syscall/zsyscall_linux_riscv64.go
@@ -1220,7 +1220,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+func renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
if err != nil {
@@ -1231,7 +1231,7 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/src/syscall/ztypes_linux_riscv64.go b/src/syscall/ztypes_linux_riscv64.go
index 725e99c84d..6ee71ebbdc 100644
--- a/src/syscall/ztypes_linux_riscv64.go
+++ b/src/syscall/ztypes_linux_riscv64.go
@@ -31,13 +31,11 @@ type Timeval struct {
type Timex struct {
Modes uint32
- Pad_cgo_0 [4]byte
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
- Pad_cgo_1 [4]byte
Constant int64
Precision int64
Tolerance int64
@@ -46,14 +44,13 @@ type Timex struct {
Ppsfreq int64
Jitter int64
Shift int32
- Pad_cgo_2 [4]byte
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
- Pad_cgo_3 [44]byte
+ _ [44]byte
}
type Time_t int64
@@ -131,12 +128,12 @@ type Statfs_t struct {
}
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]uint8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]uint8
+ _ [5]byte
}
type Fsid struct {
@@ -144,13 +141,12 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type RawSockaddrInet4 struct {
@@ -231,13 +227,12 @@ type IPv6Mreq struct {
type Msghdr struct {
Name *byte
Namelen uint32
- Pad_cgo_0 [4]byte
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Cmsghdr struct {
@@ -279,7 +274,6 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -497,9 +491,8 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [6]byte
- Filter *SockFilter
+ Len uint16
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -570,12 +563,11 @@ type Sysinfo_t struct {
Freeswap uint64
Procs uint16
Pad uint16
- Pad_cgo_0 [4]byte
Totalhigh uint64
Freehigh uint64
Unit uint32
- X_f [0]uint8
- Pad_cgo_1 [4]byte
+ _ [0]uint8
+ _ [4]byte
}
type Utsname struct {
@@ -588,12 +580,11 @@ type Utsname struct {
}
type Ustat_t struct {
- Tfree int32
- Pad_cgo_0 [4]byte
- Tinode uint64
- Fname [6]uint8
- Fpack [6]uint8
- Pad_cgo_1 [4]byte
+ Tfree int32
+ Tinode uint64
+ Fname [6]uint8
+ Fpack [6]uint8
+ _ [4]byte
}
type EpollEvent struct {
@@ -610,16 +601,21 @@ const (
_AT_EACCESS = 0x200
)
+type pollFd struct {
+ Fd int32
+ Events int16
+ Revents int16
+}
+
type Termios struct {
- Iflag uint32
- Oflag uint32
- Cflag uint32
- Lflag uint32
- Line uint8
- Cc [32]uint8
- Pad_cgo_0 [3]byte
- Ispeed uint32
- Ospeed uint32
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Line uint8
+ Cc [19]uint8
+ Ispeed uint32
+ Ospeed uint32
}
const (
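
The ztypes cleanups rely on Go's implicit field alignment: interior pads such as the [4]byte between Whence and Start disappear entirely because the compiler inserts them anyway, and the remaining explicit tail pads become blank fields, keeping sizes identical while dropping the exported Pad_cgo_N names. Mirroring the new Flock_t shape:

package main

import (
	"fmt"
	"unsafe"
)

type flock struct {
	Type   int16
	Whence int16
	// 4 bytes of implicit padding here: int64 forces 8-byte alignment.
	Start int64
	Len   int64
	Pid   int32
	_     [4]byte // explicit tail pad, now anonymous
}

func main() {
	fmt.Println(unsafe.Sizeof(flock{})) // 32 on 64-bit targets, as before
}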
diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
index e954fc6ccb..93f461b07a 100644
--- a/src/testing/benchmark.go
+++ b/src/testing/benchmark.go
@@ -69,8 +69,8 @@ var benchmarkLock sync.Mutex
// Used for every benchmark for measuring memory.
var memStats runtime.MemStats
-// An internal type but exported because it is cross-package; part of the implementation
-// of the "go test" command.
+// InternalBenchmark is an internal type but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
type InternalBenchmark struct {
Name string
F func(b *B)
@@ -86,7 +86,7 @@ type InternalBenchmark struct {
// may be called simultaneously from multiple goroutines.
//
// Like in tests, benchmark logs are accumulated during execution
-// and dumped to standard error when done. Unlike in tests, benchmark logs
+// and dumped to standard output when done. Unlike in tests, benchmark logs
// are always printed, so as not to hide output whose existence may be
// affecting benchmark results.
type B struct {
@@ -342,7 +342,7 @@ func (b *B) ReportMetric(n float64, unit string) {
b.extra[unit] = n
}
-// The results of a benchmark run.
+// BenchmarkResult contains the results of a benchmark run.
type BenchmarkResult struct {
N int // The number of iterations.
T time.Duration // The total time taken.
@@ -488,8 +488,8 @@ type benchContext struct {
extLen int // Maximum extension length.
}
-// An internal function but exported because it is cross-package; part of the implementation
-// of the "go test" command.
+// RunBenchmarks is an internal function but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
runBenchmarks("", matchString, benchmarks)
}
diff --git a/src/testing/example.go b/src/testing/example.go
index c122121289..adc91d5faf 100644
--- a/src/testing/example.go
+++ b/src/testing/example.go
@@ -19,8 +19,8 @@ type InternalExample struct {
Unordered bool
}
-// An internal function but exported because it is cross-package; part of the implementation
-// of the "go test" command.
+// RunExamples is an internal function but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
func RunExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ok bool) {
_, ok = runExamples(matchString, examples)
return ok
diff --git a/src/testing/panic_test.go b/src/testing/panic_test.go
index 3491510b81..6b8b95391d 100644
--- a/src/testing/panic_test.go
+++ b/src/testing/panic_test.go
@@ -16,6 +16,9 @@ import (
)
var testPanicTest = flag.String("test_panic_test", "", "TestPanic: indicates which test should panic")
+var testPanicParallel = flag.Bool("test_panic_parallel", false, "TestPanic: run subtests in parallel")
+var testPanicCleanup = flag.Bool("test_panic_cleanup", false, "TestPanic: indicates whether the test should call Cleanup")
+var testPanicCleanupPanic = flag.String("test_panic_cleanup_panic", "", "TestPanic: indicates which Cleanup function, if any, should panic")
func TestPanic(t *testing.T) {
testenv.MustHaveExec(t)
@@ -40,6 +43,98 @@ func TestPanic(t *testing.T) {
--- FAIL: TestPanicHelper/1 (N.NNs)
panic_test.go:NNN: TestPanicHelper/1
`,
+ }, {
+ desc: "subtest panics with cleanup",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "subtest panics with outer cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=outer"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+`,
+ }, {
+ desc: "subtest panics with middle cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=middle"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "subtest panics with inner cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=inner"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "parallel subtest panics with cleanup",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_parallel"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "parallel subtest panics with outer cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=outer", "-test_panic_parallel"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+`,
+ }, {
+ desc: "parallel subtest panics with middle cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=middle", "-test_panic_parallel"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "parallel subtest panics with inner cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=inner", "-test_panic_parallel"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
}}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
@@ -72,10 +167,42 @@ func TestPanicHelper(t *testing.T) {
if t.Name() == *testPanicTest {
panic("panic")
}
+ switch *testPanicCleanupPanic {
+ case "", "outer", "middle", "inner":
+ default:
+ t.Fatalf("bad -test_panic_cleanup_panic: %s", *testPanicCleanupPanic)
+ }
+ t.Cleanup(func() {
+ fmt.Println("ran outer cleanup")
+ if *testPanicCleanupPanic == "outer" {
+ panic("outer cleanup")
+ }
+ })
for i := 0; i < 3; i++ {
+ i := i
t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
+ chosen := t.Name() == *testPanicTest
+ if chosen && *testPanicCleanup {
+ t.Cleanup(func() {
+ fmt.Printf("ran middle cleanup %d\n", i)
+ if *testPanicCleanupPanic == "middle" {
+ panic("middle cleanup")
+ }
+ })
+ }
+ if chosen && *testPanicParallel {
+ t.Parallel()
+ }
t.Log(t.Name())
- if t.Name() == *testPanicTest {
+ if chosen {
+ if *testPanicCleanup {
+ t.Cleanup(func() {
+ fmt.Printf("ran inner cleanup %d\n", i)
+ if *testPanicCleanupPanic == "inner" {
+ panic("inner cleanup")
+ }
+ })
+ }
panic("panic")
}
})
diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go
index 3f0f71f647..3dc30ee72e 100644
--- a/src/testing/sub_test.go
+++ b/src/testing/sub_test.go
@@ -460,6 +460,21 @@ func TestTRun(t *T) {
<-ch
t.Errorf("error")
},
+ }, {
+ // If a subtest panics we should run cleanups.
+ desc: "cleanup when subtest panics",
+ ok: false,
+ chatty: false,
+ output: `
+--- FAIL: cleanup when subtest panics (N.NNs)
+ --- FAIL: cleanup when subtest panics/sub (N.NNs)
+ sub_test.go:NNN: running cleanup`,
+ f: func(t *T) {
+ t.Cleanup(func() { t.Log("running cleanup") })
+ t.Run("sub", func(t2 *T) {
+ t2.FailNow()
+ })
+ },
}}
for _, tc := range testCases {
ctx := newTestContext(tc.maxPar, newMatcher(regexp.MatchString, "", ""))
@@ -855,3 +870,19 @@ func TestRunCleanup(t *T) {
t.Errorf("unexpected outer cleanup count; got %d want 0", outerCleanup)
}
}
+
+func TestCleanupParallelSubtests(t *T) {
+ ranCleanup := 0
+ t.Run("test", func(t *T) {
+ t.Cleanup(func() { ranCleanup++ })
+ t.Run("x", func(t *T) {
+ t.Parallel()
+ if ranCleanup > 0 {
+ t.Error("outer cleanup ran before parallel subtest")
+ }
+ })
+ })
+ if ranCleanup != 1 {
+ t.Errorf("unexpected cleanup count; got %d want 1", ranCleanup)
+ }
+}
diff --git a/src/testing/testing.go b/src/testing/testing.go
index 59128e8a29..030feb7112 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -99,7 +99,7 @@
// line order:
//
// func ExamplePerm() {
-// for _, value := range Perm(4) {
+// for _, value := range Perm(5) {
// fmt.Println(value)
// }
// // Unordered output: 4
@@ -776,9 +776,9 @@ func (c *common) Helper() {
c.helpers[callerName(1)] = struct{}{}
}
-// Cleanup registers a function to be called when the test finishes.
-// Cleanup functions will be called in last added, first called
-// order.
+// Cleanup registers a function to be called when the test and all its
+// subtests complete. Cleanup functions will be called in last added,
+// first called order.
func (c *common) Cleanup(f func()) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -791,15 +791,34 @@ func (c *common) Cleanup(f func()) {
}
}
+// panicHandling is an argument to runCleanup.
+type panicHandling int
+
+const (
+ normalPanic panicHandling = iota
+ recoverAndReturnPanic
+)
+
// runCleanup is called at the end of the test.
-func (c *common) runCleanup() {
+// If ph is recoverAndReturnPanic, runCleanup will catch panics, and return
+// the recovered value if any.
+func (c *common) runCleanup(ph panicHandling) (panicVal interface{}) {
c.mu.Lock()
cleanup := c.cleanup
c.cleanup = nil
c.mu.Unlock()
- if cleanup != nil {
- cleanup()
+ if cleanup == nil {
+ return nil
+ }
+
+ if ph == recoverAndReturnPanic {
+ defer func() {
+ panicVal = recover()
+ }()
}
+
+ cleanup()
+ return nil
}
// callerName gives the function name (qualified with a package path)
@@ -863,8 +882,8 @@ func (t *T) Parallel() {
t.raceErrors += -race.Errors()
}
-// An internal type but exported because it is cross-package; part of the implementation
-// of the "go test" command.
+// InternalTest is an internal type but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
type InternalTest struct {
Name string
F func(*T)
@@ -902,19 +921,29 @@ func tRunner(t *T, fn func(t *T)) {
}
}
}
- if err != nil {
+
+ doPanic := func(err interface{}) {
t.Fail()
+ if r := t.runCleanup(recoverAndReturnPanic); r != nil {
+ t.Logf("cleanup panicked with %v", r)
+ }
// Flush the output log up to the root before dying.
t.mu.Lock()
root := &t.common
for ; root.parent != nil; root = root.parent {
root.duration += time.Since(root.start)
fmt.Fprintf(root.parent.w, "--- FAIL: %s (%s)\n", root.name, fmtDuration(root.duration))
+ if r := root.parent.runCleanup(recoverAndReturnPanic); r != nil {
+ fmt.Fprintf(root.parent.w, "cleanup panicked with %v", r)
+ }
root.parent.mu.Lock()
io.Copy(root.parent.w, bytes.NewReader(root.output))
}
panic(err)
}
+ if err != nil {
+ doPanic(err)
+ }
t.duration += time.Since(t.start)
@@ -928,6 +957,12 @@ func tRunner(t *T, fn func(t *T)) {
for _, sub := range t.sub {
<-sub.signal
}
+ cleanupStart := time.Now()
+ err := t.runCleanup(recoverAndReturnPanic)
+ t.duration += time.Since(cleanupStart)
+ if err != nil {
+ doPanic(err)
+ }
if !t.isParallel {
// Reacquire the count for sequential tests. See comment in Run.
t.context.waitParallel()
@@ -947,7 +982,11 @@ func tRunner(t *T, fn func(t *T)) {
}
t.signal <- signal
}()
- defer t.runCleanup()
+ defer func() {
+ if len(t.sub) == 0 {
+ t.runCleanup(normalPanic)
+ }
+ }()
t.start = time.Now()
t.raceErrors = -race.Errors()
@@ -1214,8 +1253,8 @@ func listTests(matchString func(pat, str string) (bool, error), tests []Internal
}
}
-// An internal function but exported because it is cross-package; part of the implementation
-// of the "go test" command.
+// RunTests is an internal function but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {
ran, ok := runTests(matchString, tests)
if !ran && !haveExamples {
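
The net effect of the tRunner changes: functions registered with t.Cleanup now run even when the test body or a subtest panics, cleanup time is counted in the test's duration, and a cleanup that itself panics is reported instead of silently lost. A tiny, deliberately failing demonstration (place in a _test.go file and run go test):

package example_test

import (
	"fmt"
	"testing"
)

func TestCleanupRunsOnPanic(t *testing.T) {
	t.Cleanup(func() { fmt.Println("cleanup still runs") })
	panic("boom") // the test fails, but the line above still prints
}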
diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
index aa5cd4c552..77294eda4b 100644
--- a/src/text/template/exec_test.go
+++ b/src/text/template/exec_test.go
@@ -502,6 +502,7 @@ var execTests = []execTest{
{"map MUI64S", "{{index .MUI64S 3}}", "ui643", tVal, true},
{"map MI8S", "{{index .MI8S 3}}", "i83", tVal, true},
{"map MUI8S", "{{index .MUI8S 2}}", "u82", tVal, true},
+ {"index of an interface field", "{{index .Empty3 0}}", "7", tVal, true},
// Slicing.
{"slice[:]", "{{slice .SI}}", "[3 4 5]", tVal, true},
@@ -527,12 +528,14 @@ var execTests = []execTest{
{"string[1:2]", "{{slice .S 1 2}}", "y", tVal, true},
{"out of range", "{{slice .S 1 5}}", "", tVal, false},
{"3-index slice of string", "{{slice .S 1 2 2}}", "", tVal, false},
+ {"slice of an interface field", "{{slice .Empty3 0 1}}", "[7]", tVal, true},
// Len.
{"slice", "{{len .SI}}", "3", tVal, true},
{"map", "{{len .MSI }}", "3", tVal, true},
{"len of int", "{{len 3}}", "", tVal, false},
{"len of nothing", "{{len .Empty0}}", "", tVal, false},
+ {"len of an interface field", "{{len .Empty3}}", "2", tVal, true},
// With.
{"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go
index 0568c798a8..46125bc216 100644
--- a/src/text/template/funcs.go
+++ b/src/text/template/funcs.go
@@ -264,13 +264,13 @@ func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error)
return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
}
if len(indexes) < 3 {
- return item.Slice(idx[0], idx[1]), nil
+ return v.Slice(idx[0], idx[1]), nil
}
// given item[i:j:k], make sure i <= j <= k.
if idx[1] > idx[2] {
return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
}
- return item.Slice3(idx[0], idx[1], idx[2]), nil
+ return v.Slice3(idx[0], idx[1], idx[2]), nil
}
// Length
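
Both returns now slice v, the interface-indirected value, rather than item, which is what the new "slice of an interface field" exec tests above exercise. A hedged end-to-end example:

package main

import (
	"os"
	"text/template"
)

func main() {
	// V has static type interface{}; before the fix, slicing it failed
	// at execution time because Slice was called on the un-indirected
	// interface value.
	data := struct{ V interface{} }{V: []int{7, 8}}
	t := template.Must(template.New("t").Parse(`{{slice .V 0 1}}`))
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Output: [7]
}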
diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
index 3d57708796..30371f2862 100644
--- a/src/text/template/parse/lex.go
+++ b/src/text/template/parse/lex.go
@@ -411,7 +411,6 @@ func lexInsideAction(l *lexer) stateFn {
}
case r <= unicode.MaxASCII && unicode.IsPrint(r):
l.emit(itemChar)
- return lexInsideAction
default:
return l.errorf("unrecognized character in action: %#U", r)
}
diff --git a/src/text/template/template.go b/src/text/template/template.go
index 2c5ff013e3..e0c096207c 100644
--- a/src/text/template/template.go
+++ b/src/text/template/template.go
@@ -119,12 +119,12 @@ func (t *Template) copy(c *common) *Template {
}
}
-// AddParseTree adds parse tree for template with given name and associates it with t.
-// If the template does not already exist, it will create a new one.
-// If the template does exist, it will be replaced.
+// AddParseTree associates the argument parse tree with the template t, giving
+// it the specified name. If the template has not been defined, this tree becomes
+// its definition. If it has been defined and already has that name, the existing
+// definition is replaced; otherwise a new template is created, defined, and returned.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
t.init()
- // If the name is the name of this template, overwrite this template.
nt := t
if name != t.name {
nt = t.New(name)
diff --git a/src/time/format.go b/src/time/format.go
index b531cb4760..9beb5d9a48 100644
--- a/src/time/format.go
+++ b/src/time/format.go
@@ -792,6 +792,9 @@ func skip(value, prefix string) (string, error) {
// Years must be in the range 0000..9999. The day of the week is checked
// for syntax but it is otherwise ignored.
//
+// For layouts specifying the two-digit year 06, a value NN >= 69 will be treated
+// as 19NN and a value NN < 69 will be treated as 20NN.
+//
// In the absence of a time zone indicator, Parse returns a time in UTC.
//
// When parsing a time with a zone offset like -0700, if the offset corresponds
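
The new two-digit-year sentence pins down behavior that was previously undocumented; for example:

package main

import (
	"fmt"
	"time"
)

func main() {
	t1, _ := time.Parse("01/02/06", "01/02/69") // 69 >= 69 -> 1969
	t2, _ := time.Parse("01/02/06", "01/02/68") // 68 < 69  -> 2068
	fmt.Println(t1.Year(), t2.Year())           // 1969 2068
}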
diff --git a/src/time/sleep.go b/src/time/sleep.go
index 4e61d0a6c1..37de846b11 100644
--- a/src/time/sleep.go
+++ b/src/time/sleep.go
@@ -64,7 +64,7 @@ type Timer struct {
// }
//
// This cannot be done concurrent to other receives from the Timer's
-// channel.
+// channel or other calls to the Timer's Stop method.
//
// For a timer created with AfterFunc(d, f), if t.Stop returns false, then the timer
// has already expired and the function f has been started in its own goroutine;
diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go
index 950e0eabe1..f5678020b9 100644
--- a/src/time/sleep_test.go
+++ b/src/time/sleep_test.go
@@ -356,7 +356,7 @@ func TestTimerStopStress(t *testing.T) {
for i := 0; i < 100; i++ {
go func(i int) {
timer := AfterFunc(2*Second, func() {
- t.Fatalf("timer %d was not stopped", i)
+ t.Errorf("timer %d was not stopped", i)
})
Sleep(1 * Second)
timer.Stop()
diff --git a/src/time/time.go b/src/time/time.go
index 10a132fa23..5dc9fa68ac 100644
--- a/src/time/time.go
+++ b/src/time/time.go
@@ -1148,6 +1148,9 @@ func (t Time) Zone() (name string, offset int) {
// Unix returns t as a Unix time, the number of seconds elapsed
// since January 1, 1970 UTC. The result does not depend on the
// location associated with t.
+// Unix-like operating systems often record time as a 32-bit
+// count of seconds, but since the method here returns a 64-bit
+// value it is valid for billions of years into the past or future.
func (t Time) Unix() int64 {
return t.unixSec()
}
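
A quick check of the 64-bit range claim added above:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Far beyond the 32-bit range, which runs out in January 2038.
	t := time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(t.Unix()) // 32503680000
}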
diff --git a/src/unicode/utf8/utf8.go b/src/unicode/utf8/utf8.go
index b722a03923..b8368fce41 100644
--- a/src/unicode/utf8/utf8.go
+++ b/src/unicode/utf8/utf8.go
@@ -14,7 +14,7 @@ package utf8
// Numbers fundamental to the encoding.
const (
RuneError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
- RuneSelf = 0x80 // characters below Runeself are represented as themselves in a single byte.
+ RuneSelf = 0x80 // characters below RuneSelf are represented as themselves in a single byte.
MaxRune = '\U0010FFFF' // Maximum valid Unicode code point.
UTFMax = 4 // maximum number of bytes of a UTF-8 encoded Unicode character.
)
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
index 528b9bff67..f930f7e526 100644
--- a/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
@@ -470,7 +470,8 @@ func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
// It reports whether the read was successful.
func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
var bytes String
- if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 {
+ if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
+ len(bytes)*8/8 != len(bytes) {
return false
}
@@ -740,7 +741,7 @@ func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
length = headerLen + len32
}
- if uint32(int(length)) != length || !s.ReadBytes((*[]byte)(out), int(length)) {
+ if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
return false
}
if skipHeader && !out.Skip(int(headerLen)) {
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/string.go b/src/vendor/golang.org/x/crypto/cryptobyte/string.go
index 39bf98aeea..589d297e6b 100644
--- a/src/vendor/golang.org/x/crypto/cryptobyte/string.go
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -24,7 +24,7 @@ type String []byte
// read advances a String by n bytes and returns them. If less than n bytes
// remain, it returns nil.
func (s *String) read(n int) []byte {
- if len(*s) < n {
+ if len(*s) < n || n < 0 {
return nil
}
v := (*s)[:n]
@@ -105,11 +105,6 @@ func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool {
length = length << 8
length = length | uint32(b)
}
- if int(length) < 0 {
- // This currently cannot overflow because we read uint24 at most, but check
- // anyway in case that changes in the future.
- return false
- }
v := s.read(int(length))
if v == nil {
return false
diff --git a/src/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/src/vendor/golang.org/x/crypto/poly1305/sum_arm.go
deleted file mode 100644
index 6e695e4272..0000000000
--- a/src/vendor/golang.org/x/crypto/poly1305/sum_arm.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build arm,!gccgo,!appengine,!nacl
-
-package poly1305
-
-// poly1305_auth_armv6 is implemented in sum_arm.s
-//go:noescape
-func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
-
-func sum(out *[16]byte, m []byte, key *[32]byte) {
- var mPtr *byte
- if len(m) > 0 {
- mPtr = &m[0]
- }
- poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
-}
diff --git a/src/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/src/vendor/golang.org/x/crypto/poly1305/sum_arm.s
deleted file mode 100644
index f70b4ac484..0000000000
--- a/src/vendor/golang.org/x/crypto/poly1305/sum_arm.s
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build arm,!gccgo,!appengine,!nacl
-
-#include "textflag.h"
-
-// This code was translated into a form compatible with 5a from the public
-// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
-
-DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
-DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
-DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
-DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
-DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
-GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
-
-// Warning: the linker may use R11 to synthesize certain instructions. Please
-// take care and verify that no synthetic instructions use it.
-
-TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
- // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It
- // might look like it's only 60 bytes of space but the final four bytes
- // will be written by another function.) We need to skip over four
- // bytes of stack because that's saving the value of 'g'.
- ADD $4, R13, R8
- MOVM.IB [R4-R7], (R8)
- MOVM.IA.W (R1), [R2-R5]
- MOVW $·poly1305_init_constants_armv6<>(SB), R7
- MOVW R2, R8
- MOVW R2>>26, R9
- MOVW R3>>20, g
- MOVW R4>>14, R11
- MOVW R5>>8, R12
- ORR R3<<6, R9, R9
- ORR R4<<12, g, g
- ORR R5<<18, R11, R11
- MOVM.IA (R7), [R2-R6]
- AND R8, R2, R2
- AND R9, R3, R3
- AND g, R4, R4
- AND R11, R5, R5
- AND R12, R6, R6
- MOVM.IA.W [R2-R6], (R0)
- EOR R2, R2, R2
- EOR R3, R3, R3
- EOR R4, R4, R4
- EOR R5, R5, R5
- EOR R6, R6, R6
- MOVM.IA.W [R2-R6], (R0)
- MOVM.IA.W (R1), [R2-R5]
- MOVM.IA [R2-R6], (R0)
- ADD $20, R13, R0
- MOVM.DA (R0), [R4-R7]
- RET
-
-#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
- MOVBU (offset+0)(Rsrc), Rtmp; \
- MOVBU Rtmp, (offset+0)(Rdst); \
- MOVBU (offset+1)(Rsrc), Rtmp; \
- MOVBU Rtmp, (offset+1)(Rdst); \
- MOVBU (offset+2)(Rsrc), Rtmp; \
- MOVBU Rtmp, (offset+2)(Rdst); \
- MOVBU (offset+3)(Rsrc), Rtmp; \
- MOVBU Rtmp, (offset+3)(Rdst)
-
-TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
- // Needs 24 bytes of stack for saved registers and then 88 bytes of
- // scratch space after that. We assume that 24 bytes at (R13) have
- // already been used: four bytes for the link register saved in the
- // prelude of poly1305_auth_armv6, four bytes for saving the value of g
- // in that function and 16 bytes of scratch space used around
- // poly1305_finish_ext_armv6_skip1.
- ADD $24, R13, R12
- MOVM.IB [R4-R8, R14], (R12)
- MOVW R0, 88(R13)
- MOVW R1, 92(R13)
- MOVW R2, 96(R13)
- MOVW R1, R14
- MOVW R2, R12
- MOVW 56(R0), R8
- WORD $0xe1180008 // TST R8, R8 not working see issue 5921
- EOR R6, R6, R6
- MOVW.EQ $(1<<24), R6
- MOVW R6, 84(R13)
- ADD $116, R13, g
- MOVM.IA (R0), [R0-R9]
- MOVM.IA [R0-R4], (g)
- CMP $16, R12
- BLO poly1305_blocks_armv6_done
-
-poly1305_blocks_armv6_mainloop:
- WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
- BEQ poly1305_blocks_armv6_mainloop_aligned
- ADD $100, R13, g
- MOVW_UNALIGNED(R14, g, R0, 0)
- MOVW_UNALIGNED(R14, g, R0, 4)
- MOVW_UNALIGNED(R14, g, R0, 8)
- MOVW_UNALIGNED(R14, g, R0, 12)
- MOVM.IA (g), [R0-R3]
- ADD $16, R14
- B poly1305_blocks_armv6_mainloop_loaded
-
-poly1305_blocks_armv6_mainloop_aligned:
- MOVM.IA.W (R14), [R0-R3]
-
-poly1305_blocks_armv6_mainloop_loaded:
- MOVW R0>>26, g
- MOVW R1>>20, R11
- MOVW R2>>14, R12
- MOVW R14, 92(R13)
- MOVW R3>>8, R4
- ORR R1<<6, g, g
- ORR R2<<12, R11, R11
- ORR R3<<18, R12, R12
- BIC $0xfc000000, R0, R0
- BIC $0xfc000000, g, g
- MOVW 84(R13), R3
- BIC $0xfc000000, R11, R11
- BIC $0xfc000000, R12, R12
- ADD R0, R5, R5
- ADD g, R6, R6
- ORR R3, R4, R4
- ADD R11, R7, R7
- ADD $116, R13, R14
- ADD R12, R8, R8
- ADD R4, R9, R9
- MOVM.IA (R14), [R0-R4]
- MULLU R4, R5, (R11, g)
- MULLU R3, R5, (R14, R12)
- MULALU R3, R6, (R11, g)
- MULALU R2, R6, (R14, R12)
- MULALU R2, R7, (R11, g)
- MULALU R1, R7, (R14, R12)
- ADD R4<<2, R4, R4
- ADD R3<<2, R3, R3
- MULALU R1, R8, (R11, g)
- MULALU R0, R8, (R14, R12)
- MULALU R0, R9, (R11, g)
- MULALU R4, R9, (R14, R12)
- MOVW g, 76(R13)
- MOVW R11, 80(R13)
- MOVW R12, 68(R13)
- MOVW R14, 72(R13)
- MULLU R2, R5, (R11, g)
- MULLU R1, R5, (R14, R12)
- MULALU R1, R6, (R11, g)
- MULALU R0, R6, (R14, R12)
- MULALU R0, R7, (R11, g)
- MULALU R4, R7, (R14, R12)
- ADD R2<<2, R2, R2
- ADD R1<<2, R1, R1
- MULALU R4, R8, (R11, g)
- MULALU R3, R8, (R14, R12)
- MULALU R3, R9, (R11, g)
- MULALU R2, R9, (R14, R12)
- MOVW g, 60(R13)
- MOVW R11, 64(R13)
- MOVW R12, 52(R13)
- MOVW R14, 56(R13)
- MULLU R0, R5, (R11, g)
- MULALU R4, R6, (R11, g)
- MULALU R3, R7, (R11, g)
- MULALU R2, R8, (R11, g)
- MULALU R1, R9, (R11, g)
- ADD $52, R13, R0
- MOVM.IA (R0), [R0-R7]
- MOVW g>>26, R12
- MOVW R4>>26, R14
- ORR R11<<6, R12, R12
- ORR R5<<6, R14, R14
- BIC $0xfc000000, g, g
- BIC $0xfc000000, R4, R4
- ADD.S R12, R0, R0
- ADC $0, R1, R1
- ADD.S R14, R6, R6
- ADC $0, R7, R7
- MOVW R0>>26, R12
- MOVW R6>>26, R14
- ORR R1<<6, R12, R12
- ORR R7<<6, R14, R14
- BIC $0xfc000000, R0, R0
- BIC $0xfc000000, R6, R6
- ADD R14<<2, R14, R14
- ADD.S R12, R2, R2
- ADC $0, R3, R3
- ADD R14, g, g
- MOVW R2>>26, R12
- MOVW g>>26, R14
- ORR R3<<6, R12, R12
- BIC $0xfc000000, g, R5
- BIC $0xfc000000, R2, R7
- ADD R12, R4, R4
- ADD R14, R0, R0
- MOVW R4>>26, R12
- BIC $0xfc000000, R4, R8
- ADD R12, R6, R9
- MOVW 96(R13), R12
- MOVW 92(R13), R14
- MOVW R0, R6
- CMP $32, R12
- SUB $16, R12, R12
- MOVW R12, 96(R13)
- BHS poly1305_blocks_armv6_mainloop
-
-poly1305_blocks_armv6_done:
- MOVW 88(R13), R12
- MOVW R5, 20(R12)
- MOVW R6, 24(R12)
- MOVW R7, 28(R12)
- MOVW R8, 32(R12)
- MOVW R9, 36(R12)
- ADD $48, R13, R0
- MOVM.DA (R0), [R4-R8, R14]
- RET
-
-#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
- MOVBU.P 1(Rsrc), Rtmp; \
- MOVBU.P Rtmp, 1(Rdst); \
- MOVBU.P 1(Rsrc), Rtmp; \
- MOVBU.P Rtmp, 1(Rdst)
-
-#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
- MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
- MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
-
-// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key)
-TEXT ·poly1305_auth_armv6(SB), $196-16
- // The value 196, just above, is the sum of 64 (the size of the context
- // structure) and 132 (the amount of stack needed).
- //
- // At this point, the stack pointer (R13) has been moved down. It
- // points to the saved link register and there's 196 bytes of free
- // space above it.
- //
- // The stack for this function looks like:
- //
- // +---------------------
- // |
- // | 64 bytes of context structure
- // |
- // +---------------------
- // |
- // | 112 bytes for poly1305_blocks_armv6
- // |
- // +---------------------
- // | 16 bytes of final block, constructed at
- // | poly1305_finish_ext_armv6_skip8
- // +---------------------
- // | four bytes of saved 'g'
- // +---------------------
- // | lr, saved by prelude <- R13 points here
- // +---------------------
- MOVW g, 4(R13)
-
- MOVW out+0(FP), R4
- MOVW m+4(FP), R5
- MOVW mlen+8(FP), R6
- MOVW key+12(FP), R7
-
- ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112
- MOVW R7, R1
-
- // poly1305_init_ext_armv6 will write to the stack from R13+4, but
- // that's ok because none of the other values have been written yet.
- BL poly1305_init_ext_armv6<>(SB)
- BIC.S $15, R6, R2
- BEQ poly1305_auth_armv6_noblocks
- ADD $136, R13, R0
- MOVW R5, R1
- ADD R2, R5, R5
- SUB R2, R6, R6
- BL poly1305_blocks_armv6<>(SB)
-
-poly1305_auth_armv6_noblocks:
- ADD $136, R13, R0
- MOVW R5, R1
- MOVW R6, R2
- MOVW R4, R3
-
- MOVW R0, R5
- MOVW R1, R6
- MOVW R2, R7
- MOVW R3, R8
- AND.S R2, R2, R2
- BEQ poly1305_finish_ext_armv6_noremaining
- EOR R0, R0
- ADD $8, R13, R9 // 8 = offset to 16 byte scratch space
- MOVW R0, (R9)
- MOVW R0, 4(R9)
- MOVW R0, 8(R9)
- MOVW R0, 12(R9)
- WORD $0xe3110003 // TST R1, #3 not working see issue 5921
- BEQ poly1305_finish_ext_armv6_aligned
- WORD $0xe3120008 // TST R2, #8 not working see issue 5921
- BEQ poly1305_finish_ext_armv6_skip8
- MOVWP_UNALIGNED(R1, R9, g)
- MOVWP_UNALIGNED(R1, R9, g)
-
-poly1305_finish_ext_armv6_skip8:
- WORD $0xe3120004 // TST $4, R2 not working see issue 5921
- BEQ poly1305_finish_ext_armv6_skip4
- MOVWP_UNALIGNED(R1, R9, g)
-
-poly1305_finish_ext_armv6_skip4:
- WORD $0xe3120002 // TST $2, R2 not working see issue 5921
- BEQ poly1305_finish_ext_armv6_skip2
- MOVHUP_UNALIGNED(R1, R9, g)
- B poly1305_finish_ext_armv6_skip2
-
-poly1305_finish_ext_armv6_aligned:
- WORD $0xe3120008 // TST R2, #8 not working see issue 5921
- BEQ poly1305_finish_ext_armv6_skip8_aligned
- MOVM.IA.W (R1), [g-R11]
- MOVM.IA.W [g-R11], (R9)
-
-poly1305_finish_ext_armv6_skip8_aligned:
- WORD $0xe3120004 // TST $4, R2 not working see issue 5921
- BEQ poly1305_finish_ext_armv6_skip4_aligned
- MOVW.P 4(R1), g
- MOVW.P g, 4(R9)
-
-poly1305_finish_ext_armv6_skip4_aligned:
- WORD $0xe3120002 // TST $2, R2 not working see issue 5921
- BEQ poly1305_finish_ext_armv6_skip2
- MOVHU.P 2(R1), g
- MOVH.P g, 2(R9)
-
-poly1305_finish_ext_armv6_skip2:
- WORD $0xe3120001 // TST $1, R2 not working see issue 5921
- BEQ poly1305_finish_ext_armv6_skip1
- MOVBU.P 1(R1), g
- MOVBU.P g, 1(R9)
-
-poly1305_finish_ext_armv6_skip1:
- MOVW $1, R11
- MOVBU R11, 0(R9)
- MOVW R11, 56(R5)
- MOVW R5, R0
- ADD $8, R13, R1
- MOVW $16, R2
- BL poly1305_blocks_armv6<>(SB)
-
-poly1305_finish_ext_armv6_noremaining:
- MOVW 20(R5), R0
- MOVW 24(R5), R1
- MOVW 28(R5), R2
- MOVW 32(R5), R3
- MOVW 36(R5), R4
- MOVW R4>>26, R12
- BIC $0xfc000000, R4, R4
- ADD R12<<2, R12, R12
- ADD R12, R0, R0
- MOVW R0>>26, R12
- BIC $0xfc000000, R0, R0
- ADD R12, R1, R1
- MOVW R1>>26, R12
- BIC $0xfc000000, R1, R1
- ADD R12, R2, R2
- MOVW R2>>26, R12
- BIC $0xfc000000, R2, R2
- ADD R12, R3, R3
- MOVW R3>>26, R12
- BIC $0xfc000000, R3, R3
- ADD R12, R4, R4
- ADD $5, R0, R6
- MOVW R6>>26, R12
- BIC $0xfc000000, R6, R6
- ADD R12, R1, R7
- MOVW R7>>26, R12
- BIC $0xfc000000, R7, R7
- ADD R12, R2, g
- MOVW g>>26, R12
- BIC $0xfc000000, g, g
- ADD R12, R3, R11
- MOVW $-(1<<26), R12
- ADD R11>>26, R12, R12
- BIC $0xfc000000, R11, R11
- ADD R12, R4, R9
- MOVW R9>>31, R12
- SUB $1, R12
- AND R12, R6, R6
- AND R12, R7, R7
- AND R12, g, g
- AND R12, R11, R11
- AND R12, R9, R9
- MVN R12, R12
- AND R12, R0, R0
- AND R12, R1, R1
- AND R12, R2, R2
- AND R12, R3, R3
- AND R12, R4, R4
- ORR R6, R0, R0
- ORR R7, R1, R1
- ORR g, R2, R2
- ORR R11, R3, R3
- ORR R9, R4, R4
- ORR R1<<26, R0, R0
- MOVW R1>>6, R1
- ORR R2<<20, R1, R1
- MOVW R2>>12, R2
- ORR R3<<14, R2, R2
- MOVW R3>>18, R3
- ORR R4<<8, R3, R3
- MOVW 40(R5), R6
- MOVW 44(R5), R7
- MOVW 48(R5), g
- MOVW 52(R5), R11
- ADD.S R6, R0, R0
- ADC.S R7, R1, R1
- ADC.S g, R2, R2
- ADC.S R11, R3, R3
- MOVM.IA [R0-R3], (R8)
- MOVW R5, R12
- EOR R0, R0, R0
- EOR R1, R1, R1
- EOR R2, R2, R2
- EOR R3, R3, R3
- EOR R4, R4, R4
- EOR R5, R5, R5
- EOR R6, R6, R6
- EOR R7, R7, R7
- MOVM.IA.W [R0-R7], (R12)
- MOVM.IA [R0-R7], (R12)
- MOVW 4(R13), g
- RET
diff --git a/src/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/src/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
index 1682eda45f..32a9cef6bb 100644
--- a/src/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
+++ b/src/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl
+// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo appengine nacl
package poly1305
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
new file mode 100644
index 0000000000..fe9feb7980
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
@@ -0,0 +1,11 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build riscv64
+
+package cpu
+
+const cacheLineSize = 32
+
+func doinit() {}
diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt
index 88ea5f1e56..0944c9a533 100644
--- a/src/vendor/modules.txt
+++ b/src/vendor/modules.txt
@@ -1,4 +1,4 @@
-# golang.org/x/crypto v0.0.0-20191111213947-16651526fdb4
+# golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d
## explicit
golang.org/x/crypto/chacha20
golang.org/x/crypto/chacha20poly1305
@@ -8,7 +8,7 @@ golang.org/x/crypto/curve25519
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/subtle
golang.org/x/crypto/poly1305
-# golang.org/x/net v0.0.0-20191105084925-a882066a44e0
+# golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933
## explicit
golang.org/x/net/dns/dnsmessage
golang.org/x/net/http/httpguts