aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorDavid Chase <drchase@google.com>2022-03-07 18:27:14 -0500
committerDavid Chase <drchase@google.com>2022-03-07 18:27:14 -0500
commitf49279383901af656e40f225eb67ff62f507e13d (patch)
tree456dc18ec82112c48d73ef2a3f369e15dc84ed05 /src
parent768804dfdd81b04a1184c86e538634872b907149 (diff)
parentc9b60632ebb08a428a9bd15a89798a693667cb05 (diff)
downloadgo-f49279383901af656e40f225eb67ff62f507e13d.tar.xz
[dev.boringcrypto] all: merge master into dev.boringcrypto
Change-Id: I4e09d4f2cc77c4c2dc12f1ff40d8c36053ab7ab6
Diffstat (limited to 'src')
-rw-r--r--src/builtin/builtin.go6
-rw-r--r--src/bytes/bytes.go4
-rw-r--r--src/cmd/compile/abi-internal.md2
-rw-r--r--src/cmd/compile/internal/base/base.go3
-rw-r--r--src/cmd/compile/internal/base/debug.go1
-rw-r--r--src/cmd/compile/internal/base/flag.go2
-rw-r--r--src/cmd/compile/internal/escape/call.go9
-rw-r--r--src/cmd/compile/internal/escape/escape.go4
-rw-r--r--src/cmd/compile/internal/gc/main.go12
-rw-r--r--src/cmd/compile/internal/gc/obj.go8
-rw-r--r--src/cmd/compile/internal/importer/iimport.go22
-rw-r--r--src/cmd/compile/internal/importer/support.go15
-rw-r--r--src/cmd/compile/internal/importer/ureader.go (renamed from src/cmd/compile/internal/noder/reader2.go)274
-rw-r--r--src/cmd/compile/internal/ir/copy.go2
-rw-r--r--src/cmd/compile/internal/ir/expr.go5
-rw-r--r--src/cmd/compile/internal/ir/fmt.go52
-rw-r--r--src/cmd/compile/internal/ir/func.go9
-rw-r--r--src/cmd/compile/internal/ir/name.go20
-rw-r--r--src/cmd/compile/internal/ir/node.go10
-rw-r--r--src/cmd/compile/internal/ir/node_gen.go123
-rw-r--r--src/cmd/compile/internal/ir/op_string.go313
-rw-r--r--src/cmd/compile/internal/ir/sizeof_test.go2
-rw-r--r--src/cmd/compile/internal/ir/type.go116
-rw-r--r--src/cmd/compile/internal/noder/codes.go65
-rw-r--r--src/cmd/compile/internal/noder/decoder.go302
-rw-r--r--src/cmd/compile/internal/noder/encoder.go285
-rw-r--r--src/cmd/compile/internal/noder/expr.go53
-rw-r--r--src/cmd/compile/internal/noder/helpers.go89
-rw-r--r--src/cmd/compile/internal/noder/import.go175
-rw-r--r--src/cmd/compile/internal/noder/irgen.go9
-rw-r--r--src/cmd/compile/internal/noder/linker.go174
-rw-r--r--src/cmd/compile/internal/noder/noder.go1444
-rw-r--r--src/cmd/compile/internal/noder/quirks.go369
-rw-r--r--src/cmd/compile/internal/noder/reader.go408
-rw-r--r--src/cmd/compile/internal/noder/stencil.go28
-rw-r--r--src/cmd/compile/internal/noder/sync.go187
-rw-r--r--src/cmd/compile/internal/noder/syncmarker_string.go156
-rw-r--r--src/cmd/compile/internal/noder/transform.go6
-rw-r--r--src/cmd/compile/internal/noder/unified.go110
-rw-r--r--src/cmd/compile/internal/noder/unified_test.go160
-rw-r--r--src/cmd/compile/internal/noder/writer.go542
-rw-r--r--src/cmd/compile/internal/ppc64/ggen.go7
-rw-r--r--src/cmd/compile/internal/reflectdata/reflect.go10
-rw-r--r--src/cmd/compile/internal/ssa/debug_lines_test.go4
-rw-r--r--src/cmd/compile/internal/ssa/gen/RISCV64.rules4
-rw-r--r--src/cmd/compile/internal/ssa/poset.go2
-rw-r--r--src/cmd/compile/internal/ssa/rewriteRISCV64.go10
-rw-r--r--src/cmd/compile/internal/ssagen/ssa.go32
-rw-r--r--src/cmd/compile/internal/staticinit/sched.go1
-rw-r--r--src/cmd/compile/internal/test/testdata/ptrsort.go2
-rw-r--r--src/cmd/compile/internal/typecheck/dcl.go131
-rw-r--r--src/cmd/compile/internal/typecheck/expr.go21
-rw-r--r--src/cmd/compile/internal/typecheck/func.go14
-rw-r--r--src/cmd/compile/internal/typecheck/iexport.go20
-rw-r--r--src/cmd/compile/internal/typecheck/iimport.go11
-rw-r--r--src/cmd/compile/internal/typecheck/subr.go96
-rw-r--r--src/cmd/compile/internal/typecheck/type.go126
-rw-r--r--src/cmd/compile/internal/typecheck/typecheck.go52
-rw-r--r--src/cmd/compile/internal/types/fmt.go33
-rw-r--r--src/cmd/compile/internal/types/universe.go4
-rw-r--r--src/cmd/compile/internal/types2/api.go10
-rw-r--r--src/cmd/compile/internal/types2/api_test.go68
-rw-r--r--src/cmd/compile/internal/types2/assignments.go13
-rw-r--r--src/cmd/compile/internal/types2/builtins.go28
-rw-r--r--src/cmd/compile/internal/types2/call.go8
-rw-r--r--src/cmd/compile/internal/types2/check.go58
-rw-r--r--src/cmd/compile/internal/types2/compilersupport.go8
-rw-r--r--src/cmd/compile/internal/types2/conversions.go5
-rw-r--r--src/cmd/compile/internal/types2/decl.go2
-rw-r--r--src/cmd/compile/internal/types2/errors.go11
-rw-r--r--src/cmd/compile/internal/types2/expr.go14
-rw-r--r--src/cmd/compile/internal/types2/index.go6
-rw-r--r--src/cmd/compile/internal/types2/infer.go205
-rw-r--r--src/cmd/compile/internal/types2/instantiate.go2
-rw-r--r--src/cmd/compile/internal/types2/interface.go28
-rw-r--r--src/cmd/compile/internal/types2/lookup.go29
-rw-r--r--src/cmd/compile/internal/types2/named.go32
-rw-r--r--src/cmd/compile/internal/types2/predicates.go15
-rw-r--r--src/cmd/compile/internal/types2/signature.go139
-rw-r--r--src/cmd/compile/internal/types2/stmt.go32
-rw-r--r--src/cmd/compile/internal/types2/subst.go16
-rw-r--r--src/cmd/compile/internal/types2/termlist.go9
-rw-r--r--src/cmd/compile/internal/types2/termlist_test.go29
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/builtins.go26
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/builtins.src8
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/funcinference.go214
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeinference.go212
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeparams.go212
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/inference.go218
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/methods.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/types.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/typesets.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go211
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go212
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49482.go225
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49735.go211
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50755.go247
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51145.go18
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51158.go218
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51229.go2164
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51232.go229
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51233.go225
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51257.go246
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51335.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51339.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51360.go13
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51376.go224
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51386.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51437.go17
-rw-r--r--src/cmd/compile/internal/types2/type.go14
-rw-r--r--src/cmd/compile/internal/types2/typeparam.go22
-rw-r--r--src/cmd/compile/internal/types2/typeset.go5
-rw-r--r--src/cmd/compile/internal/types2/typexpr.go22
-rw-r--r--src/cmd/compile/internal/types2/unify.go91
-rw-r--r--src/cmd/compile/internal/types2/validtype.go2
-rw-r--r--src/cmd/compile/internal/walk/closure.go12
-rw-r--r--src/cmd/compile/internal/walk/complit.go2
-rw-r--r--src/cmd/compile/internal/walk/order.go9
-rw-r--r--src/cmd/compile/internal/walk/select.go25
-rw-r--r--src/cmd/dist/buildtool.go1
-rw-r--r--src/cmd/dist/test.go23
-rw-r--r--src/cmd/go/alldocs.go45
-rw-r--r--src/cmd/go/internal/base/flag.go7
-rw-r--r--src/cmd/go/internal/cfg/cfg.go6
-rw-r--r--src/cmd/go/internal/envcmd/env.go4
-rw-r--r--src/cmd/go/internal/help/helpdoc.go8
-rw-r--r--src/cmd/go/internal/list/list.go99
-rw-r--r--src/cmd/go/internal/load/pkg.go22
-rw-r--r--src/cmd/go/internal/modcmd/download.go1
-rw-r--r--src/cmd/go/internal/modcmd/graph.go1
-rw-r--r--src/cmd/go/internal/modcmd/verify.go1
-rw-r--r--src/cmd/go/internal/modcmd/why.go1
-rw-r--r--src/cmd/go/internal/modfetch/coderepo.go53
-rw-r--r--src/cmd/go/internal/modfetch/coderepo_test.go48
-rw-r--r--src/cmd/go/internal/modfetch/fetch.go11
-rw-r--r--src/cmd/go/internal/modload/import.go18
-rw-r--r--src/cmd/go/internal/modload/init.go37
-rw-r--r--src/cmd/go/internal/modload/load.go30
-rw-r--r--src/cmd/go/internal/modload/modfile.go2
-rw-r--r--src/cmd/go/internal/run/run.go6
-rw-r--r--src/cmd/go/internal/test/testflag.go1
-rw-r--r--src/cmd/go/internal/version/version.go9
-rw-r--r--src/cmd/go/internal/vet/vet.go2
-rw-r--r--src/cmd/go/internal/work/build.go9
-rw-r--r--src/cmd/go/internal/work/exec.go1
-rw-r--r--src/cmd/go/internal/workcmd/edit.go9
-rw-r--r--src/cmd/go/internal/workcmd/init.go13
-rw-r--r--src/cmd/go/internal/workcmd/sync.go6
-rw-r--r--src/cmd/go/internal/workcmd/use.go149
-rw-r--r--src/cmd/go/internal/workcmd/work.go8
-rw-r--r--src/cmd/go/testdata/script/build_internal.txt2
-rw-r--r--src/cmd/go/testdata/script/list_json_fields.txt52
-rw-r--r--src/cmd/go/testdata/script/mod_download_partial.txt11
-rw-r--r--src/cmd/go/testdata/script/mod_fs_patterns.txt6
-rw-r--r--src/cmd/go/testdata/script/mod_list_dir.txt2
-rw-r--r--src/cmd/go/testdata/script/mod_list_replace_dir.txt2
-rw-r--r--src/cmd/go/testdata/script/run_issue51125.txt54
-rw-r--r--src/cmd/go/testdata/script/run_work_versioned.txt16
-rw-r--r--src/cmd/go/testdata/script/test_fuzz_return.txt19
-rw-r--r--src/cmd/go/testdata/script/test_relative_cmdline.txt4
-rw-r--r--src/cmd/go/testdata/script/work.txt12
-rw-r--r--src/cmd/go/testdata/script/work_edit.txt13
-rw-r--r--src/cmd/go/testdata/script/work_env.txt4
-rw-r--r--src/cmd/go/testdata/script/work_gowork.txt24
-rw-r--r--src/cmd/go/testdata/script/work_init_gowork.txt19
-rw-r--r--src/cmd/go/testdata/script/work_init_path.txt17
-rw-r--r--src/cmd/go/testdata/script/work_issue51204.txt57
-rw-r--r--src/cmd/go/testdata/script/work_module_not_in_go_work.txt25
-rw-r--r--src/cmd/go/testdata/script/work_nowork.txt10
-rw-r--r--src/cmd/go/testdata/script/work_replace_conflict.txt6
-rw-r--r--src/cmd/go/testdata/script/work_use.txt10
-rw-r--r--src/cmd/go/testdata/script/work_use_deleted.txt22
-rw-r--r--src/cmd/go/testdata/script/work_use_dot.txt30
-rw-r--r--src/cmd/go/testdata/script/work_use_noargs.txt11
-rw-r--r--src/cmd/go/testdata/script/work_vet.txt19
-rw-r--r--src/cmd/go/testdata/script/work_workfile.txt21
-rw-r--r--src/cmd/gofmt/gofmt.go128
-rw-r--r--src/cmd/internal/obj/link.go1
-rw-r--r--src/cmd/internal/obj/objfile.go1
-rw-r--r--src/cmd/internal/obj/ppc64/asm9.go8
-rw-r--r--src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go1
-rw-r--r--src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s1
-rw-r--r--src/cmd/internal/objabi/funcdata.go1
-rw-r--r--src/cmd/internal/src/pos.go5
-rw-r--r--src/cmd/internal/src/pos_test.go14
-rw-r--r--src/cmd/link/internal/ld/dwarf.go5
-rw-r--r--src/cmd/link/internal/ld/symtab.go1
-rw-r--r--src/compress/gzip/gunzip.go4
-rw-r--r--src/compress/gzip/gunzip_test.go80
-rw-r--r--src/crypto/aes/asm_amd64.s12
-rw-r--r--src/crypto/aes/asm_ppc64le.s225
-rw-r--r--src/crypto/aes/cbc_ppc64le.go71
-rw-r--r--src/crypto/cipher/cbc.go22
-rw-r--r--src/crypto/cipher/export_test.go2
-rw-r--r--src/crypto/cipher/fuzz_test.go103
-rw-r--r--src/crypto/ed25519/ed25519.go25
-rw-r--r--src/crypto/ed25519/internal/edwards25519/edwards25519.go6
-rw-r--r--src/crypto/ed25519/internal/edwards25519/field/fe.go40
-rw-r--r--src/crypto/ed25519/internal/edwards25519/field/fe_alias_test.go18
-rw-r--r--src/crypto/ed25519/internal/edwards25519/field/fe_generic.go2
-rw-r--r--src/crypto/ed25519/internal/edwards25519/field/fe_test.go16
-rw-r--r--src/crypto/ed25519/internal/edwards25519/scalar.go25
-rw-r--r--src/crypto/ed25519/internal/edwards25519/scalar_test.go6
-rw-r--r--src/crypto/rand/eagain.go27
-rw-r--r--src/crypto/rand/rand_plan9.go109
-rw-r--r--src/crypto/rand/rand_unix.go120
-rw-r--r--src/crypto/sha256/sha256block_decl.go2
-rw-r--r--src/crypto/sha256/sha256block_generic.go2
-rw-r--r--src/crypto/sha256/sha256block_ppc64x.s (renamed from src/crypto/sha256/sha256block_ppc64le.s)84
-rw-r--r--src/crypto/sha512/sha512block_decl.go2
-rw-r--r--src/crypto/sha512/sha512block_generic.go2
-rw-r--r--src/crypto/sha512/sha512block_ppc64x.s (renamed from src/crypto/sha512/sha512block_ppc64le.s)80
-rw-r--r--src/crypto/x509/internal/macos/corefoundation.go8
-rw-r--r--src/database/sql/fakedb_test.go3
-rw-r--r--src/database/sql/sql_test.go31
-rw-r--r--src/debug/buildinfo/buildinfo.go4
-rw-r--r--src/debug/buildinfo/buildinfo_test.go10
-rw-r--r--src/encoding/binary/binary.go73
-rw-r--r--src/encoding/binary/binary_test.go114
-rw-r--r--src/encoding/json/encode.go6
-rw-r--r--src/encoding/xml/marshal.go2
-rw-r--r--src/encoding/xml/marshal_test.go36
-rw-r--r--src/go/build/deps_test.go2
-rw-r--r--src/go/doc/reader.go1
-rw-r--r--src/go/doc/testdata/b.0.golden3
-rw-r--r--src/go/doc/testdata/b.1.golden6
-rw-r--r--src/go/doc/testdata/b.2.golden3
-rw-r--r--src/go/doc/testdata/b.go6
-rw-r--r--src/go/internal/gcimporter/iimport.go23
-rw-r--r--src/go/parser/parser.go161
-rw-r--r--src/go/parser/short_test.go14
-rw-r--r--src/go/parser/testdata/issue49482.go235
-rw-r--r--src/go/parser/testdata/typeparams.src2
-rw-r--r--src/go/printer/nodes.go28
-rw-r--r--src/go/printer/testdata/generics.golden26
-rw-r--r--src/go/printer/testdata/generics.input26
-rw-r--r--src/go/token/token.go5
-rw-r--r--src/go/types/api.go10
-rw-r--r--src/go/types/api_test.go64
-rw-r--r--src/go/types/assignments.go13
-rw-r--r--src/go/types/builtins.go28
-rw-r--r--src/go/types/call.go9
-rw-r--r--src/go/types/check.go71
-rw-r--r--src/go/types/conversions.go5
-rw-r--r--src/go/types/decl.go2
-rw-r--r--src/go/types/errorcodes.go7
-rw-r--r--src/go/types/errors.go11
-rw-r--r--src/go/types/expr.go14
-rw-r--r--src/go/types/index.go5
-rw-r--r--src/go/types/infer.go205
-rw-r--r--src/go/types/instantiate.go2
-rw-r--r--src/go/types/interface.go28
-rw-r--r--src/go/types/lookup.go29
-rw-r--r--src/go/types/named.go32
-rw-r--r--src/go/types/predicates.go15
-rw-r--r--src/go/types/signature.go144
-rw-r--r--src/go/types/stmt.go27
-rw-r--r--src/go/types/subst.go16
-rw-r--r--src/go/types/termlist.go9
-rw-r--r--src/go/types/termlist_test.go29
-rw-r--r--src/go/types/testdata/check/builtins.go26
-rw-r--r--src/go/types/testdata/check/builtins.src8
-rw-r--r--src/go/types/testdata/check/funcinference.go212
-rw-r--r--src/go/types/testdata/check/typeinference.go212
-rw-r--r--src/go/types/testdata/check/typeparams.go212
-rw-r--r--src/go/types/testdata/examples/inference.go218
-rw-r--r--src/go/types/testdata/examples/methods.go22
-rw-r--r--src/go/types/testdata/examples/types.go24
-rw-r--r--src/go/types/testdata/examples/typesets.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue43527.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue43671.go24
-rw-r--r--src/go/types/testdata/fixedbugs/issue45548.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue47115.go24
-rw-r--r--src/go/types/testdata/fixedbugs/issue48619.go211
-rw-r--r--src/go/types/testdata/fixedbugs/issue48656.go212
-rw-r--r--src/go/types/testdata/fixedbugs/issue49482.go228
-rw-r--r--src/go/types/testdata/fixedbugs/issue49735.go211
-rw-r--r--src/go/types/testdata/fixedbugs/issue50417.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue50755.go247
-rw-r--r--src/go/types/testdata/fixedbugs/issue51145.go18
-rw-r--r--src/go/types/testdata/fixedbugs/issue51158.go218
-rw-r--r--src/go/types/testdata/fixedbugs/issue51229.go2164
-rw-r--r--src/go/types/testdata/fixedbugs/issue51232.go229
-rw-r--r--src/go/types/testdata/fixedbugs/issue51233.go225
-rw-r--r--src/go/types/testdata/fixedbugs/issue51257.go246
-rw-r--r--src/go/types/testdata/fixedbugs/issue51335.go216
-rw-r--r--src/go/types/testdata/fixedbugs/issue51339.go216
-rw-r--r--src/go/types/testdata/fixedbugs/issue51360.go13
-rw-r--r--src/go/types/testdata/fixedbugs/issue51376.go224
-rw-r--r--src/go/types/testdata/fixedbugs/issue51386.go217
-rw-r--r--src/go/types/testdata/fixedbugs/issue51437.go17
-rw-r--r--src/go/types/type.go14
-rw-r--r--src/go/types/typeparam.go22
-rw-r--r--src/go/types/typeset.go5
-rw-r--r--src/go/types/typexpr.go22
-rw-r--r--src/go/types/unify.go91
-rw-r--r--src/go/types/validtype.go2
-rw-r--r--src/internal/cfg/cfg.go1
-rw-r--r--src/internal/goversion/goversion.go2
-rw-r--r--src/internal/pkgbits/codes.go60
-rw-r--r--src/internal/pkgbits/decoder.go336
-rw-r--r--src/internal/pkgbits/encoder.go287
-rw-r--r--src/internal/pkgbits/frames_go1.go (renamed from src/cmd/compile/internal/noder/frames_go1.go)2
-rw-r--r--src/internal/pkgbits/frames_go17.go (renamed from src/cmd/compile/internal/noder/frames_go17.go)2
-rw-r--r--src/internal/pkgbits/reloc.go (renamed from src/cmd/compile/internal/noder/reloc.go)38
-rw-r--r--src/internal/pkgbits/support.go17
-rw-r--r--src/internal/pkgbits/sync.go125
-rw-r--r--src/internal/pkgbits/syncmarker_string.go87
-rw-r--r--src/math/big/intmarsh.go5
-rw-r--r--src/math/big/intmarsh_test.go13
-rw-r--r--src/net/dnsclient_unix.go19
-rw-r--r--src/net/dnsclient_unix_test.go57
-rw-r--r--src/net/http/transport.go6
-rw-r--r--src/net/http/transport_internal_test.go9
-rw-r--r--src/net/lookup_test.go69
-rw-r--r--src/net/net.go12
-rw-r--r--src/net/net_test.go64
-rw-r--r--src/net/smtp/auth.go3
-rw-r--r--src/os/example_test.go22
-rw-r--r--src/os/os_test.go40
-rw-r--r--src/reflect/all_test.go29
-rw-r--r--src/reflect/asm_arm64.s14
-rw-r--r--src/reflect/value.go26
-rw-r--r--src/regexp/regexp.go2
-rw-r--r--src/regexp/syntax/parse.go72
-rw-r--r--src/regexp/syntax/parse_test.go7
-rw-r--r--src/runtime/asm_386.s33
-rw-r--r--src/runtime/crash_cgo_test.go14
-rw-r--r--src/runtime/crash_unix_test.go2
-rw-r--r--src/runtime/debug/mod.go157
-rw-r--r--src/runtime/debug/mod_test.go75
-rw-r--r--src/runtime/defs1_netbsd_386.go1
-rw-r--r--src/runtime/defs1_netbsd_amd64.go1
-rw-r--r--src/runtime/defs1_netbsd_arm.go1
-rw-r--r--src/runtime/defs1_netbsd_arm64.go1
-rw-r--r--src/runtime/defs1_solaris_amd64.go1
-rw-r--r--src/runtime/defs_dragonfly.go1
-rw-r--r--src/runtime/defs_dragonfly_amd64.go1
-rw-r--r--src/runtime/defs_freebsd.go1
-rw-r--r--src/runtime/defs_freebsd_386.go1
-rw-r--r--src/runtime/defs_freebsd_amd64.go1
-rw-r--r--src/runtime/defs_freebsd_arm.go1
-rw-r--r--src/runtime/defs_freebsd_arm64.go1
-rw-r--r--src/runtime/defs_linux.go3
-rw-r--r--src/runtime/defs_linux_386.go3
-rw-r--r--src/runtime/defs_linux_amd64.go3
-rw-r--r--src/runtime/defs_linux_arm.go2
-rw-r--r--src/runtime/defs_linux_arm64.go3
-rw-r--r--src/runtime/defs_linux_mips64x.go3
-rw-r--r--src/runtime/defs_linux_mipsx.go3
-rw-r--r--src/runtime/defs_linux_ppc64.go3
-rw-r--r--src/runtime/defs_linux_ppc64le.go3
-rw-r--r--src/runtime/defs_linux_riscv64.go3
-rw-r--r--src/runtime/defs_linux_s390x.go3
-rw-r--r--src/runtime/defs_netbsd.go1
-rw-r--r--src/runtime/defs_openbsd.go1
-rw-r--r--src/runtime/defs_openbsd_386.go1
-rw-r--r--src/runtime/defs_openbsd_amd64.go1
-rw-r--r--src/runtime/defs_openbsd_arm.go1
-rw-r--r--src/runtime/defs_openbsd_arm64.go1
-rw-r--r--src/runtime/defs_openbsd_mips64.go1
-rw-r--r--src/runtime/defs_solaris.go1
-rw-r--r--src/runtime/export_aix_test.go1
-rw-r--r--src/runtime/export_darwin_test.go2
-rw-r--r--src/runtime/export_pipe2_test.go6
-rw-r--r--src/runtime/export_test.go20
-rw-r--r--src/runtime/export_unix_test.go1
-rw-r--r--src/runtime/funcdata.h1
-rw-r--r--src/runtime/histogram.go52
-rw-r--r--src/runtime/histogram_test.go40
-rw-r--r--src/runtime/internal/atomic/atomic_arm.s42
-rw-r--r--src/runtime/internal/syscall/asm_linux_386.s34
-rw-r--r--src/runtime/internal/syscall/asm_linux_amd64.s33
-rw-r--r--src/runtime/internal/syscall/asm_linux_arm.s32
-rw-r--r--src/runtime/internal/syscall/asm_linux_arm64.s29
-rw-r--r--src/runtime/internal/syscall/asm_linux_mips64x.s29
-rw-r--r--src/runtime/internal/syscall/asm_linux_mipsx.s34
-rw-r--r--src/runtime/internal/syscall/asm_linux_ppc64x.s28
-rw-r--r--src/runtime/internal/syscall/asm_linux_riscv64.s29
-rw-r--r--src/runtime/internal/syscall/asm_linux_s390x.s28
-rw-r--r--src/runtime/internal/syscall/syscall_linux.go12
-rw-r--r--src/runtime/memmove_ppc64x.s46
-rw-r--r--src/runtime/mfinal.go24
-rw-r--r--src/runtime/mfinal_test.go9
-rw-r--r--src/runtime/mgcpacer.go72
-rw-r--r--src/runtime/mgcpacer_test.go45
-rw-r--r--src/runtime/mgcscavenge.go58
-rw-r--r--src/runtime/mkpreempt.go41
-rw-r--r--src/runtime/nbpipe_pipe2.go13
-rw-r--r--src/runtime/nbpipe_pipe_test.go38
-rw-r--r--src/runtime/nbpipe_test.go25
-rw-r--r--src/runtime/os3_solaris.go22
-rw-r--r--src/runtime/os_aix.go9
-rw-r--r--src/runtime/os_darwin.go9
-rw-r--r--src/runtime/os_dragonfly.go11
-rw-r--r--src/runtime/os_freebsd.go11
-rw-r--r--src/runtime/os_linux.go214
-rw-r--r--src/runtime/os_netbsd.go11
-rw-r--r--src/runtime/os_openbsd.go9
-rw-r--r--src/runtime/os_openbsd_syscall2.go2
-rw-r--r--src/runtime/pprof/pprof_test.go67
-rw-r--r--src/runtime/preempt_arm64.s178
-rw-r--r--src/runtime/proc.go304
-rw-r--r--src/runtime/proc_runtime_test.go17
-rw-r--r--src/runtime/race_arm64.s18
-rw-r--r--src/runtime/runtime-gdb_test.go8
-rw-r--r--src/runtime/runtime2.go19
-rw-r--r--src/runtime/signal_unix.go16
-rw-r--r--src/runtime/sigqueue.go48
-rw-r--r--src/runtime/sigqueue_plan9.go7
-rw-r--r--src/runtime/symtab.go1
-rw-r--r--src/runtime/sys_darwin.go56
-rw-r--r--src/runtime/sys_darwin_amd64.s9
-rw-r--r--src/runtime/sys_darwin_arm64.s9
-rw-r--r--src/runtime/sys_dragonfly_amd64.s30
-rw-r--r--src/runtime/sys_freebsd_386.s32
-rw-r--r--src/runtime/sys_freebsd_amd64.s30
-rw-r--r--src/runtime/sys_freebsd_arm.s32
-rw-r--r--src/runtime/sys_freebsd_arm64.s26
-rw-r--r--src/runtime/sys_linux_386.s24
-rw-r--r--src/runtime/sys_linux_amd64.s24
-rw-r--r--src/runtime/sys_linux_arm.s23
-rw-r--r--src/runtime/sys_linux_arm64.s168
-rw-r--r--src/runtime/sys_linux_mips64x.s26
-rw-r--r--src/runtime/sys_linux_mipsx.s33
-rw-r--r--src/runtime/sys_linux_ppc64x.s20
-rw-r--r--src/runtime/sys_linux_riscv64.s24
-rw-r--r--src/runtime/sys_linux_s390x.s24
-rw-r--r--src/runtime/sys_netbsd_386.s32
-rw-r--r--src/runtime/sys_netbsd_amd64.s30
-rw-r--r--src/runtime/sys_netbsd_arm.s28
-rw-r--r--src/runtime/sys_netbsd_arm64.s24
-rw-r--r--src/runtime/sys_openbsd2.go10
-rw-r--r--src/runtime/sys_openbsd_mips64.s26
-rw-r--r--src/runtime/testdata/testprogcgo/aprof.go2
-rw-r--r--src/runtime/trace.go18
-rw-r--r--src/runtime/traceback.go6
-rw-r--r--src/sort/gen_sort_variants.go526
-rw-r--r--src/sort/genzfunc.go127
-rw-r--r--src/sort/sort.go337
-rw-r--r--src/sort/zfuncversion.go265
-rw-r--r--src/sort/zsortfunc.go342
-rw-r--r--src/sort/zsortinterface.go342
-rw-r--r--src/strings/builder.go7
-rw-r--r--src/strings/strings.go4
-rw-r--r--src/syscall/asm_linux_ppc64x.s10
-rw-r--r--src/syscall/syscall_linux.go90
-rw-r--r--src/syscall/syscall_linux_386.go14
-rw-r--r--src/syscall/syscall_linux_amd64.go7
-rw-r--r--src/syscall/syscall_linux_arm.go7
-rw-r--r--src/syscall/syscall_linux_arm64.go7
-rw-r--r--src/syscall/syscall_linux_mips64x.go7
-rw-r--r--src/syscall/syscall_linux_mipsx.go7
-rw-r--r--src/syscall/syscall_linux_ppc64x.go7
-rw-r--r--src/syscall/syscall_linux_riscv64.go7
-rw-r--r--src/syscall/syscall_linux_s390x.go14
-rw-r--r--src/syscall/syscall_linux_test.go71
-rw-r--r--src/syscall/syscall_unix_test.go21
-rw-r--r--src/syscall/zsyscall_linux_amd64.go11
-rw-r--r--src/syscall/zsyscall_linux_arm.go11
-rw-r--r--src/syscall/zsyscall_linux_arm64.go11
-rw-r--r--src/syscall/zsyscall_linux_mips.go11
-rw-r--r--src/syscall/zsyscall_linux_mips64.go11
-rw-r--r--src/syscall/zsyscall_linux_mips64le.go11
-rw-r--r--src/syscall/zsyscall_linux_mipsle.go11
-rw-r--r--src/syscall/zsyscall_linux_ppc64.go11
-rw-r--r--src/syscall/zsyscall_linux_ppc64le.go11
-rw-r--r--src/syscall/zsyscall_linux_riscv64.go11
-rw-r--r--src/testing/fuzz.go3
-rw-r--r--src/testing/testing.go2
-rw-r--r--src/testing/testing_other.go6
-rw-r--r--src/testing/testing_windows.go22
-rw-r--r--src/time/format.go1
-rw-r--r--src/unicode/utf8/utf8.go5
-rw-r--r--src/unicode/utf8/utf8_test.go58
480 files changed, 10141 insertions, 8854 deletions
diff --git a/src/builtin/builtin.go b/src/builtin/builtin.go
index 08ae7ed313..5657be4564 100644
--- a/src/builtin/builtin.go
+++ b/src/builtin/builtin.go
@@ -95,11 +95,11 @@ type rune = int32
type any = interface{}
// comparable is an interface that is implemented by all comparable types
-// (booleans, numbers, strings, pointers, channels, interfaces,
-// arrays of comparable types, structs whose fields are all comparable types).
+// (booleans, numbers, strings, pointers, channels, arrays of comparable types,
+// structs whose fields are all comparable types).
// The comparable interface may only be used as a type parameter constraint,
// not as the type of a variable.
-type comparable comparable
+type comparable interface{ comparable }
// iota is a predeclared identifier representing the untyped integer ordinal
// number of the current const specification in a (usually parenthesized)
diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
index 6fdaa49c73..41323ad549 100644
--- a/src/bytes/bytes.go
+++ b/src/bytes/bytes.go
@@ -372,6 +372,8 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
+//
+// To split around the first instance of a separator, see Cut.
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
// SplitAfterN slices s into subslices after each instance of sep and
@@ -389,6 +391,8 @@ func SplitAfterN(s, sep []byte, n int) [][]byte {
// the subslices between those separators.
// If sep is empty, Split splits after each UTF-8 sequence.
// It is equivalent to SplitN with a count of -1.
+//
+// To split around the first instance of a separator, see Cut.
func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
// SplitAfter slices s into all subslices after each instance of sep and
diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md
index 7fe4463665..53eaa84d54 100644
--- a/src/cmd/compile/abi-internal.md
+++ b/src/cmd/compile/abi-internal.md
@@ -155,7 +155,7 @@ as follows:
1. Remember I and FP.
1. If T has zero size, add T to the stack sequence S and return.
1. Try to register-assign V.
-1. If step 2 failed, reset I and FP to the values from step 1, add T
+1. If step 3 failed, reset I and FP to the values from step 1, add T
to the stack sequence S, and assign V to this field in S.
Register-assignment of a value V of underlying type T works as follows:
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
index be6d49fac7..39ce8e66f7 100644
--- a/src/cmd/compile/internal/base/base.go
+++ b/src/cmd/compile/internal/base/base.go
@@ -62,8 +62,9 @@ func Compiling(pkgs []string) bool {
// at best instrumentation would cause infinite recursion.
var NoInstrumentPkgs = []string{
"runtime/internal/atomic",
- "runtime/internal/sys",
"runtime/internal/math",
+ "runtime/internal/sys",
+ "runtime/internal/syscall",
"runtime",
"runtime/race",
"runtime/msan",
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
index b105e46e35..80b2ff5bd6 100644
--- a/src/cmd/compile/internal/base/debug.go
+++ b/src/cmd/compile/internal/base/debug.go
@@ -39,7 +39,6 @@ type DebugFlags struct {
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
Unified int `help:"enable unified IR construction"`
- UnifiedQuirks int `help:"enable unified IR construction's quirks mode"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`
MayMoreStack string `help:"call named function before all stack growth checks"`
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index d78f93b343..6377091ce0 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -55,7 +55,6 @@ type CmdFlags struct {
C CountFlag "help:\"disable printing of columns in error messages\""
D string "help:\"set relative `path` for local imports\""
E CountFlag "help:\"debug symbol export\""
- G CountFlag "help:\"accept generic code\""
I func(string) "help:\"add `directory` to import search path\""
K CountFlag "help:\"debug missing line numbers\""
L CountFlag "help:\"show full file names in error messages\""
@@ -141,7 +140,6 @@ type CmdFlags struct {
// ParseFlags parses the command-line flags into Flag.
func ParseFlags() {
- Flag.G = 3
Flag.I = addImportDir
Flag.LowerC = 1
diff --git a/src/cmd/compile/internal/escape/call.go b/src/cmd/compile/internal/escape/call.go
index d1215afca8..ee76adb0fa 100644
--- a/src/cmd/compile/internal/escape/call.go
+++ b/src/cmd/compile/internal/escape/call.go
@@ -238,6 +238,15 @@ func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
fn.SetWrapper(true)
fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
fn.Body = []ir.Node{call}
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
+ // If the callee is a named function, link to the original callee.
+ x := call.X
+ if x.Op() == ir.ONAME && x.(*ir.Name).Class == ir.PFUNC {
+ fn.WrappedFunc = call.X.(*ir.Name).Func
+ } else if x.Op() == ir.OMETHEXPR && ir.MethodExprFunc(x).Nname != nil {
+ fn.WrappedFunc = ir.MethodExprName(x).Func
+ }
+ }
clo := fn.OClosure
if n.Op() == ir.OGO {
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index c2145bdf91..bc6f7c93bb 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -10,6 +10,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
)
@@ -243,6 +244,9 @@ func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) {
n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128)
if !n.Byval() {
n.SetAddrtaken(true)
+ if n.Sym().Name == typecheck.LocalDictName {
+ base.FatalfAt(n.Pos(), "dictionary variable not captured by value")
+ }
}
if base.Flag.LowerM > 1 {
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 4c4a724cdf..5a9a889894 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -32,7 +32,6 @@ import (
"log"
"os"
"runtime"
- "sort"
)
// handlePanic ensures that we print out an "internal compiler error" for any panic
@@ -205,17 +204,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
// removal can skew the results (e.g., #43444).
pkginit.MakeInit()
- // Stability quirk: sort top-level declarations, so we're not
- // sensitive to the order that functions are added. In particular,
- // the order that noder+typecheck add function closures is very
- // subtle, and not important to reproduce.
- if base.Debug.UnifiedQuirks != 0 {
- s := typecheck.Target.Decls
- sort.SliceStable(s, func(i, j int) bool {
- return s[i].Pos().Before(s[j].Pos())
- })
- }
-
// Eliminate some obviously dead code.
// Must happen after typechecking.
for _, n := range typecheck.Target.Decls {
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index dcb54047f1..74e4c0a890 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -217,6 +217,10 @@ func dumpGlobalConst(n ir.Node) {
if ir.ConstOverflow(v, t) {
return
}
+ } else {
+ // If the type of the constant is an instantiated generic, we need to emit
+ // that type so the linker knows about it. See issue 51245.
+ _ = reflectdata.TypeLinksym(t)
}
base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
}
@@ -263,6 +267,10 @@ func addGCLocals() {
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
x.Set(obj.AttrStatic, true)
}
+ if x := fn.WrapInfo; x != nil && !x.OnList() {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ x.Set(obj.AttrStatic, true)
+ }
}
}
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
index a827987a48..bed4fbb016 100644
--- a/src/cmd/compile/internal/importer/iimport.go
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -180,6 +180,14 @@ func ImportData(imports map[string]*types2.Package, data, path string) (pkg *typ
p.doDecl(localpkg, name)
}
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the 'P' case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ d.t.SetConstraint(d.constraint)
+ }
// record all referenced packages as imports
list := append(([]*types2.Package)(nil), pkgList[1:]...)
sort.Sort(byPath(list))
@@ -191,6 +199,11 @@ func ImportData(imports map[string]*types2.Package, data, path string) (pkg *typ
return localpkg, nil
}
+type setConstraintArgs struct {
+ t *types2.TypeParam
+ constraint types2.Type
+}
+
type iimporter struct {
exportVersion int64
ipath string
@@ -206,6 +219,9 @@ type iimporter struct {
tparamIndex map[ident]*types2.TypeParam
interfaceList []*types2.Interface
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
}
func (p *iimporter) doDecl(pkg *types2.Package, name string) {
@@ -401,7 +417,11 @@ func (r *importReader) obj(name string) {
}
iface.MarkImplicit()
}
- t.SetConstraint(constraint)
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
case 'V':
typ := r.typ()
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
index 9377d99779..e382b2f28b 100644
--- a/src/cmd/compile/internal/importer/support.go
+++ b/src/cmd/compile/internal/importer/support.go
@@ -7,12 +7,17 @@
package importer
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/types2"
"fmt"
"go/token"
"sync"
)
+func assert(p bool) {
+ base.Assert(p)
+}
+
func errorf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
@@ -132,3 +137,13 @@ type anyType struct{}
func (t anyType) Underlying() types2.Type { return t }
func (t anyType) String() string { return "any" }
+
+type derivedInfo struct {
+ idx int
+ needed bool
+}
+
+type typeInfo struct {
+ idx int
+ derived bool
+}
diff --git a/src/cmd/compile/internal/noder/reader2.go b/src/cmd/compile/internal/importer/ureader.go
index c028d21c67..a22cd2bb53 100644
--- a/src/cmd/compile/internal/noder/reader2.go
+++ b/src/cmd/compile/internal/importer/ureader.go
@@ -4,17 +4,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package noder
+package importer
import (
"cmd/compile/internal/base"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"cmd/internal/src"
+ "internal/pkgbits"
)
-type pkgReader2 struct {
- pkgDecoder
+type pkgReader struct {
+ pkgbits.PkgDecoder
ctxt *types2.Context
imports map[string]*types2.Package
@@ -24,46 +25,46 @@ type pkgReader2 struct {
typs []types2.Type
}
-func readPackage2(ctxt *types2.Context, imports map[string]*types2.Package, input pkgDecoder) *types2.Package {
- pr := pkgReader2{
- pkgDecoder: input,
+func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input pkgbits.PkgDecoder) *types2.Package {
+ pr := pkgReader{
+ PkgDecoder: input,
ctxt: ctxt,
imports: imports,
- posBases: make([]*syntax.PosBase, input.numElems(relocPosBase)),
- pkgs: make([]*types2.Package, input.numElems(relocPkg)),
- typs: make([]types2.Type, input.numElems(relocType)),
+ posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)),
+ typs: make([]types2.Type, input.NumElems(pkgbits.RelocType)),
}
- r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
- r.bool() // has init
+ r.Bool() // has init
- for i, n := 0, r.len(); i < n; i++ {
+ for i, n := 0, r.Len(); i < n; i++ {
// As if r.obj(), but avoiding the Scope.Lookup call,
// to avoid eager loading of imports.
- r.sync(syncObject)
- assert(!r.bool())
- r.p.objIdx(r.reloc(relocObj))
- assert(r.len() == 0)
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ assert(r.Len() == 0)
}
- r.sync(syncEOF)
+ r.Sync(pkgbits.SyncEOF)
pkg.MarkComplete()
return pkg
}
-type reader2 struct {
- decoder
+type reader struct {
+ pkgbits.Decoder
- p *pkgReader2
+ p *pkgReader
- dict *reader2Dict
+ dict *readerDict
}
-type reader2Dict struct {
+type readerDict struct {
bounds []typeInfo
tparams []*types2.TypeParam
@@ -72,53 +73,53 @@ type reader2Dict struct {
derivedTypes []types2.Type
}
-type reader2TypeBound struct {
+type readerTypeBound struct {
derived bool
boundIdx int
}
-func (pr *pkgReader2) newReader(k reloc, idx int, marker syncMarker) *reader2 {
- return &reader2{
- decoder: pr.newDecoder(k, idx, marker),
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx int, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.NewDecoder(k, idx, marker),
p: pr,
}
}
// @@@ Positions
-func (r *reader2) pos() syntax.Pos {
- r.sync(syncPos)
- if !r.bool() {
+func (r *reader) pos() syntax.Pos {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
return syntax.Pos{}
}
// TODO(mdempsky): Delta encoding.
posBase := r.posBase()
- line := r.uint()
- col := r.uint()
+ line := r.Uint()
+ col := r.Uint()
return syntax.MakePos(posBase, line, col)
}
-func (r *reader2) posBase() *syntax.PosBase {
- return r.p.posBaseIdx(r.reloc(relocPosBase))
+func (r *reader) posBase() *syntax.PosBase {
+ return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
}
-func (pr *pkgReader2) posBaseIdx(idx int) *syntax.PosBase {
+func (pr *pkgReader) posBaseIdx(idx int) *syntax.PosBase {
if b := pr.posBases[idx]; b != nil {
return b
}
- r := pr.newReader(relocPosBase, idx, syncPosBase)
+ r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
var b *syntax.PosBase
- filename := r.string()
+ filename := r.String()
- if r.bool() {
+ if r.Bool() {
b = syntax.NewTrimmedFileBase(filename, true)
} else {
pos := r.pos()
- line := r.uint()
- col := r.uint()
+ line := r.Uint()
+ col := r.Uint()
b = syntax.NewLineBase(pos, filename, true, line, col)
}
@@ -128,45 +129,45 @@ func (pr *pkgReader2) posBaseIdx(idx int) *syntax.PosBase {
// @@@ Packages
-func (r *reader2) pkg() *types2.Package {
- r.sync(syncPkg)
- return r.p.pkgIdx(r.reloc(relocPkg))
+func (r *reader) pkg() *types2.Package {
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
}
-func (pr *pkgReader2) pkgIdx(idx int) *types2.Package {
+func (pr *pkgReader) pkgIdx(idx int) *types2.Package {
// TODO(mdempsky): Consider using some non-nil pointer to indicate
// the universe scope, so we don't need to keep re-reading it.
if pkg := pr.pkgs[idx]; pkg != nil {
return pkg
}
- pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
pr.pkgs[idx] = pkg
return pkg
}
-func (r *reader2) doPkg() *types2.Package {
- path := r.string()
+func (r *reader) doPkg() *types2.Package {
+ path := r.String()
if path == "builtin" {
return nil // universe
}
if path == "" {
- path = r.p.pkgPath
+ path = r.p.PkgPath()
}
if pkg := r.p.imports[path]; pkg != nil {
return pkg
}
- name := r.string()
- height := r.len()
+ name := r.String()
+ height := r.Len()
pkg := types2.NewPackageHeight(path, name, height)
r.p.imports[path] = pkg
// TODO(mdempsky): The list of imported packages is important for
// go/types, but we could probably skip populating it for types2.
- imports := make([]*types2.Package, r.len())
+ imports := make([]*types2.Package, r.Len())
for i := range imports {
imports[i] = r.pkg()
}
@@ -177,19 +178,19 @@ func (r *reader2) doPkg() *types2.Package {
// @@@ Types
-func (r *reader2) typ() types2.Type {
+func (r *reader) typ() types2.Type {
return r.p.typIdx(r.typInfo(), r.dict)
}
-func (r *reader2) typInfo() typeInfo {
- r.sync(syncType)
- if r.bool() {
- return typeInfo{idx: r.len(), derived: true}
+func (r *reader) typInfo() typeInfo {
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: r.Len(), derived: true}
}
- return typeInfo{idx: r.reloc(relocType), derived: false}
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
}
-func (pr *pkgReader2) typIdx(info typeInfo, dict *reader2Dict) types2.Type {
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types2.Type {
idx := info.idx
var where *types2.Type
if info.derived {
@@ -203,7 +204,7 @@ func (pr *pkgReader2) typIdx(info typeInfo, dict *reader2Dict) types2.Type {
return typ
}
- r := pr.newReader(relocType, idx, syncTypeIdx)
+ r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
r.dict = dict
typ := r.doTyp()
@@ -218,16 +219,16 @@ func (pr *pkgReader2) typIdx(info typeInfo, dict *reader2Dict) types2.Type {
return typ
}
-func (r *reader2) doTyp() (res types2.Type) {
- switch tag := codeType(r.code(syncType)); tag {
+func (r *reader) doTyp() (res types2.Type) {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
default:
base.FatalfAt(src.NoXPos, "unhandled type tag: %v", tag)
panic("unreachable")
- case typeBasic:
- return types2.Typ[r.len()]
+ case pkgbits.TypeBasic:
+ return types2.Typ[r.Len()]
- case typeNamed:
+ case pkgbits.TypeNamed:
obj, targs := r.obj()
name := obj.(*types2.TypeName)
if len(targs) != 0 {
@@ -236,41 +237,41 @@ func (r *reader2) doTyp() (res types2.Type) {
}
return name.Type()
- case typeTypeParam:
- return r.dict.tparams[r.len()]
+ case pkgbits.TypeTypeParam:
+ return r.dict.tparams[r.Len()]
- case typeArray:
- len := int64(r.uint64())
+ case pkgbits.TypeArray:
+ len := int64(r.Uint64())
return types2.NewArray(r.typ(), len)
- case typeChan:
- dir := types2.ChanDir(r.len())
+ case pkgbits.TypeChan:
+ dir := types2.ChanDir(r.Len())
return types2.NewChan(dir, r.typ())
- case typeMap:
+ case pkgbits.TypeMap:
return types2.NewMap(r.typ(), r.typ())
- case typePointer:
+ case pkgbits.TypePointer:
return types2.NewPointer(r.typ())
- case typeSignature:
+ case pkgbits.TypeSignature:
return r.signature(nil, nil, nil)
- case typeSlice:
+ case pkgbits.TypeSlice:
return types2.NewSlice(r.typ())
- case typeStruct:
+ case pkgbits.TypeStruct:
return r.structType()
- case typeInterface:
+ case pkgbits.TypeInterface:
return r.interfaceType()
- case typeUnion:
+ case pkgbits.TypeUnion:
return r.unionType()
}
}
-func (r *reader2) structType() *types2.Struct {
- fields := make([]*types2.Var, r.len())
+func (r *reader) structType() *types2.Struct {
+ fields := make([]*types2.Var, r.Len())
var tags []string
for i := range fields {
pos := r.pos()
pkg, name := r.selector()
ftyp := r.typ()
- tag := r.string()
- embedded := r.bool()
+ tag := r.String()
+ embedded := r.Bool()
fields[i] = types2.NewField(pos, pkg, name, ftyp, embedded)
if tag != "" {
@@ -283,17 +284,18 @@ func (r *reader2) structType() *types2.Struct {
return types2.NewStruct(fields, tags)
}
-func (r *reader2) unionType() *types2.Union {
- terms := make([]*types2.Term, r.len())
+func (r *reader) unionType() *types2.Union {
+ terms := make([]*types2.Term, r.Len())
for i := range terms {
- terms[i] = types2.NewTerm(r.bool(), r.typ())
+ terms[i] = types2.NewTerm(r.Bool(), r.typ())
}
return types2.NewUnion(terms)
}
-func (r *reader2) interfaceType() *types2.Interface {
- methods := make([]*types2.Func, r.len())
- embeddeds := make([]types2.Type, r.len())
+func (r *reader) interfaceType() *types2.Interface {
+ methods := make([]*types2.Func, r.Len())
+ embeddeds := make([]types2.Type, r.Len())
+ implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
for i := range methods {
pos := r.pos()
@@ -306,30 +308,34 @@ func (r *reader2) interfaceType() *types2.Interface {
embeddeds[i] = r.typ()
}
- return types2.NewInterfaceType(methods, embeddeds)
+ iface := types2.NewInterfaceType(methods, embeddeds)
+ if implicit {
+ iface.MarkImplicit()
+ }
+ return iface
}
-func (r *reader2) signature(recv *types2.Var, rtparams, tparams []*types2.TypeParam) *types2.Signature {
- r.sync(syncSignature)
+func (r *reader) signature(recv *types2.Var, rtparams, tparams []*types2.TypeParam) *types2.Signature {
+ r.Sync(pkgbits.SyncSignature)
params := r.params()
results := r.params()
- variadic := r.bool()
+ variadic := r.Bool()
return types2.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
}
-func (r *reader2) params() *types2.Tuple {
- r.sync(syncParams)
- params := make([]*types2.Var, r.len())
+func (r *reader) params() *types2.Tuple {
+ r.Sync(pkgbits.SyncParams)
+ params := make([]*types2.Var, r.Len())
for i := range params {
params[i] = r.param()
}
return types2.NewTuple(params...)
}
-func (r *reader2) param() *types2.Var {
- r.sync(syncParam)
+func (r *reader) param() *types2.Var {
+ r.Sync(pkgbits.SyncParam)
pos := r.pos()
pkg, name := r.localIdent()
@@ -340,15 +346,15 @@ func (r *reader2) param() *types2.Var {
// @@@ Objects
-func (r *reader2) obj() (types2.Object, []types2.Type) {
- r.sync(syncObject)
+func (r *reader) obj() (types2.Object, []types2.Type) {
+ r.Sync(pkgbits.SyncObject)
- assert(!r.bool())
+ assert(!r.Bool())
- pkg, name := r.p.objIdx(r.reloc(relocObj))
+ pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
obj := pkg.Scope().Lookup(name)
- targs := make([]types2.Type, r.len())
+ targs := make([]types2.Type, r.Len())
for i := range targs {
targs[i] = r.typ()
}
@@ -356,47 +362,47 @@ func (r *reader2) obj() (types2.Object, []types2.Type) {
return obj, targs
}
-func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
- rname := pr.newReader(relocName, idx, syncObject1)
+func (pr *pkgReader) objIdx(idx int) (*types2.Package, string) {
+ rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
objPkg, objName := rname.qualifiedIdent()
assert(objName != "")
- tag := codeObj(rname.code(syncCodeObj))
+ tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
- if tag == objStub {
+ if tag == pkgbits.ObjStub {
assert(objPkg == nil || objPkg == types2.Unsafe)
return objPkg, objName
}
- dict := pr.objDictIdx(idx)
+ objPkg.Scope().InsertLazy(objName, func() types2.Object {
+ dict := pr.objDictIdx(idx)
- r := pr.newReader(relocObj, idx, syncObject1)
- r.dict = dict
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ r.dict = dict
- objPkg.Scope().InsertLazy(objName, func() types2.Object {
switch tag {
default:
panic("weird")
- case objAlias:
+ case pkgbits.ObjAlias:
pos := r.pos()
typ := r.typ()
return types2.NewTypeName(pos, objPkg, objName, typ)
- case objConst:
+ case pkgbits.ObjConst:
pos := r.pos()
typ := r.typ()
- val := r.value()
+ val := r.Value()
return types2.NewConst(pos, objPkg, objName, typ, val)
- case objFunc:
+ case pkgbits.ObjFunc:
pos := r.pos()
tparams := r.typeParamNames()
sig := r.signature(nil, nil, tparams)
return types2.NewFunc(pos, objPkg, objName, sig)
- case objType:
+ case pkgbits.ObjType:
pos := r.pos()
return types2.NewTypeNameLazy(pos, objPkg, objName, func(named *types2.Named) (tparams []*types2.TypeParam, underlying types2.Type, methods []*types2.Func) {
@@ -408,7 +414,7 @@ func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
// about it, so maybe we can avoid worrying about that here.
underlying = r.typ().Underlying()
- methods = make([]*types2.Func, r.len())
+ methods = make([]*types2.Func, r.Len())
for i := range methods {
methods[i] = r.method()
}
@@ -416,7 +422,7 @@ func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
return
})
- case objVar:
+ case pkgbits.ObjVar:
pos := r.pos()
typ := r.typ()
return types2.NewVar(pos, objPkg, objName, typ)
@@ -426,37 +432,37 @@ func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
return objPkg, objName
}
-func (pr *pkgReader2) objDictIdx(idx int) *reader2Dict {
- r := pr.newReader(relocObjDict, idx, syncObject1)
+func (pr *pkgReader) objDictIdx(idx int) *readerDict {
+ r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
- var dict reader2Dict
+ var dict readerDict
- if implicits := r.len(); implicits != 0 {
+ if implicits := r.Len(); implicits != 0 {
base.Fatalf("unexpected object with %v implicit type parameter(s)", implicits)
}
- dict.bounds = make([]typeInfo, r.len())
+ dict.bounds = make([]typeInfo, r.Len())
for i := range dict.bounds {
dict.bounds[i] = r.typInfo()
}
- dict.derived = make([]derivedInfo, r.len())
+ dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]types2.Type, len(dict.derived))
for i := range dict.derived {
- dict.derived[i] = derivedInfo{r.reloc(relocType), r.bool()}
+ dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
}
- // function references follow, but reader2 doesn't need those
+ // function references follow, but reader doesn't need those
return &dict
}
-func (r *reader2) typeParamNames() []*types2.TypeParam {
- r.sync(syncTypeParamNames)
+func (r *reader) typeParamNames() []*types2.TypeParam {
+ r.Sync(pkgbits.SyncTypeParamNames)
// Note: This code assumes it only processes objects without
// implement type parameters. This is currently fine, because
- // reader2 is only used to read in exported declarations, which are
+ // reader is only used to read in exported declarations, which are
// always package scoped.
if len(r.dict.bounds) == 0 {
@@ -484,8 +490,8 @@ func (r *reader2) typeParamNames() []*types2.TypeParam {
return r.dict.tparams
}
-func (r *reader2) method() *types2.Func {
- r.sync(syncMethod)
+func (r *reader) method() *types2.Func {
+ r.Sync(pkgbits.SyncMethod)
pos := r.pos()
pkg, name := r.selector()
@@ -496,11 +502,11 @@ func (r *reader2) method() *types2.Func {
return types2.NewFunc(pos, pkg, name, sig)
}
-func (r *reader2) qualifiedIdent() (*types2.Package, string) { return r.ident(syncSym) }
-func (r *reader2) localIdent() (*types2.Package, string) { return r.ident(syncLocalIdent) }
-func (r *reader2) selector() (*types2.Package, string) { return r.ident(syncSelector) }
+func (r *reader) qualifiedIdent() (*types2.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types2.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types2.Package, string) { return r.ident(pkgbits.SyncSelector) }
-func (r *reader2) ident(marker syncMarker) (*types2.Package, string) {
- r.sync(marker)
- return r.pkg(), r.string()
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types2.Package, string) {
+ r.Sync(marker)
+ return r.pkg(), r.String()
}
diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go
index 7da9b24940..be57a8fbc6 100644
--- a/src/cmd/compile/internal/ir/copy.go
+++ b/src/cmd/compile/internal/ir/copy.go
@@ -79,7 +79,7 @@ func DeepCopy(pos src.XPos, n Node) Node {
var edit func(Node) Node
edit = func(x Node) Node {
switch x.Op() {
- case OPACK, ONAME, ONONAME, OLITERAL, ONIL, OTYPE:
+ case ONAME, ONONAME, OLITERAL, ONIL, OTYPE:
return x
}
x = Copy(x)
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 68303c0581..156fe96493 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -202,7 +202,10 @@ type CompLitExpr struct {
Ntype Ntype
List Nodes // initialized values
Prealloc *Name
- Len int64 // backing array length for OSLICELIT
+ // For OSLICELIT, Len is the backing array length.
+ // For OMAPLIT, Len is the number of entries that we've removed from List and
+ // generated explicit mapassign calls for. This is used to inform the map alloc hint.
+ Len int64
}
func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index 033188547b..12a463c8a4 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -202,7 +202,6 @@ var OpPrec = []int{
ONIL: 8,
ONONAME: 8,
OOFFSETOF: 8,
- OPACK: 8,
OPANIC: 8,
OPAREN: 8,
OPRINTN: 8,
@@ -213,13 +212,7 @@ var OpPrec = []int{
OSTR2BYTES: 8,
OSTR2RUNES: 8,
OSTRUCTLIT: 8,
- OTARRAY: 8,
- OTSLICE: 8,
- OTCHAN: 8,
OTFUNC: 8,
- OTINTER: 8,
- OTMAP: 8,
- OTSTRUCT: 8,
OTYPE: 8,
OUNSAFEADD: 8,
OUNSAFESLICE: 8,
@@ -640,7 +633,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
return
}
fallthrough
- case OPACK, ONONAME:
+ case ONONAME:
fmt.Fprint(s, n.Sym())
case OLINKSYMOFFSET:
@@ -654,49 +647,6 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
fmt.Fprintf(s, "%v", n.Type())
- case OTSLICE:
- n := n.(*SliceType)
- if n.DDD {
- fmt.Fprintf(s, "...%v", n.Elem)
- } else {
- fmt.Fprintf(s, "[]%v", n.Elem) // happens before typecheck
- }
-
- case OTARRAY:
- n := n.(*ArrayType)
- if n.Len == nil {
- fmt.Fprintf(s, "[...]%v", n.Elem)
- } else {
- fmt.Fprintf(s, "[%v]%v", n.Len, n.Elem)
- }
-
- case OTMAP:
- n := n.(*MapType)
- fmt.Fprintf(s, "map[%v]%v", n.Key, n.Elem)
-
- case OTCHAN:
- n := n.(*ChanType)
- switch n.Dir {
- case types.Crecv:
- fmt.Fprintf(s, "<-chan %v", n.Elem)
-
- case types.Csend:
- fmt.Fprintf(s, "chan<- %v", n.Elem)
-
- default:
- if n.Elem != nil && n.Elem.Op() == OTCHAN && n.Elem.(*ChanType).Dir == types.Crecv {
- fmt.Fprintf(s, "chan (%v)", n.Elem)
- } else {
- fmt.Fprintf(s, "chan %v", n.Elem)
- }
- }
-
- case OTSTRUCT:
- fmt.Fprint(s, "<struct>")
-
- case OTINTER:
- fmt.Fprint(s, "<inter>")
-
case OTFUNC:
fmt.Fprint(s, "<func>")
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 41c96079f7..29c77444a2 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -31,8 +31,7 @@ import (
// using a special data structure passed in a register.
//
// A method declaration is represented like functions, except f.Sym
-// will be the qualified method name (e.g., "T.m") and
-// f.Func.Shortname is the bare method name (e.g., "m").
+// will be the qualified method name (e.g., "T.m").
//
// A method expression (T.M) is represented as an OMETHEXPR node,
// in which n.Left and n.Right point to the type and method, respectively.
@@ -56,8 +55,6 @@ type Func struct {
Nname *Name // ONAME node
OClosure *ClosureExpr // OCLOSURE node
- Shortname *types.Sym
-
// Extra entry code for the function. For example, allocate and initialize
// memory for escaping parameters.
Enter Nodes
@@ -133,6 +130,10 @@ type Func struct {
// function for go:nowritebarrierrec analysis. Only filled in
// if nowritebarrierrecCheck != nil.
NWBRCalls *[]SymAndPos
+
+ // For wrapper functions, WrappedFunc point to the original Func.
+ // Currently only used for go/defer wrappers.
+ WrappedFunc *Func
}
func NewFunc(pos src.XPos) *Func {
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index 1d4110c73c..f522d3e76a 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -48,7 +48,6 @@ type Name struct {
Opt interface{} // for use by escape analysis
Embed *[]Embed // list of embedded files, for ONAME var
- PkgName *PkgName // real package for import . names
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
// For a closure var, the ONAME node of the outer captured variable.
// For the case-local variables of a type switch, the type switch guard (OTYPESW).
@@ -536,22 +535,3 @@ type Embed struct {
Pos src.XPos
Patterns []string
}
-
-// A Pack is an identifier referring to an imported package.
-type PkgName struct {
- miniNode
- sym *types.Sym
- Pkg *types.Pkg
- Used bool
-}
-
-func (p *PkgName) Sym() *types.Sym { return p.sym }
-
-func (*PkgName) CanBeNtype() {}
-
-func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName {
- p := &PkgName{sym: sym, Pkg: pkg}
- p.op = OPACK
- p.pos = pos
- return p
-}
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 5fdccf8927..e4cff85136 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -118,7 +118,6 @@ const (
// Also used for a qualified package identifier that hasn't been resolved yet.
ONONAME
OTYPE // type name
- OPACK // import
OLITERAL // literal
ONIL // nil
@@ -291,15 +290,10 @@ const (
OFUNCINST // instantiation of a generic function
// types
- OTCHAN // chan int
- OTMAP // map[string]int
- OTSTRUCT // struct{}
- OTINTER // interface{}
// OTFUNC: func() - Recv is receiver field, Params is list of param fields, Results is
// list of result fields.
+ // TODO(mdempsky): Remove.
OTFUNC
- OTARRAY // [8]int or [...]int
- OTSLICE // []int
// misc
// intermediate representation of an inlined call. Uses Init (assignments
@@ -533,7 +527,7 @@ func HasNamedResults(fn *Func) bool {
// their usage position.
func HasUniquePos(n Node) bool {
switch n.Op() {
- case ONAME, OPACK:
+ case ONAME:
return false
case OLITERAL, ONIL, OTYPE:
if n.Sym() != nil {
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index 44988880c8..22ff885d68 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -59,29 +59,6 @@ func (n *AddrExpr) editChildren(edit func(Node) Node) {
}
}
-func (n *ArrayType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *ArrayType) copy() Node {
- c := *n
- return &c
-}
-func (n *ArrayType) doChildren(do func(Node) bool) bool {
- if n.Len != nil && do(n.Len) {
- return true
- }
- if n.Elem != nil && do(n.Elem) {
- return true
- }
- return false
-}
-func (n *ArrayType) editChildren(edit func(Node) Node) {
- if n.Len != nil {
- n.Len = edit(n.Len).(Node)
- }
- if n.Elem != nil {
- n.Elem = edit(n.Elem).(Ntype)
- }
-}
-
func (n *AssignListStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *AssignListStmt) copy() Node {
c := *n
@@ -309,23 +286,6 @@ func (n *CaseClause) editChildren(edit func(Node) Node) {
editNodes(n.Body, edit)
}
-func (n *ChanType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *ChanType) copy() Node {
- c := *n
- return &c
-}
-func (n *ChanType) doChildren(do func(Node) bool) bool {
- if n.Elem != nil && do(n.Elem) {
- return true
- }
- return false
-}
-func (n *ChanType) editChildren(edit func(Node) Node) {
- if n.Elem != nil {
- n.Elem = edit(n.Elem).(Ntype)
- }
-}
-
func (n *ClosureExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ClosureExpr) copy() Node {
c := *n
@@ -752,22 +712,6 @@ func (n *InstExpr) editChildren(edit func(Node) Node) {
editNodes(n.Targs, edit)
}
-func (n *InterfaceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *InterfaceType) copy() Node {
- c := *n
- c.Methods = copyFields(c.Methods)
- return &c
-}
-func (n *InterfaceType) doChildren(do func(Node) bool) bool {
- if doFields(n.Methods, do) {
- return true
- }
- return false
-}
-func (n *InterfaceType) editChildren(edit func(Node) Node) {
- editFields(n.Methods, edit)
-}
-
func (n *KeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *KeyExpr) copy() Node {
c := *n
@@ -884,29 +828,6 @@ func (n *MakeExpr) editChildren(edit func(Node) Node) {
}
}
-func (n *MapType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *MapType) copy() Node {
- c := *n
- return &c
-}
-func (n *MapType) doChildren(do func(Node) bool) bool {
- if n.Key != nil && do(n.Key) {
- return true
- }
- if n.Elem != nil && do(n.Elem) {
- return true
- }
- return false
-}
-func (n *MapType) editChildren(edit func(Node) Node) {
- if n.Key != nil {
- n.Key = edit(n.Key).(Ntype)
- }
- if n.Elem != nil {
- n.Elem = edit(n.Elem).(Ntype)
- }
-}
-
func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
@@ -947,17 +868,6 @@ func (n *ParenExpr) editChildren(edit func(Node) Node) {
}
}
-func (n *PkgName) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *PkgName) copy() Node {
- c := *n
- return &c
-}
-func (n *PkgName) doChildren(do func(Node) bool) bool {
- return false
-}
-func (n *PkgName) editChildren(edit func(Node) Node) {
-}
-
func (n *RangeStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *RangeStmt) copy() Node {
c := *n
@@ -1212,23 +1122,6 @@ func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) {
}
}
-func (n *SliceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *SliceType) copy() Node {
- c := *n
- return &c
-}
-func (n *SliceType) doChildren(do func(Node) bool) bool {
- if n.Elem != nil && do(n.Elem) {
- return true
- }
- return false
-}
-func (n *SliceType) editChildren(edit func(Node) Node) {
- if n.Elem != nil {
- n.Elem = edit(n.Elem).(Ntype)
- }
-}
-
func (n *StarExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *StarExpr) copy() Node {
c := *n
@@ -1273,22 +1166,6 @@ func (n *StructKeyExpr) editChildren(edit func(Node) Node) {
}
}
-func (n *StructType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *StructType) copy() Node {
- c := *n
- c.Fields = copyFields(c.Fields)
- return &c
-}
-func (n *StructType) doChildren(do func(Node) bool) bool {
- if doFields(n.Fields, do) {
- return true
- }
- return false
-}
-func (n *StructType) editChildren(edit func(Node) Node) {
- editFields(n.Fields, edit)
-}
-
func (n *SwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *SwitchStmt) copy() Node {
c := *n
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
index b8cee71818..f623735f6d 100644
--- a/src/cmd/compile/internal/ir/op_string.go
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -12,169 +12,162 @@ func _() {
_ = x[ONAME-1]
_ = x[ONONAME-2]
_ = x[OTYPE-3]
- _ = x[OPACK-4]
- _ = x[OLITERAL-5]
- _ = x[ONIL-6]
- _ = x[OADD-7]
- _ = x[OSUB-8]
- _ = x[OOR-9]
- _ = x[OXOR-10]
- _ = x[OADDSTR-11]
- _ = x[OADDR-12]
- _ = x[OANDAND-13]
- _ = x[OAPPEND-14]
- _ = x[OBYTES2STR-15]
- _ = x[OBYTES2STRTMP-16]
- _ = x[ORUNES2STR-17]
- _ = x[OSTR2BYTES-18]
- _ = x[OSTR2BYTESTMP-19]
- _ = x[OSTR2RUNES-20]
- _ = x[OSLICE2ARRPTR-21]
- _ = x[OAS-22]
- _ = x[OAS2-23]
- _ = x[OAS2DOTTYPE-24]
- _ = x[OAS2FUNC-25]
- _ = x[OAS2MAPR-26]
- _ = x[OAS2RECV-27]
- _ = x[OASOP-28]
- _ = x[OCALL-29]
- _ = x[OCALLFUNC-30]
- _ = x[OCALLMETH-31]
- _ = x[OCALLINTER-32]
- _ = x[OCAP-33]
- _ = x[OCLOSE-34]
- _ = x[OCLOSURE-35]
- _ = x[OCOMPLIT-36]
- _ = x[OMAPLIT-37]
- _ = x[OSTRUCTLIT-38]
- _ = x[OARRAYLIT-39]
- _ = x[OSLICELIT-40]
- _ = x[OPTRLIT-41]
- _ = x[OCONV-42]
- _ = x[OCONVIFACE-43]
- _ = x[OCONVIDATA-44]
- _ = x[OCONVNOP-45]
- _ = x[OCOPY-46]
- _ = x[ODCL-47]
- _ = x[ODCLFUNC-48]
- _ = x[ODCLCONST-49]
- _ = x[ODCLTYPE-50]
- _ = x[ODELETE-51]
- _ = x[ODOT-52]
- _ = x[ODOTPTR-53]
- _ = x[ODOTMETH-54]
- _ = x[ODOTINTER-55]
- _ = x[OXDOT-56]
- _ = x[ODOTTYPE-57]
- _ = x[ODOTTYPE2-58]
- _ = x[OEQ-59]
- _ = x[ONE-60]
- _ = x[OLT-61]
- _ = x[OLE-62]
- _ = x[OGE-63]
- _ = x[OGT-64]
- _ = x[ODEREF-65]
- _ = x[OINDEX-66]
- _ = x[OINDEXMAP-67]
- _ = x[OKEY-68]
- _ = x[OSTRUCTKEY-69]
- _ = x[OLEN-70]
- _ = x[OMAKE-71]
- _ = x[OMAKECHAN-72]
- _ = x[OMAKEMAP-73]
- _ = x[OMAKESLICE-74]
- _ = x[OMAKESLICECOPY-75]
- _ = x[OMUL-76]
- _ = x[ODIV-77]
- _ = x[OMOD-78]
- _ = x[OLSH-79]
- _ = x[ORSH-80]
- _ = x[OAND-81]
- _ = x[OANDNOT-82]
- _ = x[ONEW-83]
- _ = x[ONOT-84]
- _ = x[OBITNOT-85]
- _ = x[OPLUS-86]
- _ = x[ONEG-87]
- _ = x[OOROR-88]
- _ = x[OPANIC-89]
- _ = x[OPRINT-90]
- _ = x[OPRINTN-91]
- _ = x[OPAREN-92]
- _ = x[OSEND-93]
- _ = x[OSLICE-94]
- _ = x[OSLICEARR-95]
- _ = x[OSLICESTR-96]
- _ = x[OSLICE3-97]
- _ = x[OSLICE3ARR-98]
- _ = x[OSLICEHEADER-99]
- _ = x[ORECOVER-100]
- _ = x[ORECOVERFP-101]
- _ = x[ORECV-102]
- _ = x[ORUNESTR-103]
- _ = x[OSELRECV2-104]
- _ = x[OIOTA-105]
- _ = x[OREAL-106]
- _ = x[OIMAG-107]
- _ = x[OCOMPLEX-108]
- _ = x[OALIGNOF-109]
- _ = x[OOFFSETOF-110]
- _ = x[OSIZEOF-111]
- _ = x[OUNSAFEADD-112]
- _ = x[OUNSAFESLICE-113]
- _ = x[OMETHEXPR-114]
- _ = x[OMETHVALUE-115]
- _ = x[OBLOCK-116]
- _ = x[OBREAK-117]
- _ = x[OCASE-118]
- _ = x[OCONTINUE-119]
- _ = x[ODEFER-120]
- _ = x[OFALL-121]
- _ = x[OFOR-122]
- _ = x[OFORUNTIL-123]
- _ = x[OGOTO-124]
- _ = x[OIF-125]
- _ = x[OLABEL-126]
- _ = x[OGO-127]
- _ = x[ORANGE-128]
- _ = x[ORETURN-129]
- _ = x[OSELECT-130]
- _ = x[OSWITCH-131]
- _ = x[OTYPESW-132]
- _ = x[OFUNCINST-133]
- _ = x[OTCHAN-134]
- _ = x[OTMAP-135]
- _ = x[OTSTRUCT-136]
- _ = x[OTINTER-137]
- _ = x[OTFUNC-138]
- _ = x[OTARRAY-139]
- _ = x[OTSLICE-140]
- _ = x[OINLCALL-141]
- _ = x[OEFACE-142]
- _ = x[OITAB-143]
- _ = x[OIDATA-144]
- _ = x[OSPTR-145]
- _ = x[OCFUNC-146]
- _ = x[OCHECKNIL-147]
- _ = x[OVARDEF-148]
- _ = x[OVARKILL-149]
- _ = x[OVARLIVE-150]
- _ = x[ORESULT-151]
- _ = x[OINLMARK-152]
- _ = x[OLINKSYMOFFSET-153]
- _ = x[ODYNAMICDOTTYPE-154]
- _ = x[ODYNAMICDOTTYPE2-155]
- _ = x[ODYNAMICTYPE-156]
- _ = x[OTAILCALL-157]
- _ = x[OGETG-158]
- _ = x[OGETCALLERPC-159]
- _ = x[OGETCALLERSP-160]
- _ = x[OEND-161]
+ _ = x[OLITERAL-4]
+ _ = x[ONIL-5]
+ _ = x[OADD-6]
+ _ = x[OSUB-7]
+ _ = x[OOR-8]
+ _ = x[OXOR-9]
+ _ = x[OADDSTR-10]
+ _ = x[OADDR-11]
+ _ = x[OANDAND-12]
+ _ = x[OAPPEND-13]
+ _ = x[OBYTES2STR-14]
+ _ = x[OBYTES2STRTMP-15]
+ _ = x[ORUNES2STR-16]
+ _ = x[OSTR2BYTES-17]
+ _ = x[OSTR2BYTESTMP-18]
+ _ = x[OSTR2RUNES-19]
+ _ = x[OSLICE2ARRPTR-20]
+ _ = x[OAS-21]
+ _ = x[OAS2-22]
+ _ = x[OAS2DOTTYPE-23]
+ _ = x[OAS2FUNC-24]
+ _ = x[OAS2MAPR-25]
+ _ = x[OAS2RECV-26]
+ _ = x[OASOP-27]
+ _ = x[OCALL-28]
+ _ = x[OCALLFUNC-29]
+ _ = x[OCALLMETH-30]
+ _ = x[OCALLINTER-31]
+ _ = x[OCAP-32]
+ _ = x[OCLOSE-33]
+ _ = x[OCLOSURE-34]
+ _ = x[OCOMPLIT-35]
+ _ = x[OMAPLIT-36]
+ _ = x[OSTRUCTLIT-37]
+ _ = x[OARRAYLIT-38]
+ _ = x[OSLICELIT-39]
+ _ = x[OPTRLIT-40]
+ _ = x[OCONV-41]
+ _ = x[OCONVIFACE-42]
+ _ = x[OCONVIDATA-43]
+ _ = x[OCONVNOP-44]
+ _ = x[OCOPY-45]
+ _ = x[ODCL-46]
+ _ = x[ODCLFUNC-47]
+ _ = x[ODCLCONST-48]
+ _ = x[ODCLTYPE-49]
+ _ = x[ODELETE-50]
+ _ = x[ODOT-51]
+ _ = x[ODOTPTR-52]
+ _ = x[ODOTMETH-53]
+ _ = x[ODOTINTER-54]
+ _ = x[OXDOT-55]
+ _ = x[ODOTTYPE-56]
+ _ = x[ODOTTYPE2-57]
+ _ = x[OEQ-58]
+ _ = x[ONE-59]
+ _ = x[OLT-60]
+ _ = x[OLE-61]
+ _ = x[OGE-62]
+ _ = x[OGT-63]
+ _ = x[ODEREF-64]
+ _ = x[OINDEX-65]
+ _ = x[OINDEXMAP-66]
+ _ = x[OKEY-67]
+ _ = x[OSTRUCTKEY-68]
+ _ = x[OLEN-69]
+ _ = x[OMAKE-70]
+ _ = x[OMAKECHAN-71]
+ _ = x[OMAKEMAP-72]
+ _ = x[OMAKESLICE-73]
+ _ = x[OMAKESLICECOPY-74]
+ _ = x[OMUL-75]
+ _ = x[ODIV-76]
+ _ = x[OMOD-77]
+ _ = x[OLSH-78]
+ _ = x[ORSH-79]
+ _ = x[OAND-80]
+ _ = x[OANDNOT-81]
+ _ = x[ONEW-82]
+ _ = x[ONOT-83]
+ _ = x[OBITNOT-84]
+ _ = x[OPLUS-85]
+ _ = x[ONEG-86]
+ _ = x[OOROR-87]
+ _ = x[OPANIC-88]
+ _ = x[OPRINT-89]
+ _ = x[OPRINTN-90]
+ _ = x[OPAREN-91]
+ _ = x[OSEND-92]
+ _ = x[OSLICE-93]
+ _ = x[OSLICEARR-94]
+ _ = x[OSLICESTR-95]
+ _ = x[OSLICE3-96]
+ _ = x[OSLICE3ARR-97]
+ _ = x[OSLICEHEADER-98]
+ _ = x[ORECOVER-99]
+ _ = x[ORECOVERFP-100]
+ _ = x[ORECV-101]
+ _ = x[ORUNESTR-102]
+ _ = x[OSELRECV2-103]
+ _ = x[OIOTA-104]
+ _ = x[OREAL-105]
+ _ = x[OIMAG-106]
+ _ = x[OCOMPLEX-107]
+ _ = x[OALIGNOF-108]
+ _ = x[OOFFSETOF-109]
+ _ = x[OSIZEOF-110]
+ _ = x[OUNSAFEADD-111]
+ _ = x[OUNSAFESLICE-112]
+ _ = x[OMETHEXPR-113]
+ _ = x[OMETHVALUE-114]
+ _ = x[OBLOCK-115]
+ _ = x[OBREAK-116]
+ _ = x[OCASE-117]
+ _ = x[OCONTINUE-118]
+ _ = x[ODEFER-119]
+ _ = x[OFALL-120]
+ _ = x[OFOR-121]
+ _ = x[OFORUNTIL-122]
+ _ = x[OGOTO-123]
+ _ = x[OIF-124]
+ _ = x[OLABEL-125]
+ _ = x[OGO-126]
+ _ = x[ORANGE-127]
+ _ = x[ORETURN-128]
+ _ = x[OSELECT-129]
+ _ = x[OSWITCH-130]
+ _ = x[OTYPESW-131]
+ _ = x[OFUNCINST-132]
+ _ = x[OTFUNC-133]
+ _ = x[OINLCALL-134]
+ _ = x[OEFACE-135]
+ _ = x[OITAB-136]
+ _ = x[OIDATA-137]
+ _ = x[OSPTR-138]
+ _ = x[OCFUNC-139]
+ _ = x[OCHECKNIL-140]
+ _ = x[OVARDEF-141]
+ _ = x[OVARKILL-142]
+ _ = x[OVARLIVE-143]
+ _ = x[ORESULT-144]
+ _ = x[OINLMARK-145]
+ _ = x[OLINKSYMOFFSET-146]
+ _ = x[ODYNAMICDOTTYPE-147]
+ _ = x[ODYNAMICDOTTYPE2-148]
+ _ = x[ODYNAMICTYPE-149]
+ _ = x[OTAILCALL-150]
+ _ = x[OGETG-151]
+ _ = x[OGETCALLERPC-152]
+ _ = x[OGETCALLERSP-153]
+ _ = x[OEND-154]
}
-const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
+const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTFUNCINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 208, 213, 220, 227, 233, 242, 250, 258, 264, 268, 277, 286, 293, 297, 300, 307, 315, 322, 328, 331, 337, 344, 352, 356, 363, 371, 373, 375, 377, 379, 381, 383, 388, 393, 401, 404, 413, 416, 420, 428, 435, 444, 457, 460, 463, 466, 469, 472, 475, 481, 484, 487, 493, 497, 500, 504, 509, 514, 520, 525, 529, 534, 542, 550, 556, 565, 576, 583, 592, 596, 603, 611, 615, 619, 623, 630, 637, 645, 651, 660, 671, 679, 688, 693, 698, 702, 710, 715, 719, 722, 730, 734, 736, 741, 743, 748, 754, 760, 766, 772, 780, 785, 789, 796, 802, 807, 813, 819, 826, 831, 835, 840, 844, 849, 857, 863, 870, 877, 883, 890, 903, 917, 932, 943, 951, 955, 966, 977, 980}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 132, 134, 137, 147, 154, 161, 168, 172, 176, 184, 192, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 282, 289, 293, 296, 303, 311, 318, 324, 327, 333, 340, 348, 352, 359, 367, 369, 371, 373, 375, 377, 379, 384, 389, 397, 400, 409, 412, 416, 424, 431, 440, 453, 456, 459, 462, 465, 468, 471, 477, 480, 483, 489, 493, 496, 500, 505, 510, 516, 521, 525, 530, 538, 546, 552, 561, 572, 579, 588, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 656, 667, 675, 684, 689, 694, 698, 706, 711, 715, 718, 726, 730, 732, 737, 739, 744, 750, 756, 762, 768, 776, 781, 788, 793, 797, 802, 806, 811, 819, 825, 832, 839, 845, 852, 865, 879, 894, 905, 913, 917, 928, 939, 942}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index a4421fcf53..fca11ffc7c 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
_64bit uintptr // size on 64bit platforms
}{
{Func{}, 192, 328},
- {Name{}, 112, 200},
+ {Name{}, 108, 192},
}
for _, tt := range tests {
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
index 63dd673dcd..f8aa35da4c 100644
--- a/src/cmd/compile/internal/ir/type.go
+++ b/src/cmd/compile/internal/ir/type.go
@@ -58,81 +58,6 @@ func (n *miniType) setOTYPE(t *types.Type, self Ntype) {
func (n *miniType) Sym() *types.Sym { return nil } // for Format OTYPE
func (n *miniType) Implicit() bool { return false } // for Format OTYPE
-// A ChanType represents a chan Elem syntax with the direction Dir.
-type ChanType struct {
- miniType
- Elem Ntype
- Dir types.ChanDir
-}
-
-func NewChanType(pos src.XPos, elem Ntype, dir types.ChanDir) *ChanType {
- n := &ChanType{Elem: elem, Dir: dir}
- n.op = OTCHAN
- n.pos = pos
- return n
-}
-
-func (n *ChanType) SetOTYPE(t *types.Type) {
- n.setOTYPE(t, n)
- n.Elem = nil
-}
-
-// A MapType represents a map[Key]Value type syntax.
-type MapType struct {
- miniType
- Key Ntype
- Elem Ntype
-}
-
-func NewMapType(pos src.XPos, key, elem Ntype) *MapType {
- n := &MapType{Key: key, Elem: elem}
- n.op = OTMAP
- n.pos = pos
- return n
-}
-
-func (n *MapType) SetOTYPE(t *types.Type) {
- n.setOTYPE(t, n)
- n.Key = nil
- n.Elem = nil
-}
-
-// A StructType represents a struct { ... } type syntax.
-type StructType struct {
- miniType
- Fields []*Field
-}
-
-func NewStructType(pos src.XPos, fields []*Field) *StructType {
- n := &StructType{Fields: fields}
- n.op = OTSTRUCT
- n.pos = pos
- return n
-}
-
-func (n *StructType) SetOTYPE(t *types.Type) {
- n.setOTYPE(t, n)
- n.Fields = nil
-}
-
-// An InterfaceType represents a struct { ... } type syntax.
-type InterfaceType struct {
- miniType
- Methods []*Field
-}
-
-func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType {
- n := &InterfaceType{Methods: methods}
- n.op = OTINTER
- n.pos = pos
- return n
-}
-
-func (n *InterfaceType) SetOTYPE(t *types.Type) {
- n.setOTYPE(t, n)
- n.Methods = nil
-}
-
// A FuncType represents a func(Args) Results type syntax.
type FuncType struct {
miniType
@@ -240,47 +165,6 @@ func editFields(list []*Field, edit func(Node) Node) {
}
}
-// A SliceType represents a []Elem type syntax.
-// If DDD is true, it's the ...Elem at the end of a function list.
-type SliceType struct {
- miniType
- Elem Ntype
- DDD bool
-}
-
-func NewSliceType(pos src.XPos, elem Ntype) *SliceType {
- n := &SliceType{Elem: elem}
- n.op = OTSLICE
- n.pos = pos
- return n
-}
-
-func (n *SliceType) SetOTYPE(t *types.Type) {
- n.setOTYPE(t, n)
- n.Elem = nil
-}
-
-// An ArrayType represents a [Len]Elem type syntax.
-// If Len is nil, the type is a [...]Elem in an array literal.
-type ArrayType struct {
- miniType
- Len Node
- Elem Ntype
-}
-
-func NewArrayType(pos src.XPos, len Node, elem Ntype) *ArrayType {
- n := &ArrayType{Len: len, Elem: elem}
- n.op = OTARRAY
- n.pos = pos
- return n
-}
-
-func (n *ArrayType) SetOTYPE(t *types.Type) {
- n.setOTYPE(t, n)
- n.Len = nil
- n.Elem = nil
-}
-
// A typeNode is a Node wrapper for type t.
type typeNode struct {
miniNode
diff --git a/src/cmd/compile/internal/noder/codes.go b/src/cmd/compile/internal/noder/codes.go
index f8cb7729ac..bc0831dd78 100644
--- a/src/cmd/compile/internal/noder/codes.go
+++ b/src/cmd/compile/internal/noder/codes.go
@@ -6,63 +6,12 @@
package noder
-type code interface {
- marker() syncMarker
- value() int
-}
-
-type codeVal int
-
-func (c codeVal) marker() syncMarker { return syncVal }
-func (c codeVal) value() int { return int(c) }
-
-const (
- valBool codeVal = iota
- valString
- valInt64
- valBigInt
- valBigRat
- valBigFloat
-)
-
-type codeType int
-
-func (c codeType) marker() syncMarker { return syncType }
-func (c codeType) value() int { return int(c) }
-
-const (
- typeBasic codeType = iota
- typeNamed
- typePointer
- typeSlice
- typeArray
- typeChan
- typeMap
- typeSignature
- typeStruct
- typeInterface
- typeUnion
- typeTypeParam
-)
-
-type codeObj int
-
-func (c codeObj) marker() syncMarker { return syncCodeObj }
-func (c codeObj) value() int { return int(c) }
-
-const (
- objAlias codeObj = iota
- objConst
- objType
- objFunc
- objVar
- objStub
-)
+import "internal/pkgbits"
type codeStmt int
-func (c codeStmt) marker() syncMarker { return syncStmt1 }
-func (c codeStmt) value() int { return int(c) }
+func (c codeStmt) Marker() pkgbits.SyncMarker { return pkgbits.SyncStmt1 }
+func (c codeStmt) Value() int { return int(c) }
const (
stmtEnd codeStmt = iota
@@ -87,8 +36,8 @@ const (
type codeExpr int
-func (c codeExpr) marker() syncMarker { return syncExpr }
-func (c codeExpr) value() int { return int(c) }
+func (c codeExpr) Marker() pkgbits.SyncMarker { return pkgbits.SyncExpr }
+func (c codeExpr) Value() int { return int(c) }
// TODO(mdempsky): Split expr into addr, for lvalues.
const (
@@ -112,8 +61,8 @@ const (
type codeDecl int
-func (c codeDecl) marker() syncMarker { return syncDecl }
-func (c codeDecl) value() int { return int(c) }
+func (c codeDecl) Marker() pkgbits.SyncMarker { return pkgbits.SyncDecl }
+func (c codeDecl) Value() int { return int(c) }
const (
declEnd codeDecl = iota
diff --git a/src/cmd/compile/internal/noder/decoder.go b/src/cmd/compile/internal/noder/decoder.go
deleted file mode 100644
index 2c18727420..0000000000
--- a/src/cmd/compile/internal/noder/decoder.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// UNREVIEWED
-
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
- "encoding/binary"
- "fmt"
- "go/constant"
- "go/token"
- "math/big"
- "os"
- "runtime"
- "strings"
-
- "cmd/compile/internal/base"
-)
-
-type pkgDecoder struct {
- pkgPath string
-
- elemEndsEnds [numRelocs]uint32
- elemEnds []uint32
- elemData string
-}
-
-func newPkgDecoder(pkgPath, input string) pkgDecoder {
- pr := pkgDecoder{
- pkgPath: pkgPath,
- }
-
- // TODO(mdempsky): Implement direct indexing of input string to
- // avoid copying the position information.
-
- r := strings.NewReader(input)
-
- assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
-
- pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
- assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
-
- pos, err := r.Seek(0, os.SEEK_CUR)
- assert(err == nil)
-
- pr.elemData = input[pos:]
- assert(len(pr.elemData) == int(pr.elemEnds[len(pr.elemEnds)-1]))
-
- return pr
-}
-
-func (pr *pkgDecoder) numElems(k reloc) int {
- count := int(pr.elemEndsEnds[k])
- if k > 0 {
- count -= int(pr.elemEndsEnds[k-1])
- }
- return count
-}
-
-func (pr *pkgDecoder) totalElems() int {
- return len(pr.elemEnds)
-}
-
-func (pr *pkgDecoder) absIdx(k reloc, idx int) int {
- absIdx := idx
- if k > 0 {
- absIdx += int(pr.elemEndsEnds[k-1])
- }
- if absIdx >= int(pr.elemEndsEnds[k]) {
- base.Fatalf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
- }
- return absIdx
-}
-
-func (pr *pkgDecoder) dataIdx(k reloc, idx int) string {
- absIdx := pr.absIdx(k, idx)
-
- var start uint32
- if absIdx > 0 {
- start = pr.elemEnds[absIdx-1]
- }
- end := pr.elemEnds[absIdx]
-
- return pr.elemData[start:end]
-}
-
-func (pr *pkgDecoder) stringIdx(idx int) string {
- return pr.dataIdx(relocString, idx)
-}
-
-func (pr *pkgDecoder) newDecoder(k reloc, idx int, marker syncMarker) decoder {
- r := pr.newDecoderRaw(k, idx)
- r.sync(marker)
- return r
-}
-
-func (pr *pkgDecoder) newDecoderRaw(k reloc, idx int) decoder {
- r := decoder{
- common: pr,
- k: k,
- idx: idx,
- }
-
- // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
- r.data = *strings.NewReader(pr.dataIdx(k, idx))
-
- r.sync(syncRelocs)
- r.relocs = make([]relocEnt, r.len())
- for i := range r.relocs {
- r.sync(syncReloc)
- r.relocs[i] = relocEnt{reloc(r.len()), r.len()}
- }
-
- return r
-}
-
-type decoder struct {
- common *pkgDecoder
-
- relocs []relocEnt
- data strings.Reader
-
- k reloc
- idx int
-}
-
-func (r *decoder) checkErr(err error) {
- if err != nil {
- base.Fatalf("unexpected error: %v", err)
- }
-}
-
-func (r *decoder) rawUvarint() uint64 {
- x, err := binary.ReadUvarint(&r.data)
- r.checkErr(err)
- return x
-}
-
-func (r *decoder) rawVarint() int64 {
- ux := r.rawUvarint()
-
- // Zig-zag decode.
- x := int64(ux >> 1)
- if ux&1 != 0 {
- x = ^x
- }
- return x
-}
-
-func (r *decoder) rawReloc(k reloc, idx int) int {
- e := r.relocs[idx]
- assert(e.kind == k)
- return e.idx
-}
-
-func (r *decoder) sync(mWant syncMarker) {
- if !enableSync {
- return
- }
-
- pos, _ := r.data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved
- mHave := syncMarker(r.rawUvarint())
- writerPCs := make([]int, r.rawUvarint())
- for i := range writerPCs {
- writerPCs[i] = int(r.rawUvarint())
- }
-
- if mHave == mWant {
- return
- }
-
- // There's some tension here between printing:
- //
- // (1) full file paths that tools can recognize (e.g., so emacs
- // hyperlinks the "file:line" text for easy navigation), or
- //
- // (2) short file paths that are easier for humans to read (e.g., by
- // omitting redundant or irrelevant details, so it's easier to
- // focus on the useful bits that remain).
- //
- // The current formatting favors the former, as it seems more
- // helpful in practice. But perhaps the formatting could be improved
- // to better address both concerns. For example, use relative file
- // paths if they would be shorter, or rewrite file paths to contain
- // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
- // to reliably expand that again.
-
- fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.idx, pos)
-
- fmt.Printf("\nfound %v, written at:\n", mHave)
- if len(writerPCs) == 0 {
- fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
- }
- for _, pc := range writerPCs {
- fmt.Printf("\t%s\n", r.common.stringIdx(r.rawReloc(relocString, pc)))
- }
-
- fmt.Printf("\nexpected %v, reading at:\n", mWant)
- var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
- n := runtime.Callers(2, readerPCs[:])
- for _, pc := range fmtFrames(readerPCs[:n]...) {
- fmt.Printf("\t%s\n", pc)
- }
-
- // We already printed a stack trace for the reader, so now we can
- // simply exit. Printing a second one with panic or base.Fatalf
- // would just be noise.
- os.Exit(1)
-}
-
-func (r *decoder) bool() bool {
- r.sync(syncBool)
- x, err := r.data.ReadByte()
- r.checkErr(err)
- assert(x < 2)
- return x != 0
-}
-
-func (r *decoder) int64() int64 {
- r.sync(syncInt64)
- return r.rawVarint()
-}
-
-func (r *decoder) uint64() uint64 {
- r.sync(syncUint64)
- return r.rawUvarint()
-}
-
-func (r *decoder) len() int { x := r.uint64(); v := int(x); assert(uint64(v) == x); return v }
-func (r *decoder) int() int { x := r.int64(); v := int(x); assert(int64(v) == x); return v }
-func (r *decoder) uint() uint { x := r.uint64(); v := uint(x); assert(uint64(v) == x); return v }
-
-func (r *decoder) code(mark syncMarker) int {
- r.sync(mark)
- return r.len()
-}
-
-func (r *decoder) reloc(k reloc) int {
- r.sync(syncUseReloc)
- return r.rawReloc(k, r.len())
-}
-
-func (r *decoder) string() string {
- r.sync(syncString)
- return r.common.stringIdx(r.reloc(relocString))
-}
-
-func (r *decoder) strings() []string {
- res := make([]string, r.len())
- for i := range res {
- res[i] = r.string()
- }
- return res
-}
-
-func (r *decoder) value() constant.Value {
- r.sync(syncValue)
- isComplex := r.bool()
- val := r.scalar()
- if isComplex {
- val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
- }
- return val
-}
-
-func (r *decoder) scalar() constant.Value {
- switch tag := codeVal(r.code(syncVal)); tag {
- default:
- panic(fmt.Sprintf("unexpected scalar tag: %v", tag))
-
- case valBool:
- return constant.MakeBool(r.bool())
- case valString:
- return constant.MakeString(r.string())
- case valInt64:
- return constant.MakeInt64(r.int64())
- case valBigInt:
- return constant.Make(r.bigInt())
- case valBigRat:
- num := r.bigInt()
- denom := r.bigInt()
- return constant.Make(new(big.Rat).SetFrac(num, denom))
- case valBigFloat:
- return constant.Make(r.bigFloat())
- }
-}
-
-func (r *decoder) bigInt() *big.Int {
- v := new(big.Int).SetBytes([]byte(r.string()))
- if r.bool() {
- v.Neg(v)
- }
- return v
-}
-
-func (r *decoder) bigFloat() *big.Float {
- v := new(big.Float).SetPrec(512)
- assert(v.UnmarshalText([]byte(r.string())) == nil)
- return v
-}
diff --git a/src/cmd/compile/internal/noder/encoder.go b/src/cmd/compile/internal/noder/encoder.go
deleted file mode 100644
index b07b3a4a48..0000000000
--- a/src/cmd/compile/internal/noder/encoder.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// UNREVIEWED
-
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "go/constant"
- "io"
- "math/big"
- "runtime"
-
- "cmd/compile/internal/base"
-)
-
-type pkgEncoder struct {
- elems [numRelocs][]string
-
- stringsIdx map[string]int
-}
-
-func newPkgEncoder() pkgEncoder {
- return pkgEncoder{
- stringsIdx: make(map[string]int),
- }
-}
-
-func (pw *pkgEncoder) dump(out io.Writer) {
- writeUint32 := func(x uint32) {
- assert(binary.Write(out, binary.LittleEndian, x) == nil)
- }
-
- var sum uint32
- for _, elems := range &pw.elems {
- sum += uint32(len(elems))
- writeUint32(sum)
- }
-
- sum = 0
- for _, elems := range &pw.elems {
- for _, elem := range elems {
- sum += uint32(len(elem))
- writeUint32(sum)
- }
- }
-
- for _, elems := range &pw.elems {
- for _, elem := range elems {
- _, err := io.WriteString(out, elem)
- assert(err == nil)
- }
- }
-}
-
-func (pw *pkgEncoder) stringIdx(s string) int {
- if idx, ok := pw.stringsIdx[s]; ok {
- assert(pw.elems[relocString][idx] == s)
- return idx
- }
-
- idx := len(pw.elems[relocString])
- pw.elems[relocString] = append(pw.elems[relocString], s)
- pw.stringsIdx[s] = idx
- return idx
-}
-
-func (pw *pkgEncoder) newEncoder(k reloc, marker syncMarker) encoder {
- e := pw.newEncoderRaw(k)
- e.sync(marker)
- return e
-}
-
-func (pw *pkgEncoder) newEncoderRaw(k reloc) encoder {
- idx := len(pw.elems[k])
- pw.elems[k] = append(pw.elems[k], "") // placeholder
-
- return encoder{
- p: pw,
- k: k,
- idx: idx,
- }
-}
-
-// Encoders
-
-type encoder struct {
- p *pkgEncoder
-
- relocs []relocEnt
- data bytes.Buffer
-
- encodingRelocHeader bool
-
- k reloc
- idx int
-}
-
-func (w *encoder) flush() int {
- var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
-
- // Backup the data so we write the relocations at the front.
- var tmp bytes.Buffer
- io.Copy(&tmp, &w.data)
-
- // TODO(mdempsky): Consider writing these out separately so they're
- // easier to strip, along with function bodies, so that we can prune
- // down to just the data that's relevant to go/types.
- if w.encodingRelocHeader {
- base.Fatalf("encodingRelocHeader already true; recursive flush?")
- }
- w.encodingRelocHeader = true
- w.sync(syncRelocs)
- w.len(len(w.relocs))
- for _, rent := range w.relocs {
- w.sync(syncReloc)
- w.len(int(rent.kind))
- w.len(rent.idx)
- }
-
- io.Copy(&sb, &w.data)
- io.Copy(&sb, &tmp)
- w.p.elems[w.k][w.idx] = sb.String()
-
- return w.idx
-}
-
-func (w *encoder) checkErr(err error) {
- if err != nil {
- base.Fatalf("unexpected error: %v", err)
- }
-}
-
-func (w *encoder) rawUvarint(x uint64) {
- var buf [binary.MaxVarintLen64]byte
- n := binary.PutUvarint(buf[:], x)
- _, err := w.data.Write(buf[:n])
- w.checkErr(err)
-}
-
-func (w *encoder) rawVarint(x int64) {
- // Zig-zag encode.
- ux := uint64(x) << 1
- if x < 0 {
- ux = ^ux
- }
-
- w.rawUvarint(ux)
-}
-
-func (w *encoder) rawReloc(r reloc, idx int) int {
- // TODO(mdempsky): Use map for lookup.
- for i, rent := range w.relocs {
- if rent.kind == r && rent.idx == idx {
- return i
- }
- }
-
- i := len(w.relocs)
- w.relocs = append(w.relocs, relocEnt{r, idx})
- return i
-}
-
-func (w *encoder) sync(m syncMarker) {
- if !enableSync {
- return
- }
-
- // Writing out stack frame string references requires working
- // relocations, but writing out the relocations themselves involves
- // sync markers. To prevent infinite recursion, we simply trim the
- // stack frame for sync markers within the relocation header.
- var frames []string
- if !w.encodingRelocHeader && base.Debug.SyncFrames > 0 {
- pcs := make([]uintptr, base.Debug.SyncFrames)
- n := runtime.Callers(2, pcs)
- frames = fmtFrames(pcs[:n]...)
- }
-
- // TODO(mdempsky): Save space by writing out stack frames as a
- // linked list so we can share common stack frames.
- w.rawUvarint(uint64(m))
- w.rawUvarint(uint64(len(frames)))
- for _, frame := range frames {
- w.rawUvarint(uint64(w.rawReloc(relocString, w.p.stringIdx(frame))))
- }
-}
-
-func (w *encoder) bool(b bool) bool {
- w.sync(syncBool)
- var x byte
- if b {
- x = 1
- }
- err := w.data.WriteByte(x)
- w.checkErr(err)
- return b
-}
-
-func (w *encoder) int64(x int64) {
- w.sync(syncInt64)
- w.rawVarint(x)
-}
-
-func (w *encoder) uint64(x uint64) {
- w.sync(syncUint64)
- w.rawUvarint(x)
-}
-
-func (w *encoder) len(x int) { assert(x >= 0); w.uint64(uint64(x)) }
-func (w *encoder) int(x int) { w.int64(int64(x)) }
-func (w *encoder) uint(x uint) { w.uint64(uint64(x)) }
-
-func (w *encoder) reloc(r reloc, idx int) {
- w.sync(syncUseReloc)
- w.len(w.rawReloc(r, idx))
-}
-
-func (w *encoder) code(c code) {
- w.sync(c.marker())
- w.len(c.value())
-}
-
-func (w *encoder) string(s string) {
- w.sync(syncString)
- w.reloc(relocString, w.p.stringIdx(s))
-}
-
-func (w *encoder) strings(ss []string) {
- w.len(len(ss))
- for _, s := range ss {
- w.string(s)
- }
-}
-
-func (w *encoder) value(val constant.Value) {
- w.sync(syncValue)
- if w.bool(val.Kind() == constant.Complex) {
- w.scalar(constant.Real(val))
- w.scalar(constant.Imag(val))
- } else {
- w.scalar(val)
- }
-}
-
-func (w *encoder) scalar(val constant.Value) {
- switch v := constant.Val(val).(type) {
- default:
- panic(fmt.Sprintf("unhandled %v (%v)", val, val.Kind()))
- case bool:
- w.code(valBool)
- w.bool(v)
- case string:
- w.code(valString)
- w.string(v)
- case int64:
- w.code(valInt64)
- w.int64(v)
- case *big.Int:
- w.code(valBigInt)
- w.bigInt(v)
- case *big.Rat:
- w.code(valBigRat)
- w.bigInt(v.Num())
- w.bigInt(v.Denom())
- case *big.Float:
- w.code(valBigFloat)
- w.bigFloat(v)
- }
-}
-
-func (w *encoder) bigInt(v *big.Int) {
- b := v.Bytes()
- w.string(string(b)) // TODO: More efficient encoding.
- w.bool(v.Sign() < 0)
-}
-
-func (w *encoder) bigFloat(v *big.Float) {
- b := v.Append(nil, 'p', -1)
- w.string(string(b)) // TODO: More efficient encoding.
-}
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
index 8a9afeb095..4b5ae706c1 100644
--- a/src/cmd/compile/internal/noder/expr.go
+++ b/src/cmd/compile/internal/noder/expr.go
@@ -114,7 +114,7 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
case *syntax.CallExpr:
fun := g.expr(expr.Fun)
- return Call(pos, g.typ(typ), fun, g.exprs(expr.ArgList), expr.HasDots)
+ return g.callExpr(pos, g.typ(typ), fun, g.exprs(expr.ArgList), expr.HasDots)
case *syntax.IndexExpr:
args := unpackListExpr(expr.Index)
@@ -206,6 +206,53 @@ func (g *irgen) substType(typ *types.Type, tparams *types.Type, targs []ir.Node)
return newt
}
+// callExpr creates a call expression (which might be a type conversion, built-in
+// call, or a regular call) and does standard transforms, unless we are in a generic
+// function.
+func (g *irgen) callExpr(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool) ir.Node {
+ n := ir.NewCallExpr(pos, ir.OCALL, fun, args)
+ n.IsDDD = dots
+ typed(typ, n)
+
+ if fun.Op() == ir.OTYPE {
+ // Actually a type conversion, not a function call.
+ if !g.delayTransform() {
+ return transformConvCall(n)
+ }
+ return n
+ }
+
+ if fun, ok := fun.(*ir.Name); ok && fun.BuiltinOp != 0 {
+ if !g.delayTransform() {
+ return transformBuiltin(n)
+ }
+ return n
+ }
+
+ // Add information, now that we know that fun is actually being called.
+ switch fun := fun.(type) {
+ case *ir.SelectorExpr:
+ if fun.Op() == ir.OMETHVALUE {
+ op := ir.ODOTMETH
+ if fun.X.Type().IsInterface() {
+ op = ir.ODOTINTER
+ }
+ fun.SetOp(op)
+ // Set the type to include the receiver, since that's what
+ // later parts of the compiler expect
+ fun.SetType(fun.Selection.Type)
+ }
+ }
+
+ // A function instantiation (even if fully concrete) shouldn't be
+ // transformed yet, because we need to add the dictionary during the
+ // transformation.
+ if fun.Op() != ir.OFUNCINST && !g.delayTransform() {
+ transformCall(n)
+ }
+ return n
+}
+
// selectorExpr resolves the choice of ODOT, ODOTPTR, OMETHVALUE (eventually
// ODOTMETH & ODOTINTER), and OMETHEXPR and deals with embedded fields here rather
// than in typecheck.go.
@@ -332,13 +379,13 @@ func (g *irgen) exprs(exprs []syntax.Expr) []ir.Node {
}
func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
- if ptr, ok := types2.StructuralType(typ).(*types2.Pointer); ok {
+ if ptr, ok := types2.CoreType(typ).(*types2.Pointer); ok {
n := ir.NewAddrExpr(g.pos(lit), g.compLit(ptr.Elem(), lit))
n.SetOp(ir.OPTRLIT)
return typed(g.typ(typ), n)
}
- _, isStruct := types2.StructuralType(typ).(*types2.Struct)
+ _, isStruct := types2.CoreType(typ).(*types2.Struct)
exprs := make([]ir.Node, len(lit.ElemList))
for i, elem := range lit.ElemList {
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go
index 5524673e66..33acd6051a 100644
--- a/src/cmd/compile/internal/noder/helpers.go
+++ b/src/cmd/compile/internal/noder/helpers.go
@@ -98,95 +98,6 @@ func Binary(pos src.XPos, op ir.Op, typ *types.Type, x, y ir.Node) *ir.BinaryExp
}
}
-func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool) ir.Node {
- n := ir.NewCallExpr(pos, ir.OCALL, fun, args)
- n.IsDDD = dots
-
- if fun.Op() == ir.OTYPE {
- // Actually a type conversion, not a function call.
- if !fun.Type().IsInterface() &&
- (fun.Type().HasTParam() || args[0].Type().HasTParam()) {
- // For type params, we can transform if fun.Type() is known
- // to be an interface (in which case a CONVIFACE node will be
- // inserted). Otherwise, don't typecheck until we actually
- // know the type.
- return typed(typ, n)
- }
- typed(typ, n)
- return transformConvCall(n)
- }
-
- if fun, ok := fun.(*ir.Name); ok && fun.BuiltinOp != 0 {
- // For most Builtin ops, we delay doing transformBuiltin if any of the
- // args have type params, for a variety of reasons:
- //
- // OMAKE: transformMake can't choose specific ops OMAKESLICE, etc.
- // until arg type is known
- // OREAL/OIMAG: transformRealImag can't determine type float32/float64
- // until arg type known
- // OAPPEND: transformAppend requires that the arg is a slice
- // ODELETE: transformDelete requires that the arg is a map
- // OALIGNOF, OSIZEOF: can be eval'ed to a constant until types known.
- switch fun.BuiltinOp {
- case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.ODELETE, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- hasTParam := false
- for _, arg := range args {
- if fun.BuiltinOp == ir.OOFFSETOF {
- // It's the type of left operand of the
- // selection that matters, not the type of
- // the field itself (which is irrelevant for
- // offsetof).
- arg = arg.(*ir.SelectorExpr).X
- }
- if arg.Type().HasTParam() {
- hasTParam = true
- break
- }
- }
- if hasTParam {
- return typed(typ, n)
- }
- }
-
- typed(typ, n)
- return transformBuiltin(n)
- }
-
- // Add information, now that we know that fun is actually being called.
- switch fun := fun.(type) {
- case *ir.SelectorExpr:
- if fun.Op() == ir.OMETHVALUE {
- op := ir.ODOTMETH
- if fun.X.Type().IsInterface() {
- op = ir.ODOTINTER
- }
- fun.SetOp(op)
- // Set the type to include the receiver, since that's what
- // later parts of the compiler expect
- fun.SetType(fun.Selection.Type)
- }
- }
-
- if fun.Type().HasTParam() || fun.Op() == ir.OXDOT || fun.Op() == ir.OFUNCINST {
- // If the fun arg is or has a type param, we can't do all the
- // transformations, since we may not have needed properties yet
- // (e.g. number of return values, etc). The same applies if a fun
- // which is an XDOT could not be transformed yet because of a generic
- // type in the X of the selector expression.
- //
- // A function instantiation (even if fully concrete) shouldn't be
- // transformed yet, because we need to add the dictionary during the
- // transformation.
- return typed(typ, n)
- }
-
- // If no type params, do the normal call transformations. This
- // will convert OCALL to OCALLFUNC.
- typed(typ, n)
- transformCall(n)
- return n
-}
-
func Compare(pos src.XPos, typ *types.Type, op ir.Op, x, y ir.Node) *ir.BinaryExpr {
n := ir.NewBinaryExpr(pos, op, x, y)
typed(typ, n)
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index 58dffbad1e..0898a298eb 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -11,7 +11,6 @@ import (
"os"
pathpkg "path"
"runtime"
- "sort"
"strconv"
"strings"
"unicode"
@@ -20,7 +19,6 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/importer"
"cmd/compile/internal/ir"
- "cmd/compile/internal/syntax"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
@@ -28,7 +26,6 @@ import (
"cmd/internal/bio"
"cmd/internal/goobj"
"cmd/internal/objabi"
- "cmd/internal/src"
)
// haveLegacyImports records whether we've imported any packages
@@ -141,10 +138,6 @@ func openPackage(path string) (*os.File, error) {
return nil, errors.New("file not found")
}
-// myheight tracks the local package's height based on packages
-// imported so far.
-var myheight int
-
// resolveImportPath resolves an import path as it appears in a Go
// source file to the package's full path.
func resolveImportPath(path string) (string, error) {
@@ -187,42 +180,6 @@ func resolveImportPath(path string) (string, error) {
return path, nil
}
-func importfile(decl *syntax.ImportDecl) *types.Pkg {
- path, err := parseImportPath(decl.Path)
- if err != nil {
- base.Errorf("%s", err)
- return nil
- }
-
- pkg, _, err := readImportFile(path, typecheck.Target, nil, nil)
- if err != nil {
- base.Errorf("%s", err)
- return nil
- }
-
- if pkg != types.UnsafePkg && pkg.Height >= myheight {
- myheight = pkg.Height + 1
- }
- return pkg
-}
-
-func parseImportPath(pathLit *syntax.BasicLit) (string, error) {
- if pathLit.Kind != syntax.StringLit {
- return "", errors.New("import path must be a string")
- }
-
- path, err := strconv.Unquote(pathLit.Value)
- if err != nil {
- return "", errors.New("import path must be a string")
- }
-
- if err := checkImportPath(path, false); err != nil {
- return "", err
- }
-
- return path, err
-}
-
// readImportFile reads the import file for the given package path and
// returns its types.Pkg representation. If packages is non-nil, the
// types2.Package representation is also returned.
@@ -467,135 +424,3 @@ func checkImportPath(path string, allowSpace bool) error {
return nil
}
-
-func pkgnotused(lineno src.XPos, path string, name string) {
- // If the package was imported with a name other than the final
- // import path element, show it explicitly in the error message.
- // Note that this handles both renamed imports and imports of
- // packages containing unconventional package declarations.
- // Note that this uses / always, even on Windows, because Go import
- // paths always use forward slashes.
- elem := path
- if i := strings.LastIndex(elem, "/"); i >= 0 {
- elem = elem[i+1:]
- }
- if name == "" || elem == name {
- base.ErrorfAt(lineno, "imported and not used: %q", path)
- } else {
- base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name)
- }
-}
-
-func mkpackage(pkgname string) {
- if types.LocalPkg.Name == "" {
- if pkgname == "_" {
- base.Errorf("invalid package name _")
- }
- types.LocalPkg.Name = pkgname
- } else {
- if pkgname != types.LocalPkg.Name {
- base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name)
- }
- }
-}
-
-func clearImports() {
- type importedPkg struct {
- pos src.XPos
- path string
- name string
- }
- var unused []importedPkg
-
- for _, s := range types.LocalPkg.Syms {
- n := ir.AsNode(s.Def)
- if n == nil {
- continue
- }
- if n.Op() == ir.OPACK {
- // throw away top-level package name left over
- // from previous file.
- // leave s->block set to cause redeclaration
- // errors if a conflicting top-level name is
- // introduced by a different file.
- p := n.(*ir.PkgName)
- if !p.Used && base.SyntaxErrors() == 0 {
- unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name})
- }
- s.Def = nil
- continue
- }
- if s.Def != nil && s.Def.Sym() != s {
- // throw away top-level name left over
- // from previous import . "x"
- // We'll report errors after type checking in CheckDotImports.
- s.Def = nil
- continue
- }
- }
-
- sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) })
- for _, pkg := range unused {
- pkgnotused(pkg.pos, pkg.path, pkg.name)
- }
-}
-
-// CheckDotImports reports errors for any unused dot imports.
-func CheckDotImports() {
- for _, pack := range dotImports {
- if !pack.Used {
- base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
- }
- }
-
- // No longer needed; release memory.
- dotImports = nil
- typecheck.DotImportRefs = nil
-}
-
-// dotImports tracks all PkgNames that have been dot-imported.
-var dotImports []*ir.PkgName
-
-// find all the exported symbols in package referenced by PkgName,
-// and make them available in the current package
-func importDot(pack *ir.PkgName) {
- if typecheck.DotImportRefs == nil {
- typecheck.DotImportRefs = make(map[*ir.Ident]*ir.PkgName)
- }
-
- opkg := pack.Pkg
- for _, s := range opkg.Syms {
- if s.Def == nil {
- if _, ok := typecheck.DeclImporter[s]; !ok {
- continue
- }
- }
- if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
- continue
- }
- s1 := typecheck.Lookup(s.Name)
- if s1.Def != nil {
- pkgerror := fmt.Sprintf("during import %q", opkg.Path)
- typecheck.Redeclared(base.Pos, s1, pkgerror)
- continue
- }
-
- id := ir.NewIdent(src.NoXPos, s)
- typecheck.DotImportRefs[id] = pack
- s1.Def = id
- s1.Block = 1
- }
-
- dotImports = append(dotImports, pack)
-}
-
-// importName is like oldname,
-// but it reports an error if sym is from another package and not exported.
-func importName(sym *types.Sym) ir.Node {
- n := oldname(sym)
- if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg {
- n.SetDiag(true)
- base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
- }
- return n
-}
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
index 52224c4046..993c254218 100644
--- a/src/cmd/compile/internal/noder/irgen.go
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -6,7 +6,6 @@ package noder
import (
"fmt"
- "os"
"cmd/compile/internal/base"
"cmd/compile/internal/dwarfgen"
@@ -77,10 +76,6 @@ func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
func check2(noders []*noder) {
m, pkg, info := checkFiles(noders)
- if base.Flag.G < 2 {
- os.Exit(0)
- }
-
g := irgen{
target: typecheck.Target,
self: pkg,
@@ -90,10 +85,6 @@ func check2(noders []*noder) {
typs: make(map[types2.Type]*types.Type),
}
g.generate(noders)
-
- if base.Flag.G < 3 {
- os.Exit(0)
- }
}
// Information about sub-dictionary entries in a dictionary
diff --git a/src/cmd/compile/internal/noder/linker.go b/src/cmd/compile/internal/noder/linker.go
index 2bc7f7c608..0c86088e62 100644
--- a/src/cmd/compile/internal/noder/linker.go
+++ b/src/cmd/compile/internal/noder/linker.go
@@ -7,6 +7,7 @@
package noder
import (
+ "internal/pkgbits"
"io"
"cmd/compile/internal/base"
@@ -29,26 +30,30 @@ import (
// multiple parts into a cohesive whole"... e.g., "assembler" and
// "compiler" are also already taken.
+// TODO(mdempsky): Should linker go into pkgbits? Probably the
+// low-level linking details can be moved there, but the logic for
+// handling extension data needs to stay in the compiler.
+
type linker struct {
- pw pkgEncoder
+ pw pkgbits.PkgEncoder
pkgs map[string]int
decls map[*types.Sym]int
}
-func (l *linker) relocAll(pr *pkgReader, relocs []relocEnt) []relocEnt {
- res := make([]relocEnt, len(relocs))
+func (l *linker) relocAll(pr *pkgReader, relocs []pkgbits.RelocEnt) []pkgbits.RelocEnt {
+ res := make([]pkgbits.RelocEnt, len(relocs))
for i, rent := range relocs {
- rent.idx = l.relocIdx(pr, rent.kind, rent.idx)
+ rent.Idx = l.relocIdx(pr, rent.Kind, rent.Idx)
res[i] = rent
}
return res
}
-func (l *linker) relocIdx(pr *pkgReader, k reloc, idx int) int {
+func (l *linker) relocIdx(pr *pkgReader, k pkgbits.RelocKind, idx int) int {
assert(pr != nil)
- absIdx := pr.absIdx(k, idx)
+ absIdx := pr.AbsIdx(k, idx)
if newidx := pr.newindex[absIdx]; newidx != 0 {
return ^newidx
@@ -56,11 +61,11 @@ func (l *linker) relocIdx(pr *pkgReader, k reloc, idx int) int {
var newidx int
switch k {
- case relocString:
+ case pkgbits.RelocString:
newidx = l.relocString(pr, idx)
- case relocPkg:
+ case pkgbits.RelocPkg:
newidx = l.relocPkg(pr, idx)
- case relocObj:
+ case pkgbits.RelocObj:
newidx = l.relocObj(pr, idx)
default:
@@ -70,9 +75,9 @@ func (l *linker) relocIdx(pr *pkgReader, k reloc, idx int) int {
// every section could be deduplicated. This would also be easier
// if we do external relocations.
- w := l.pw.newEncoderRaw(k)
+ w := l.pw.NewEncoderRaw(k)
l.relocCommon(pr, &w, k, idx)
- newidx = w.idx
+ newidx = w.Idx
}
pr.newindex[absIdx] = ^newidx
@@ -81,43 +86,43 @@ func (l *linker) relocIdx(pr *pkgReader, k reloc, idx int) int {
}
func (l *linker) relocString(pr *pkgReader, idx int) int {
- return l.pw.stringIdx(pr.stringIdx(idx))
+ return l.pw.StringIdx(pr.StringIdx(idx))
}
func (l *linker) relocPkg(pr *pkgReader, idx int) int {
- path := pr.peekPkgPath(idx)
+ path := pr.PeekPkgPath(idx)
if newidx, ok := l.pkgs[path]; ok {
return newidx
}
- r := pr.newDecoder(relocPkg, idx, syncPkgDef)
- w := l.pw.newEncoder(relocPkg, syncPkgDef)
- l.pkgs[path] = w.idx
+ r := pr.NewDecoder(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef)
+ w := l.pw.NewEncoder(pkgbits.RelocPkg, pkgbits.SyncPkgDef)
+ l.pkgs[path] = w.Idx
// TODO(mdempsky): We end up leaving an empty string reference here
// from when the package was originally written as "". Probably not
// a big deal, but a little annoying. Maybe relocating
// cross-references in place is the way to go after all.
- w.relocs = l.relocAll(pr, r.relocs)
+ w.Relocs = l.relocAll(pr, r.Relocs)
- _ = r.string() // original path
- w.string(path)
+ _ = r.String() // original path
+ w.String(path)
- io.Copy(&w.data, &r.data)
+ io.Copy(&w.Data, &r.Data)
- return w.flush()
+ return w.Flush()
}
func (l *linker) relocObj(pr *pkgReader, idx int) int {
- path, name, tag := pr.peekObj(idx)
+ path, name, tag := pr.PeekObj(idx)
sym := types.NewPkg(path, "").Lookup(name)
if newidx, ok := l.decls[sym]; ok {
return newidx
}
- if tag == objStub && path != "builtin" && path != "unsafe" {
+ if tag == pkgbits.ObjStub && path != "builtin" && path != "unsafe" {
pri, ok := objReader[sym]
if !ok {
base.Fatalf("missing reader for %q.%v", path, name)
@@ -127,25 +132,25 @@ func (l *linker) relocObj(pr *pkgReader, idx int) int {
pr = pri.pr
idx = pri.idx
- path2, name2, tag2 := pr.peekObj(idx)
+ path2, name2, tag2 := pr.PeekObj(idx)
sym2 := types.NewPkg(path2, "").Lookup(name2)
assert(sym == sym2)
- assert(tag2 != objStub)
+ assert(tag2 != pkgbits.ObjStub)
}
- w := l.pw.newEncoderRaw(relocObj)
- wext := l.pw.newEncoderRaw(relocObjExt)
- wname := l.pw.newEncoderRaw(relocName)
- wdict := l.pw.newEncoderRaw(relocObjDict)
+ w := l.pw.NewEncoderRaw(pkgbits.RelocObj)
+ wext := l.pw.NewEncoderRaw(pkgbits.RelocObjExt)
+ wname := l.pw.NewEncoderRaw(pkgbits.RelocName)
+ wdict := l.pw.NewEncoderRaw(pkgbits.RelocObjDict)
- l.decls[sym] = w.idx
- assert(wext.idx == w.idx)
- assert(wname.idx == w.idx)
- assert(wdict.idx == w.idx)
+ l.decls[sym] = w.Idx
+ assert(wext.Idx == w.Idx)
+ assert(wname.Idx == w.Idx)
+ assert(wdict.Idx == w.Idx)
- l.relocCommon(pr, &w, relocObj, idx)
- l.relocCommon(pr, &wname, relocName, idx)
- l.relocCommon(pr, &wdict, relocObjDict, idx)
+ l.relocCommon(pr, &w, pkgbits.RelocObj, idx)
+ l.relocCommon(pr, &wname, pkgbits.RelocName, idx)
+ l.relocCommon(pr, &wdict, pkgbits.RelocObjDict, idx)
var obj *ir.Name
if path == "" {
@@ -162,70 +167,70 @@ func (l *linker) relocObj(pr *pkgReader, idx int) int {
}
if obj != nil {
- wext.sync(syncObject1)
+ wext.Sync(pkgbits.SyncObject1)
switch tag {
- case objFunc:
+ case pkgbits.ObjFunc:
l.relocFuncExt(&wext, obj)
- case objType:
+ case pkgbits.ObjType:
l.relocTypeExt(&wext, obj)
- case objVar:
+ case pkgbits.ObjVar:
l.relocVarExt(&wext, obj)
}
- wext.flush()
+ wext.Flush()
} else {
- l.relocCommon(pr, &wext, relocObjExt, idx)
+ l.relocCommon(pr, &wext, pkgbits.RelocObjExt, idx)
}
- return w.idx
+ return w.Idx
}
-func (l *linker) relocCommon(pr *pkgReader, w *encoder, k reloc, idx int) {
- r := pr.newDecoderRaw(k, idx)
- w.relocs = l.relocAll(pr, r.relocs)
- io.Copy(&w.data, &r.data)
- w.flush()
+func (l *linker) relocCommon(pr *pkgReader, w *pkgbits.Encoder, k pkgbits.RelocKind, idx int) {
+ r := pr.NewDecoderRaw(k, idx)
+ w.Relocs = l.relocAll(pr, r.Relocs)
+ io.Copy(&w.Data, &r.Data)
+ w.Flush()
}
-func (l *linker) pragmaFlag(w *encoder, pragma ir.PragmaFlag) {
- w.sync(syncPragma)
- w.int(int(pragma))
+func (l *linker) pragmaFlag(w *pkgbits.Encoder, pragma ir.PragmaFlag) {
+ w.Sync(pkgbits.SyncPragma)
+ w.Int(int(pragma))
}
-func (l *linker) relocFuncExt(w *encoder, name *ir.Name) {
- w.sync(syncFuncExt)
+func (l *linker) relocFuncExt(w *pkgbits.Encoder, name *ir.Name) {
+ w.Sync(pkgbits.SyncFuncExt)
l.pragmaFlag(w, name.Func.Pragma)
l.linkname(w, name)
// Relocated extension data.
- w.bool(true)
+ w.Bool(true)
// Record definition ABI so cross-ABI calls can be direct.
// This is important for the performance of calling some
// common functions implemented in assembly (e.g., bytealg).
- w.uint64(uint64(name.Func.ABI))
+ w.Uint64(uint64(name.Func.ABI))
// Escape analysis.
for _, fs := range &types.RecvsParams {
for _, f := range fs(name.Type()).FieldSlice() {
- w.string(f.Note)
+ w.String(f.Note)
}
}
- if inl := name.Func.Inl; w.bool(inl != nil) {
- w.len(int(inl.Cost))
- w.bool(inl.CanDelayResults)
+ if inl := name.Func.Inl; w.Bool(inl != nil) {
+ w.Len(int(inl.Cost))
+ w.Bool(inl.CanDelayResults)
pri, ok := bodyReader[name.Func]
assert(ok)
- w.reloc(relocBody, l.relocIdx(pri.pr, relocBody, pri.idx))
+ w.Reloc(pkgbits.RelocBody, l.relocIdx(pri.pr, pkgbits.RelocBody, pri.idx))
}
- w.sync(syncEOF)
+ w.Sync(pkgbits.SyncEOF)
}
-func (l *linker) relocTypeExt(w *encoder, name *ir.Name) {
- w.sync(syncTypeExt)
+func (l *linker) relocTypeExt(w *pkgbits.Encoder, name *ir.Name) {
+ w.Sync(pkgbits.SyncTypeExt)
typ := name.Type()
@@ -242,55 +247,28 @@ func (l *linker) relocTypeExt(w *encoder, name *ir.Name) {
}
}
-func (l *linker) relocVarExt(w *encoder, name *ir.Name) {
- w.sync(syncVarExt)
+func (l *linker) relocVarExt(w *pkgbits.Encoder, name *ir.Name) {
+ w.Sync(pkgbits.SyncVarExt)
l.linkname(w, name)
}
-func (l *linker) linkname(w *encoder, name *ir.Name) {
- w.sync(syncLinkname)
+func (l *linker) linkname(w *pkgbits.Encoder, name *ir.Name) {
+ w.Sync(pkgbits.SyncLinkname)
linkname := name.Sym().Linkname
if !l.lsymIdx(w, linkname, name.Linksym()) {
- w.string(linkname)
+ w.String(linkname)
}
}
-func (l *linker) lsymIdx(w *encoder, linkname string, lsym *obj.LSym) bool {
+func (l *linker) lsymIdx(w *pkgbits.Encoder, linkname string, lsym *obj.LSym) bool {
if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || linkname != "" {
- w.int64(-1)
+ w.Int64(-1)
return false
}
// For a defined symbol, export its index.
// For re-exporting an imported symbol, pass its index through.
- w.int64(int64(lsym.SymIdx))
+ w.Int64(int64(lsym.SymIdx))
return true
}
-
-// @@@ Helpers
-
-// TODO(mdempsky): These should probably be removed. I think they're a
-// smell that the export data format is not yet quite right.
-
-func (pr *pkgDecoder) peekPkgPath(idx int) string {
- r := pr.newDecoder(relocPkg, idx, syncPkgDef)
- path := r.string()
- if path == "" {
- path = pr.pkgPath
- }
- return path
-}
-
-func (pr *pkgDecoder) peekObj(idx int) (string, string, codeObj) {
- r := pr.newDecoder(relocName, idx, syncObject1)
- r.sync(syncSym)
- r.sync(syncPkg)
- path := pr.peekPkgPath(r.reloc(relocPkg))
- name := r.string()
- assert(name != "")
-
- tag := codeObj(r.code(syncCodeObj))
-
- return path, name, tag
-}
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index b36db67a50..1d7c1f44a4 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -7,9 +7,6 @@ package noder
import (
"errors"
"fmt"
- "go/constant"
- "go/token"
- "internal/buildcfg"
"os"
"path/filepath"
"runtime"
@@ -19,7 +16,6 @@ import (
"unicode/utf8"
"cmd/compile/internal/base"
- "cmd/compile/internal/dwarfgen"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/typecheck"
@@ -31,13 +27,7 @@ import (
func LoadPackage(filenames []string) {
base.Timer.Start("fe", "parse")
- // -G=3 and unified expect generics syntax, but -G=0 does not.
- supportsGenerics := base.Flag.G != 0 || buildcfg.Experiment.Unified
-
- mode := syntax.CheckBranches
- if supportsGenerics {
- mode |= syntax.AllowGenerics
- }
+ mode := syntax.CheckBranches | syntax.AllowGenerics
// Limit the number of simultaneously open files.
sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
@@ -45,8 +35,7 @@ func LoadPackage(filenames []string) {
noders := make([]*noder, len(filenames))
for i, filename := range filenames {
p := noder{
- err: make(chan syntax.Error),
- trackScopes: base.Flag.Dwarf,
+ err: make(chan syntax.Error),
}
noders[i] = &p
@@ -85,104 +74,8 @@ func LoadPackage(filenames []string) {
return
}
- if base.Flag.G != 0 {
- // Use types2 to type-check and possibly generate IR.
- check2(noders)
- return
- }
-
- for _, p := range noders {
- p.node()
- p.file = nil // release memory
- }
-
- if base.SyntaxErrors() != 0 {
- base.ErrorExit()
- }
- types.CheckDclstack()
-
- for _, p := range noders {
- p.processPragmas()
- }
-
- // Typecheck.
- types.LocalPkg.Height = myheight
- typecheck.DeclareUniverse()
- typecheck.TypecheckAllowed = true
-
- // Process top-level declarations in phases.
-
- // Phase 1: const, type, and names and types of funcs.
- // This will gather all the information about types
- // and methods but doesn't depend on any of it.
- //
- // We also defer type alias declarations until phase 2
- // to avoid cycles like #18640.
- // TODO(gri) Remove this again once we have a fix for #25838.
- //
- // Phase 2: Variable assignments.
- // To check interface assignments, depends on phase 1.
-
- // Don't use range--typecheck can add closures to Target.Decls.
- for phase, name := range []string{"top1", "top2"} {
- base.Timer.Start("fe", "typecheck", name)
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- op := n.Op()
-
- // Closure function declarations are typechecked as part of the
- // closure expression.
- if fn, ok := n.(*ir.Func); ok && fn.OClosure != nil {
- continue
- }
-
- // We don't actually add ir.ODCL nodes to Target.Decls. Make sure of that.
- if op == ir.ODCL {
- base.FatalfAt(n.Pos(), "unexpected top declaration: %v", op)
- }
-
- // Identify declarations that should be deferred to the second
- // iteration.
- late := op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias()
-
- if late == (phase == 1) {
- typecheck.Target.Decls[i] = typecheck.Stmt(n)
- }
- }
- }
-
- // Phase 3: Type check function bodies.
- // Don't use range--typecheck can add closures to Target.Decls.
- base.Timer.Start("fe", "typecheck", "func")
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
- if base.Flag.W > 1 {
- s := fmt.Sprintf("\nbefore typecheck %v", fn)
- ir.Dump(s, fn)
- }
- typecheck.FuncBody(fn)
- if base.Flag.W > 1 {
- s := fmt.Sprintf("\nafter typecheck %v", fn)
- ir.Dump(s, fn)
- }
- }
- }
-
- // Phase 4: Check external declarations.
- // TODO(mdempsky): This should be handled when type checking their
- // corresponding ODCL nodes.
- base.Timer.Start("fe", "typecheck", "externdcls")
- for i, n := range typecheck.Target.Externs {
- if n.Op() == ir.ONAME {
- typecheck.Target.Externs[i] = typecheck.Expr(typecheck.Target.Externs[i])
- }
- }
-
- // Phase 5: With all user code type-checked, it's now safe to verify map keys.
- // With all user code typechecked, it's now safe to verify unused dot imports.
- typecheck.CheckMapKeys()
- CheckDotImports()
- base.ExitIfErrors()
+ // Use types2 to type-check and generate IR.
+ check2(noders)
}
func (p *noder) errorAt(pos syntax.Pos, format string, args ...interface{}) {
@@ -218,76 +111,6 @@ type noder struct {
err chan syntax.Error
importedUnsafe bool
importedEmbed bool
- trackScopes bool
-
- funcState *funcState
-}
-
-// funcState tracks all per-function state to make handling nested
-// functions easier.
-type funcState struct {
- // scopeVars is a stack tracking the number of variables declared in
- // the current function at the moment each open scope was opened.
- scopeVars []int
- marker dwarfgen.ScopeMarker
-
- lastCloseScopePos syntax.Pos
-}
-
-func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
- outerFuncState := p.funcState
- p.funcState = new(funcState)
- typecheck.StartFuncBody(fn)
-
- if block != nil {
- body := p.stmts(block.List)
- if body == nil {
- body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
- }
- fn.Body = body
-
- base.Pos = p.makeXPos(block.Rbrace)
- fn.Endlineno = base.Pos
- }
-
- typecheck.FinishFuncBody()
- p.funcState.marker.WriteTo(fn)
- p.funcState = outerFuncState
-}
-
-func (p *noder) openScope(pos syntax.Pos) {
- fs := p.funcState
- types.Markdcl()
-
- if p.trackScopes {
- fs.scopeVars = append(fs.scopeVars, len(ir.CurFunc.Dcl))
- fs.marker.Push(p.makeXPos(pos))
- }
-}
-
-func (p *noder) closeScope(pos syntax.Pos) {
- fs := p.funcState
- fs.lastCloseScopePos = pos
- types.Popdcl()
-
- if p.trackScopes {
- scopeVars := fs.scopeVars[len(fs.scopeVars)-1]
- fs.scopeVars = fs.scopeVars[:len(fs.scopeVars)-1]
- if scopeVars == len(ir.CurFunc.Dcl) {
- // no variables were declared in this scope, so we can retract it.
- fs.marker.Unpush()
- } else {
- fs.marker.Pop(p.makeXPos(pos))
- }
- }
-}
-
-// closeAnotherScope is like closeScope, but it reuses the same mark
-// position as the last closeScope call. This is useful for "for" and
-// "if" statements, as their implicit blocks always end at the same
-// position as an explicit block.
-func (p *noder) closeAnotherScope() {
- p.closeScope(p.funcState.lastCloseScopePos)
}
// linkname records a //go:linkname directive.
@@ -297,24 +120,6 @@ type linkname struct {
remote string
}
-func (p *noder) node() {
- p.importedUnsafe = false
- p.importedEmbed = false
-
- p.setlineno(p.file.PkgName)
- mkpackage(p.file.PkgName.Value)
-
- if pragma, ok := p.file.Pragma.(*pragmas); ok {
- pragma.Flag &^= ir.GoBuildPragma
- p.checkUnused(pragma)
- }
-
- typecheck.Target.Decls = append(typecheck.Target.Decls, p.decls(p.file.DeclList)...)
-
- base.Pos = src.NoXPos
- clearImports()
-}
-
func (p *noder) processPragmas() {
for _, l := range p.linknames {
if !p.importedUnsafe {
@@ -335,1074 +140,6 @@ func (p *noder) processPragmas() {
typecheck.Target.CgoPragmas = append(typecheck.Target.CgoPragmas, p.pragcgobuf...)
}
-func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
- var cs constState
-
- for _, decl := range decls {
- p.setlineno(decl)
- switch decl := decl.(type) {
- case *syntax.ImportDecl:
- p.importDecl(decl)
-
- case *syntax.VarDecl:
- l = append(l, p.varDecl(decl)...)
-
- case *syntax.ConstDecl:
- l = append(l, p.constDecl(decl, &cs)...)
-
- case *syntax.TypeDecl:
- l = append(l, p.typeDecl(decl))
-
- case *syntax.FuncDecl:
- l = append(l, p.funcDecl(decl))
-
- default:
- panic("unhandled Decl")
- }
- }
-
- return
-}
-
-func (p *noder) importDecl(imp *syntax.ImportDecl) {
- if imp.Path == nil || imp.Path.Bad {
- return // avoid follow-on errors if there was a syntax error
- }
-
- if pragma, ok := imp.Pragma.(*pragmas); ok {
- p.checkUnused(pragma)
- }
-
- ipkg := importfile(imp)
- if ipkg == nil {
- if base.Errors() == 0 {
- base.Fatalf("phase error in import")
- }
- return
- }
-
- if ipkg == types.UnsafePkg {
- p.importedUnsafe = true
- }
- if ipkg.Path == "embed" {
- p.importedEmbed = true
- }
-
- var my *types.Sym
- if imp.LocalPkgName != nil {
- my = p.name(imp.LocalPkgName)
- } else {
- my = typecheck.Lookup(ipkg.Name)
- }
-
- pack := ir.NewPkgName(p.pos(imp), my, ipkg)
-
- switch my.Name {
- case ".":
- importDot(pack)
- return
- case "init":
- base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func")
- return
- case "_":
- return
- }
- if my.Def != nil {
- typecheck.Redeclared(pack.Pos(), my, "as imported package name")
- }
- my.Def = pack
- my.Lastlineno = pack.Pos()
- my.Block = 1 // at top level
-}
-
-func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
- names := p.declNames(ir.ONAME, decl.NameList)
- typ := p.typeExprOrNil(decl.Type)
- exprs := p.exprList(decl.Values)
-
- if pragma, ok := decl.Pragma.(*pragmas); ok {
- varEmbed(p.makeXPos, names[0], decl, pragma, p.importedEmbed)
- p.checkUnused(pragma)
- }
-
- var init []ir.Node
- p.setlineno(decl)
-
- if len(names) > 1 && len(exprs) == 1 {
- as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, exprs)
- for _, v := range names {
- as2.Lhs.Append(v)
- typecheck.Declare(v, typecheck.DeclContext)
- v.Ntype = typ
- v.Defn = as2
- if ir.CurFunc != nil {
- init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
- }
- }
-
- return append(init, as2)
- }
-
- for i, v := range names {
- var e ir.Node
- if i < len(exprs) {
- e = exprs[i]
- }
-
- typecheck.Declare(v, typecheck.DeclContext)
- v.Ntype = typ
-
- if ir.CurFunc != nil {
- init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
- }
- as := ir.NewAssignStmt(base.Pos, v, e)
- init = append(init, as)
- if e != nil || ir.CurFunc == nil {
- v.Defn = as
- }
- }
-
- if len(exprs) != 0 && len(names) != len(exprs) {
- base.Errorf("assignment mismatch: %d variables but %d values", len(names), len(exprs))
- }
-
- return init
-}
-
-// constState tracks state between constant specifiers within a
-// declaration group. This state is kept separate from noder so nested
-// constant declarations are handled correctly (e.g., issue 15550).
-type constState struct {
- group *syntax.Group
- typ ir.Ntype
- values syntax.Expr
- iota int64
-}
-
-func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
- if decl.Group == nil || decl.Group != cs.group {
- *cs = constState{
- group: decl.Group,
- }
- }
-
- if pragma, ok := decl.Pragma.(*pragmas); ok {
- p.checkUnused(pragma)
- }
-
- names := p.declNames(ir.OLITERAL, decl.NameList)
- typ := p.typeExprOrNil(decl.Type)
-
- if decl.Values != nil {
- cs.typ, cs.values = typ, decl.Values
- } else {
- if typ != nil {
- base.Errorf("const declaration cannot have type without expression")
- }
- typ = cs.typ
- }
- values := p.exprList(cs.values)
-
- nn := make([]ir.Node, 0, len(names))
- for i, n := range names {
- if i >= len(values) {
- base.Errorf("missing value in const declaration")
- break
- }
-
- v := values[i]
- if decl.Values == nil {
- ir.Visit(v, func(v ir.Node) {
- if ir.HasUniquePos(v) {
- v.SetPos(n.Pos())
- }
- })
- }
-
- typecheck.Declare(n, typecheck.DeclContext)
-
- n.Ntype = typ
- n.Defn = v
- n.SetIota(cs.iota)
-
- nn = append(nn, ir.NewDecl(p.pos(decl), ir.ODCLCONST, n))
- }
-
- if len(values) > len(names) {
- base.Errorf("extra expression in const declaration")
- }
-
- cs.iota++
-
- return nn
-}
-
-func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
- n := p.declName(ir.OTYPE, decl.Name)
- typecheck.Declare(n, typecheck.DeclContext)
-
- // decl.Type may be nil but in that case we got a syntax error during parsing
- typ := p.typeExprOrNil(decl.Type)
-
- n.Ntype = typ
- n.SetAlias(decl.Alias)
- if pragma, ok := decl.Pragma.(*pragmas); ok {
- if !decl.Alias {
- n.SetPragma(pragma.Flag & typePragmas)
- pragma.Flag &^= typePragmas
- }
- p.checkUnused(pragma)
- }
-
- nod := ir.NewDecl(p.pos(decl), ir.ODCLTYPE, n)
- if n.Alias() && !types.AllowsGoVersion(types.LocalPkg, 1, 9) {
- base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
- }
- return nod
-}
-
-func (p *noder) declNames(op ir.Op, names []*syntax.Name) []*ir.Name {
- nodes := make([]*ir.Name, 0, len(names))
- for _, name := range names {
- nodes = append(nodes, p.declName(op, name))
- }
- return nodes
-}
-
-func (p *noder) declName(op ir.Op, name *syntax.Name) *ir.Name {
- return ir.NewDeclNameAt(p.pos(name), op, p.name(name))
-}
-
-func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
- name := p.name(fun.Name)
- t := p.signature(fun.Recv, fun.Type)
- f := ir.NewFunc(p.pos(fun))
-
- if fun.Recv == nil {
- if name.Name == "init" {
- name = renameinit()
- if len(t.Params) > 0 || len(t.Results) > 0 {
- base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
- }
- typecheck.Target.Inits = append(typecheck.Target.Inits, f)
- }
-
- if types.LocalPkg.Name == "main" && name.Name == "main" {
- if len(t.Params) > 0 || len(t.Results) > 0 {
- base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values")
- }
- }
- } else {
- f.Shortname = name
- name = ir.BlankNode.Sym() // filled in by tcFunc
- }
-
- f.Nname = ir.NewNameAt(p.pos(fun.Name), name)
- f.Nname.Func = f
- f.Nname.Defn = f
- f.Nname.Ntype = t
-
- if pragma, ok := fun.Pragma.(*pragmas); ok {
- f.Pragma = pragma.Flag & funcPragmas
- if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
- base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
- }
- pragma.Flag &^= funcPragmas
- p.checkUnused(pragma)
- }
-
- if fun.Recv == nil {
- typecheck.Declare(f.Nname, ir.PFUNC)
- }
-
- p.funcBody(f, fun.Body)
-
- if fun.Body != nil {
- if f.Pragma&ir.Noescape != 0 {
- base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations")
- }
- } else {
- if base.Flag.Complete || strings.HasPrefix(ir.FuncName(f), "init.") {
- // Linknamed functions are allowed to have no body. Hopefully
- // the linkname target has a body. See issue 23311.
- isLinknamed := false
- for _, n := range p.linknames {
- if ir.FuncName(f) == n.local {
- isLinknamed = true
- break
- }
- }
- if !isLinknamed {
- base.ErrorfAt(f.Pos(), "missing function body")
- }
- }
- }
-
- return f
-}
-
-func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.FuncType {
- var rcvr *ir.Field
- if recv != nil {
- rcvr = p.param(recv, false, false)
- }
- return ir.NewFuncType(p.pos(typ), rcvr,
- p.params(typ.ParamList, true),
- p.params(typ.ResultList, false))
-}
-
-func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Field {
- nodes := make([]*ir.Field, 0, len(params))
- for i, param := range params {
- p.setlineno(param)
- nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
- if i > 0 && params[i].Type == params[i-1].Type {
- nodes[i].Ntype = nodes[i-1].Ntype
- }
- }
- return nodes
-}
-
-func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Field {
- var name *types.Sym
- if param.Name != nil {
- name = p.name(param.Name)
- }
-
- typ := p.typeExpr(param.Type)
- n := ir.NewField(p.pos(param), name, typ, nil)
-
- // rewrite ...T parameter
- if typ, ok := typ.(*ir.SliceType); ok && typ.DDD {
- if !dddOk {
- // We mark these as syntax errors to get automatic elimination
- // of multiple such errors per line (see ErrorfAt in subr.go).
- base.Errorf("syntax error: cannot use ... in receiver or result parameter list")
- } else if !final {
- if param.Name == nil {
- base.Errorf("syntax error: cannot use ... with non-final parameter")
- } else {
- p.errorAt(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
- }
- }
- typ.DDD = false
- n.IsDDD = true
- }
-
- return n
-}
-
-func (p *noder) exprList(expr syntax.Expr) []ir.Node {
- switch expr := expr.(type) {
- case nil:
- return nil
- case *syntax.ListExpr:
- return p.exprs(expr.ElemList)
- default:
- return []ir.Node{p.expr(expr)}
- }
-}
-
-func (p *noder) exprs(exprs []syntax.Expr) []ir.Node {
- nodes := make([]ir.Node, 0, len(exprs))
- for _, expr := range exprs {
- nodes = append(nodes, p.expr(expr))
- }
- return nodes
-}
-
-func (p *noder) expr(expr syntax.Expr) ir.Node {
- p.setlineno(expr)
- switch expr := expr.(type) {
- case nil, *syntax.BadExpr:
- return nil
- case *syntax.Name:
- return p.mkname(expr)
- case *syntax.BasicLit:
- n := ir.NewBasicLit(p.pos(expr), p.basicLit(expr))
- if expr.Kind == syntax.RuneLit {
- n.SetType(types.UntypedRune)
- }
- n.SetDiag(expr.Bad || n.Val().Kind() == constant.Unknown) // avoid follow-on errors if there was a syntax error
- return n
- case *syntax.CompositeLit:
- n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, p.typeExpr(expr.Type), nil)
- l := p.exprs(expr.ElemList)
- for i, e := range l {
- l[i] = p.wrapname(expr.ElemList[i], e)
- }
- n.List = l
- base.Pos = p.makeXPos(expr.Rbrace)
- return n
- case *syntax.KeyValueExpr:
- // use position of expr.Key rather than of expr (which has position of ':')
- return ir.NewKeyExpr(p.pos(expr.Key), p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
- case *syntax.FuncLit:
- return p.funcLit(expr)
- case *syntax.ParenExpr:
- return ir.NewParenExpr(p.pos(expr), p.expr(expr.X))
- case *syntax.SelectorExpr:
- // parser.new_dotname
- obj := p.expr(expr.X)
- if obj.Op() == ir.OPACK {
- pack := obj.(*ir.PkgName)
- pack.Used = true
- return importName(pack.Pkg.Lookup(expr.Sel.Value))
- }
- n := ir.NewSelectorExpr(base.Pos, ir.OXDOT, obj, p.name(expr.Sel))
- n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
- return n
- case *syntax.IndexExpr:
- return ir.NewIndexExpr(p.pos(expr), p.expr(expr.X), p.expr(expr.Index))
- case *syntax.SliceExpr:
- op := ir.OSLICE
- if expr.Full {
- op = ir.OSLICE3
- }
- x := p.expr(expr.X)
- var index [3]ir.Node
- for i, n := range &expr.Index {
- if n != nil {
- index[i] = p.expr(n)
- }
- }
- return ir.NewSliceExpr(p.pos(expr), op, x, index[0], index[1], index[2])
- case *syntax.AssertExpr:
- return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type))
- case *syntax.Operation:
- if expr.Op == syntax.Add && expr.Y != nil {
- return p.sum(expr)
- }
- x := p.expr(expr.X)
- if expr.Y == nil {
- pos, op := p.pos(expr), p.unOp(expr.Op)
- switch op {
- case ir.OADDR:
- return typecheck.NodAddrAt(pos, x)
- case ir.ODEREF:
- return ir.NewStarExpr(pos, x)
- }
- return ir.NewUnaryExpr(pos, op, x)
- }
-
- pos, op, y := p.pos(expr), p.binOp(expr.Op), p.expr(expr.Y)
- switch op {
- case ir.OANDAND, ir.OOROR:
- return ir.NewLogicalExpr(pos, op, x, y)
- }
- return ir.NewBinaryExpr(pos, op, x, y)
- case *syntax.CallExpr:
- n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), p.exprs(expr.ArgList))
- n.IsDDD = expr.HasDots
- return n
-
- case *syntax.ArrayType:
- var len ir.Node
- if expr.Len != nil {
- len = p.expr(expr.Len)
- }
- return ir.NewArrayType(p.pos(expr), len, p.typeExpr(expr.Elem))
- case *syntax.SliceType:
- return ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem))
- case *syntax.DotsType:
- t := ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem))
- t.DDD = true
- return t
- case *syntax.StructType:
- return p.structType(expr)
- case *syntax.InterfaceType:
- return p.interfaceType(expr)
- case *syntax.FuncType:
- return p.signature(nil, expr)
- case *syntax.MapType:
- return ir.NewMapType(p.pos(expr),
- p.typeExpr(expr.Key), p.typeExpr(expr.Value))
- case *syntax.ChanType:
- return ir.NewChanType(p.pos(expr),
- p.typeExpr(expr.Elem), p.chanDir(expr.Dir))
-
- case *syntax.TypeSwitchGuard:
- var tag *ir.Ident
- if expr.Lhs != nil {
- tag = ir.NewIdent(p.pos(expr.Lhs), p.name(expr.Lhs))
- if ir.IsBlank(tag) {
- base.Errorf("invalid variable name %v in type switch", tag)
- }
- }
- return ir.NewTypeSwitchGuard(p.pos(expr), tag, p.expr(expr.X))
- }
- panic("unhandled Expr")
-}
-
-// sum efficiently handles very large summation expressions (such as
-// in issue #16394). In particular, it avoids left recursion and
-// collapses string literals.
-func (p *noder) sum(x syntax.Expr) ir.Node {
- // While we need to handle long sums with asymptotic
- // efficiency, the vast majority of sums are very small: ~95%
- // have only 2 or 3 operands, and ~99% of string literals are
- // never concatenated.
-
- adds := make([]*syntax.Operation, 0, 2)
- for {
- add, ok := x.(*syntax.Operation)
- if !ok || add.Op != syntax.Add || add.Y == nil {
- break
- }
- adds = append(adds, add)
- x = add.X
- }
-
- // nstr is the current rightmost string literal in the
- // summation (if any), and chunks holds its accumulated
- // substrings.
- //
- // Consider the expression x + "a" + "b" + "c" + y. When we
- // reach the string literal "a", we assign nstr to point to
- // its corresponding Node and initialize chunks to {"a"}.
- // Visiting the subsequent string literals "b" and "c", we
- // simply append their values to chunks. Finally, when we
- // reach the non-constant operand y, we'll join chunks to form
- // "abc" and reassign the "a" string literal's value.
- //
- // N.B., we need to be careful about named string constants
- // (indicated by Sym != nil) because 1) we can't modify their
- // value, as doing so would affect other uses of the string
- // constant, and 2) they may have types, which we need to
- // handle correctly. For now, we avoid these problems by
- // treating named string constants the same as non-constant
- // operands.
- var nstr ir.Node
- chunks := make([]string, 0, 1)
-
- n := p.expr(x)
- if ir.IsConst(n, constant.String) && n.Sym() == nil {
- nstr = n
- chunks = append(chunks, ir.StringVal(nstr))
- }
-
- for i := len(adds) - 1; i >= 0; i-- {
- add := adds[i]
-
- r := p.expr(add.Y)
- if ir.IsConst(r, constant.String) && r.Sym() == nil {
- if nstr != nil {
- // Collapse r into nstr instead of adding to n.
- chunks = append(chunks, ir.StringVal(r))
- continue
- }
-
- nstr = r
- chunks = append(chunks, ir.StringVal(nstr))
- } else {
- if len(chunks) > 1 {
- nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
- }
- nstr = nil
- chunks = chunks[:0]
- }
- n = ir.NewBinaryExpr(p.pos(add), ir.OADD, n, r)
- }
- if len(chunks) > 1 {
- nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
- }
-
- return n
-}
-
-func (p *noder) typeExpr(typ syntax.Expr) ir.Ntype {
- // TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
- n := p.expr(typ)
- if n == nil {
- return nil
- }
- return n.(ir.Ntype)
-}
-
-func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Ntype {
- if typ != nil {
- return p.typeExpr(typ)
- }
- return nil
-}
-
-func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
- switch dir {
- case 0:
- return types.Cboth
- case syntax.SendOnly:
- return types.Csend
- case syntax.RecvOnly:
- return types.Crecv
- }
- panic("unhandled ChanDir")
-}
-
-func (p *noder) structType(expr *syntax.StructType) ir.Node {
- l := make([]*ir.Field, 0, len(expr.FieldList))
- for i, field := range expr.FieldList {
- p.setlineno(field)
- var n *ir.Field
- if field.Name == nil {
- n = p.embedded(field.Type)
- } else {
- n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil)
- }
- if i > 0 && expr.FieldList[i].Type == expr.FieldList[i-1].Type {
- n.Ntype = l[i-1].Ntype
- }
- if i < len(expr.TagList) && expr.TagList[i] != nil {
- n.Note = constant.StringVal(p.basicLit(expr.TagList[i]))
- }
- l = append(l, n)
- }
-
- p.setlineno(expr)
- return ir.NewStructType(p.pos(expr), l)
-}
-
-func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node {
- l := make([]*ir.Field, 0, len(expr.MethodList))
- for _, method := range expr.MethodList {
- p.setlineno(method)
- var n *ir.Field
- if method.Name == nil {
- n = ir.NewField(p.pos(method), nil, importName(p.packname(method.Type)).(ir.Ntype), nil)
- } else {
- mname := p.name(method.Name)
- if mname.IsBlank() {
- base.Errorf("methods must have a unique non-blank name")
- continue
- }
- sig := p.typeExpr(method.Type).(*ir.FuncType)
- sig.Recv = fakeRecv()
- n = ir.NewField(p.pos(method), mname, sig, nil)
- }
- l = append(l, n)
- }
-
- return ir.NewInterfaceType(p.pos(expr), l)
-}
-
-func (p *noder) packname(expr syntax.Expr) *types.Sym {
- switch expr := expr.(type) {
- case *syntax.Name:
- name := p.name(expr)
- if n := oldname(name); n.Name() != nil && n.Name().PkgName != nil {
- n.Name().PkgName.Used = true
- }
- return name
- case *syntax.SelectorExpr:
- name := p.name(expr.X.(*syntax.Name))
- def := ir.AsNode(name.Def)
- if def == nil {
- base.Errorf("undefined: %v", name)
- return name
- }
- var pkg *types.Pkg
- if def.Op() != ir.OPACK {
- base.Errorf("%v is not a package", name)
- pkg = types.LocalPkg
- } else {
- def := def.(*ir.PkgName)
- def.Used = true
- pkg = def.Pkg
- }
- return pkg.Lookup(expr.Sel.Value)
- }
- panic(fmt.Sprintf("unexpected packname: %#v", expr))
-}
-
-func (p *noder) embedded(typ syntax.Expr) *ir.Field {
- pos := p.pos(syntax.StartPos(typ))
-
- op, isStar := typ.(*syntax.Operation)
- if isStar {
- if op.Op != syntax.Mul || op.Y != nil {
- panic("unexpected Operation")
- }
- typ = op.X
- }
-
- sym := p.packname(typ)
- n := ir.NewField(pos, typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
- n.Embedded = true
-
- if isStar {
- n.Ntype = ir.NewStarExpr(pos, n.Ntype)
- }
- return n
-}
-
-func (p *noder) stmts(stmts []syntax.Stmt) []ir.Node {
- return p.stmtsFall(stmts, false)
-}
-
-func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
- var nodes []ir.Node
- for i, stmt := range stmts {
- s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
- if s == nil {
- } else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 {
- // Inline non-empty block.
- // Empty blocks must be preserved for CheckReturn.
- nodes = append(nodes, s.(*ir.BlockStmt).List...)
- } else {
- nodes = append(nodes, s)
- }
- }
- return nodes
-}
-
-func (p *noder) stmt(stmt syntax.Stmt) ir.Node {
- return p.stmtFall(stmt, false)
-}
-
-func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
- p.setlineno(stmt)
- switch stmt := stmt.(type) {
- case nil, *syntax.EmptyStmt:
- return nil
- case *syntax.LabeledStmt:
- return p.labeledStmt(stmt, fallOK)
- case *syntax.BlockStmt:
- l := p.blockStmt(stmt)
- if len(l) == 0 {
- // TODO(mdempsky): Line number?
- return ir.NewBlockStmt(base.Pos, nil)
- }
- return ir.NewBlockStmt(src.NoXPos, l)
- case *syntax.ExprStmt:
- return p.wrapname(stmt, p.expr(stmt.X))
- case *syntax.SendStmt:
- return ir.NewSendStmt(p.pos(stmt), p.expr(stmt.Chan), p.expr(stmt.Value))
- case *syntax.DeclStmt:
- return ir.NewBlockStmt(src.NoXPos, p.decls(stmt.DeclList))
- case *syntax.AssignStmt:
- if stmt.Rhs == nil {
- pos := p.pos(stmt)
- n := ir.NewAssignOpStmt(pos, p.binOp(stmt.Op), p.expr(stmt.Lhs), ir.NewBasicLit(pos, one))
- n.IncDec = true
- return n
- }
-
- if stmt.Op != 0 && stmt.Op != syntax.Def {
- n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs))
- return n
- }
-
- rhs := p.exprList(stmt.Rhs)
- if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 {
- n := ir.NewAssignListStmt(p.pos(stmt), ir.OAS2, nil, nil)
- n.Def = stmt.Op == syntax.Def
- n.Lhs = p.assignList(stmt.Lhs, n, n.Def)
- n.Rhs = rhs
- return n
- }
-
- n := ir.NewAssignStmt(p.pos(stmt), nil, nil)
- n.Def = stmt.Op == syntax.Def
- n.X = p.assignList(stmt.Lhs, n, n.Def)[0]
- n.Y = rhs[0]
- return n
-
- case *syntax.BranchStmt:
- var op ir.Op
- switch stmt.Tok {
- case syntax.Break:
- op = ir.OBREAK
- case syntax.Continue:
- op = ir.OCONTINUE
- case syntax.Fallthrough:
- if !fallOK {
- base.Errorf("fallthrough statement out of place")
- }
- op = ir.OFALL
- case syntax.Goto:
- op = ir.OGOTO
- default:
- panic("unhandled BranchStmt")
- }
- var sym *types.Sym
- if stmt.Label != nil {
- sym = p.name(stmt.Label)
- }
- return ir.NewBranchStmt(p.pos(stmt), op, sym)
- case *syntax.CallStmt:
- var op ir.Op
- switch stmt.Tok {
- case syntax.Defer:
- op = ir.ODEFER
- case syntax.Go:
- op = ir.OGO
- default:
- panic("unhandled CallStmt")
- }
- return ir.NewGoDeferStmt(p.pos(stmt), op, p.expr(stmt.Call))
- case *syntax.ReturnStmt:
- n := ir.NewReturnStmt(p.pos(stmt), p.exprList(stmt.Results))
- if len(n.Results) == 0 && ir.CurFunc != nil {
- for _, ln := range ir.CurFunc.Dcl {
- if ln.Class == ir.PPARAM {
- continue
- }
- if ln.Class != ir.PPARAMOUT {
- break
- }
- if ln.Sym().Def != ln {
- base.Errorf("%s is shadowed during return", ln.Sym().Name)
- }
- }
- }
- return n
- case *syntax.IfStmt:
- return p.ifStmt(stmt)
- case *syntax.ForStmt:
- return p.forStmt(stmt)
- case *syntax.SwitchStmt:
- return p.switchStmt(stmt)
- case *syntax.SelectStmt:
- return p.selectStmt(stmt)
- }
- panic("unhandled Stmt")
-}
-
-func (p *noder) assignList(expr syntax.Expr, defn ir.InitNode, colas bool) []ir.Node {
- if !colas {
- return p.exprList(expr)
- }
-
- var exprs []syntax.Expr
- if list, ok := expr.(*syntax.ListExpr); ok {
- exprs = list.ElemList
- } else {
- exprs = []syntax.Expr{expr}
- }
-
- res := make([]ir.Node, len(exprs))
- seen := make(map[*types.Sym]bool, len(exprs))
-
- newOrErr := false
- for i, expr := range exprs {
- p.setlineno(expr)
- res[i] = ir.BlankNode
-
- name, ok := expr.(*syntax.Name)
- if !ok {
- p.errorAt(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
- newOrErr = true
- continue
- }
-
- sym := p.name(name)
- if sym.IsBlank() {
- continue
- }
-
- if seen[sym] {
- p.errorAt(expr.Pos(), "%v repeated on left side of :=", sym)
- newOrErr = true
- continue
- }
- seen[sym] = true
-
- if sym.Block == types.Block {
- res[i] = oldname(sym)
- continue
- }
-
- newOrErr = true
- n := typecheck.NewName(sym)
- typecheck.Declare(n, typecheck.DeclContext)
- n.Defn = defn
- defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
- res[i] = n
- }
-
- if !newOrErr {
- base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
- }
- return res
-}
-
-func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
- p.openScope(stmt.Pos())
- nodes := p.stmts(stmt.List)
- p.closeScope(stmt.Rbrace)
- return nodes
-}
-
-func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
- p.openScope(stmt.Pos())
- init := p.stmt(stmt.Init)
- n := ir.NewIfStmt(p.pos(stmt), p.expr(stmt.Cond), p.blockStmt(stmt.Then), nil)
- if init != nil {
- n.SetInit([]ir.Node{init})
- }
- if stmt.Else != nil {
- e := p.stmt(stmt.Else)
- if e.Op() == ir.OBLOCK {
- e := e.(*ir.BlockStmt)
- n.Else = e.List
- } else {
- n.Else = []ir.Node{e}
- }
- }
- p.closeAnotherScope()
- return n
-}
-
-func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
- p.openScope(stmt.Pos())
- if r, ok := stmt.Init.(*syntax.RangeClause); ok {
- if stmt.Cond != nil || stmt.Post != nil {
- panic("unexpected RangeClause")
- }
-
- n := ir.NewRangeStmt(p.pos(r), nil, nil, p.expr(r.X), nil)
- if r.Lhs != nil {
- n.Def = r.Def
- lhs := p.assignList(r.Lhs, n, n.Def)
- n.Key = lhs[0]
- if len(lhs) > 1 {
- n.Value = lhs[1]
- }
- }
- n.Body = p.blockStmt(stmt.Body)
- p.closeAnotherScope()
- return n
- }
-
- n := ir.NewForStmt(p.pos(stmt), p.stmt(stmt.Init), p.expr(stmt.Cond), p.stmt(stmt.Post), p.blockStmt(stmt.Body))
- p.closeAnotherScope()
- return n
-}
-
-func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
- p.openScope(stmt.Pos())
-
- init := p.stmt(stmt.Init)
- n := ir.NewSwitchStmt(p.pos(stmt), p.expr(stmt.Tag), nil)
- if init != nil {
- n.SetInit([]ir.Node{init})
- }
-
- var tswitch *ir.TypeSwitchGuard
- if l := n.Tag; l != nil && l.Op() == ir.OTYPESW {
- tswitch = l.(*ir.TypeSwitchGuard)
- }
- n.Cases = p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)
-
- p.closeScope(stmt.Rbrace)
- return n
-}
-
-func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []*ir.CaseClause {
- nodes := make([]*ir.CaseClause, 0, len(clauses))
- for i, clause := range clauses {
- p.setlineno(clause)
- if i > 0 {
- p.closeScope(clause.Pos())
- }
- p.openScope(clause.Pos())
-
- n := ir.NewCaseStmt(p.pos(clause), p.exprList(clause.Cases), nil)
- if tswitch != nil && tswitch.Tag != nil {
- nn := typecheck.NewName(tswitch.Tag.Sym())
- typecheck.Declare(nn, typecheck.DeclContext)
- n.Var = nn
- // keep track of the instances for reporting unused
- nn.Defn = tswitch
- }
-
- // Trim trailing empty statements. We omit them from
- // the Node AST anyway, and it's easier to identify
- // out-of-place fallthrough statements without them.
- body := clause.Body
- for len(body) > 0 {
- if _, ok := body[len(body)-1].(*syntax.EmptyStmt); !ok {
- break
- }
- body = body[:len(body)-1]
- }
-
- n.Body = p.stmtsFall(body, true)
- if l := len(n.Body); l > 0 && n.Body[l-1].Op() == ir.OFALL {
- if tswitch != nil {
- base.Errorf("cannot fallthrough in type switch")
- }
- if i+1 == len(clauses) {
- base.Errorf("cannot fallthrough final case in switch")
- }
- }
-
- nodes = append(nodes, n)
- }
- if len(clauses) > 0 {
- p.closeScope(rbrace)
- }
- return nodes
-}
-
-func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
- return ir.NewSelectStmt(p.pos(stmt), p.commClauses(stmt.Body, stmt.Rbrace))
-}
-
-func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CommClause {
- nodes := make([]*ir.CommClause, len(clauses))
- for i, clause := range clauses {
- p.setlineno(clause)
- if i > 0 {
- p.closeScope(clause.Pos())
- }
- p.openScope(clause.Pos())
-
- nodes[i] = ir.NewCommStmt(p.pos(clause), p.stmt(clause.Comm), p.stmts(clause.Body))
- }
- if len(clauses) > 0 {
- p.closeScope(rbrace)
- }
- return nodes
-}
-
-func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
- sym := p.name(label.Label)
- lhs := ir.NewLabelStmt(p.pos(label), sym)
-
- var ls ir.Node
- if label.Stmt != nil { // TODO(mdempsky): Should always be present.
- ls = p.stmtFall(label.Stmt, fallOK)
- // Attach label directly to control statement too.
- if ls != nil {
- switch ls.Op() {
- case ir.OFOR:
- ls := ls.(*ir.ForStmt)
- ls.Label = sym
- case ir.ORANGE:
- ls := ls.(*ir.RangeStmt)
- ls.Label = sym
- case ir.OSWITCH:
- ls := ls.(*ir.SwitchStmt)
- ls.Label = sym
- case ir.OSELECT:
- ls := ls.(*ir.SelectStmt)
- ls.Label = sym
- }
- }
- }
-
- l := []ir.Node{lhs}
- if ls != nil {
- if ls.Op() == ir.OBLOCK {
- ls := ls.(*ir.BlockStmt)
- l = append(l, ls.List...)
- } else {
- l = append(l, ls)
- }
- }
- return ir.NewBlockStmt(src.NoXPos, l)
-}
-
var unOps = [...]ir.Op{
syntax.Recv: ir.ORECV,
syntax.Mul: ir.ODEREF,
@@ -1414,13 +151,6 @@ var unOps = [...]ir.Op{
syntax.Sub: ir.ONEG,
}
-func (p *noder) unOp(op syntax.Operator) ir.Op {
- if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 {
- panic("invalid Operator")
- }
- return unOps[op]
-}
-
var binOps = [...]ir.Op{
syntax.OrOr: ir.OOROR,
syntax.AndAnd: ir.OANDAND,
@@ -1446,96 +176,6 @@ var binOps = [...]ir.Op{
syntax.Shr: ir.ORSH,
}
-func (p *noder) binOp(op syntax.Operator) ir.Op {
- if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 {
- panic("invalid Operator")
- }
- return binOps[op]
-}
-
-// checkLangCompat reports an error if the representation of a numeric
-// literal is not compatible with the current language version.
-func checkLangCompat(lit *syntax.BasicLit) {
- s := lit.Value
- if len(s) <= 2 || types.AllowsGoVersion(types.LocalPkg, 1, 13) {
- return
- }
- // len(s) > 2
- if strings.Contains(s, "_") {
- base.ErrorfVers("go1.13", "underscores in numeric literals")
- return
- }
- if s[0] != '0' {
- return
- }
- radix := s[1]
- if radix == 'b' || radix == 'B' {
- base.ErrorfVers("go1.13", "binary literals")
- return
- }
- if radix == 'o' || radix == 'O' {
- base.ErrorfVers("go1.13", "0o/0O-style octal literals")
- return
- }
- if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') {
- base.ErrorfVers("go1.13", "hexadecimal floating-point literals")
- }
-}
-
-func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value {
- // We don't use the errors of the conversion routines to determine
- // if a literal string is valid because the conversion routines may
- // accept a wider syntax than the language permits. Rely on lit.Bad
- // instead.
- if lit.Bad {
- return constant.MakeUnknown()
- }
-
- switch lit.Kind {
- case syntax.IntLit, syntax.FloatLit, syntax.ImagLit:
- checkLangCompat(lit)
- // The max. mantissa precision for untyped numeric values
- // is 512 bits, or 4048 bits for each of the two integer
- // parts of a fraction for floating-point numbers that are
- // represented accurately in the go/constant package.
- // Constant literals that are longer than this many bits
- // are not meaningful; and excessively long constants may
- // consume a lot of space and time for a useless conversion.
- // Cap constant length with a generous upper limit that also
- // allows for separators between all digits.
- const limit = 10000
- if len(lit.Value) > limit {
- p.errorAt(lit.Pos(), "excessively long constant: %s... (%d chars)", lit.Value[:10], len(lit.Value))
- return constant.MakeUnknown()
- }
- }
-
- v := constant.MakeFromLiteral(lit.Value, tokenForLitKind[lit.Kind], 0)
- if v.Kind() == constant.Unknown {
- // TODO(mdempsky): Better error message?
- p.errorAt(lit.Pos(), "malformed constant: %s", lit.Value)
- }
-
- return v
-}
-
-var tokenForLitKind = [...]token.Token{
- syntax.IntLit: token.INT,
- syntax.RuneLit: token.CHAR,
- syntax.FloatLit: token.FLOAT,
- syntax.ImagLit: token.IMAG,
- syntax.StringLit: token.STRING,
-}
-
-func (p *noder) name(name *syntax.Name) *types.Sym {
- return typecheck.Lookup(name.Value)
-}
-
-func (p *noder) mkname(name *syntax.Name) ir.Node {
- // TODO(mdempsky): Set line number?
- return mkname(p.name(name))
-}
-
func wrapname(pos src.XPos, x ir.Node) ir.Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
@@ -1545,7 +185,7 @@ func wrapname(pos src.XPos, x ir.Node) ir.Node {
break
}
fallthrough
- case ir.ONAME, ir.ONONAME, ir.OPACK:
+ case ir.ONAME, ir.ONONAME:
p := ir.NewParenExpr(pos, x)
p.SetImplicit(true)
return p
@@ -1553,16 +193,6 @@ func wrapname(pos src.XPos, x ir.Node) ir.Node {
return x
}
-func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
- return wrapname(p.pos(n), x)
-}
-
-func (p *noder) setlineno(n syntax.Node) {
- if n != nil {
- base.Pos = p.pos(n)
- }
-}
-
// error is called concurrently if files are parsed concurrently.
func (p *noder) error(err error) {
p.err <- err.(syntax.Error)
@@ -1598,19 +228,6 @@ type pragmaEmbed struct {
Patterns []string
}
-func (p *noder) checkUnused(pragma *pragmas) {
- for _, pos := range pragma.Pos {
- if pos.Flag&pragma.Flag != 0 {
- p.errorAt(pos.Pos, "misplaced compiler directive")
- }
- }
- if len(pragma.Embeds) > 0 {
- for _, e := range pragma.Embeds {
- p.errorAt(e.Pos, "misplaced go:embed directive")
- }
- }
-}
-
func (p *noder) checkUnusedDuringParse(pragma *pragmas) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
@@ -1749,14 +366,6 @@ func safeArg(name string) bool {
return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
}
-func mkname(sym *types.Sym) ir.Node {
- n := oldname(sym)
- if n.Name() != nil && n.Name().PkgName != nil {
- n.Name().PkgName.Used = true
- }
- return n
-}
-
// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
// go/build/read.go also processes these strings and contains similar logic.
@@ -1818,21 +427,6 @@ func parseGoEmbed(args string) ([]string, error) {
return list, nil
}
-func fakeRecv() *ir.Field {
- return ir.NewField(base.Pos, nil, nil, types.FakeRecvType())
-}
-
-func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
- fn := ir.NewClosureFunc(p.pos(expr), ir.CurFunc != nil)
- fn.Nname.Ntype = p.typeExpr(expr.Type)
-
- p.funcBody(fn, expr.Body)
-
- ir.FinishCaptureNames(base.Pos, ir.CurFunc, fn)
-
- return fn.OClosure
-}
-
// A function named init is a special case.
// It is called by the initialization before main is run.
// To make it unique within a package and also uncallable,
@@ -1845,34 +439,6 @@ func renameinit() *types.Sym {
return s
}
-// oldname returns the Node that declares symbol s in the current scope.
-// If no such Node currently exists, an ONONAME Node is returned instead.
-// Automatically creates a new closure variable if the referenced symbol was
-// declared in a different (containing) function.
-func oldname(s *types.Sym) ir.Node {
- if s.Pkg != types.LocalPkg {
- return ir.NewIdent(base.Pos, s)
- }
-
- n := ir.AsNode(s.Def)
- if n == nil {
- // Maybe a top-level declaration will come along later to
- // define s. resolve will check s.Def again once all input
- // source has been processed.
- return ir.NewIdent(base.Pos, s)
- }
-
- if n, ok := n.(*ir.Name); ok {
- // TODO(rsc): If there is an outer variable x and we
- // are parsing x := 5 inside the closure, until we get to
- // the := it looks like a reference to the outer x so we'll
- // make x a closure variable unnecessarily.
- return ir.CaptureName(base.Pos, ir.CurFunc, n)
- }
-
- return n
-}
-
func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
pragmaEmbeds := pragma.Embeds
pragma.Embeds = nil
diff --git a/src/cmd/compile/internal/noder/quirks.go b/src/cmd/compile/internal/noder/quirks.go
index 914c5d2bd7..c4cb9b9a2c 100644
--- a/src/cmd/compile/internal/noder/quirks.go
+++ b/src/cmd/compile/internal/noder/quirks.go
@@ -9,254 +9,13 @@ package noder
import (
"fmt"
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
- "cmd/compile/internal/types2"
- "cmd/internal/src"
)
// This file defines helper functions useful for satisfying toolstash
// -cmp when compared against the legacy frontend behavior, but can be
// removed after that's no longer a concern.
-// quirksMode controls whether behavior specific to satisfying
-// toolstash -cmp is used.
-func quirksMode() bool {
- return base.Debug.UnifiedQuirks != 0
-}
-
-// posBasesOf returns all of the position bases in the source files,
-// as seen in a straightforward traversal.
-//
-// This is necessary to ensure position bases (and thus file names)
-// get registered in the same order as noder would visit them.
-func posBasesOf(noders []*noder) []*syntax.PosBase {
- seen := make(map[*syntax.PosBase]bool)
- var bases []*syntax.PosBase
-
- for _, p := range noders {
- syntax.Crawl(p.file, func(n syntax.Node) bool {
- if b := n.Pos().Base(); !seen[b] {
- bases = append(bases, b)
- seen[b] = true
- }
- return false
- })
- }
-
- return bases
-}
-
-// importedObjsOf returns the imported objects (i.e., referenced
-// objects not declared by curpkg) from the parsed source files, in
-// the order that typecheck used to load their definitions.
-//
-// This is needed because loading the definitions for imported objects
-// can also add file names.
-func importedObjsOf(curpkg *types2.Package, info *types2.Info, noders []*noder) []types2.Object {
- // This code is complex because it matches the precise order that
- // typecheck recursively and repeatedly traverses the IR. It's meant
- // to be thrown away eventually anyway.
-
- seen := make(map[types2.Object]bool)
- var objs []types2.Object
-
- var phase int
-
- decls := make(map[types2.Object]syntax.Decl)
- assoc := func(decl syntax.Decl, names ...*syntax.Name) {
- for _, name := range names {
- obj, ok := info.Defs[name]
- assert(ok)
- decls[obj] = decl
- }
- }
-
- for _, p := range noders {
- syntax.Crawl(p.file, func(n syntax.Node) bool {
- switch n := n.(type) {
- case *syntax.ConstDecl:
- assoc(n, n.NameList...)
- case *syntax.FuncDecl:
- assoc(n, n.Name)
- case *syntax.TypeDecl:
- assoc(n, n.Name)
- case *syntax.VarDecl:
- assoc(n, n.NameList...)
- case *syntax.BlockStmt:
- return true
- }
- return false
- })
- }
-
- var visited map[syntax.Decl]bool
-
- var resolveDecl func(n syntax.Decl)
- var resolveNode func(n syntax.Node, top bool)
-
- resolveDecl = func(n syntax.Decl) {
- if visited[n] {
- return
- }
- visited[n] = true
-
- switch n := n.(type) {
- case *syntax.ConstDecl:
- resolveNode(n.Type, true)
- resolveNode(n.Values, true)
-
- case *syntax.FuncDecl:
- if n.Recv != nil {
- resolveNode(n.Recv, true)
- }
- resolveNode(n.Type, true)
-
- case *syntax.TypeDecl:
- resolveNode(n.Type, true)
-
- case *syntax.VarDecl:
- if n.Type != nil {
- resolveNode(n.Type, true)
- } else {
- resolveNode(n.Values, true)
- }
- }
- }
-
- resolveObj := func(pos syntax.Pos, obj types2.Object) {
- switch obj.Pkg() {
- case nil:
- // builtin; nothing to do
-
- case curpkg:
- if decl, ok := decls[obj]; ok {
- resolveDecl(decl)
- }
-
- default:
- if obj.Parent() == obj.Pkg().Scope() && !seen[obj] {
- seen[obj] = true
- objs = append(objs, obj)
- }
- }
- }
-
- checkdefat := func(pos syntax.Pos, n *syntax.Name) {
- if n.Value == "_" {
- return
- }
- obj, ok := info.Uses[n]
- if !ok {
- obj, ok = info.Defs[n]
- if !ok {
- return
- }
- }
- if obj == nil {
- return
- }
- resolveObj(pos, obj)
- }
- checkdef := func(n *syntax.Name) { checkdefat(n.Pos(), n) }
-
- var later []syntax.Node
-
- resolveNode = func(n syntax.Node, top bool) {
- if n == nil {
- return
- }
- syntax.Crawl(n, func(n syntax.Node) bool {
- switch n := n.(type) {
- case *syntax.Name:
- checkdef(n)
-
- case *syntax.SelectorExpr:
- if name, ok := n.X.(*syntax.Name); ok {
- if _, isPkg := info.Uses[name].(*types2.PkgName); isPkg {
- checkdefat(n.X.Pos(), n.Sel)
- return true
- }
- }
-
- case *syntax.AssignStmt:
- resolveNode(n.Rhs, top)
- resolveNode(n.Lhs, top)
- return true
-
- case *syntax.VarDecl:
- resolveNode(n.Values, top)
-
- case *syntax.FuncLit:
- if top {
- resolveNode(n.Type, top)
- later = append(later, n.Body)
- return true
- }
-
- case *syntax.BlockStmt:
- if phase >= 3 {
- for _, stmt := range n.List {
- resolveNode(stmt, false)
- }
- }
- return true
- }
-
- return false
- })
- }
-
- for phase = 1; phase <= 5; phase++ {
- visited = map[syntax.Decl]bool{}
-
- for _, p := range noders {
- for _, decl := range p.file.DeclList {
- switch decl := decl.(type) {
- case *syntax.ConstDecl:
- resolveDecl(decl)
-
- case *syntax.FuncDecl:
- resolveDecl(decl)
- if phase >= 3 && decl.Body != nil {
- resolveNode(decl.Body, true)
- }
-
- case *syntax.TypeDecl:
- if !decl.Alias || phase >= 2 {
- resolveDecl(decl)
- }
-
- case *syntax.VarDecl:
- if phase >= 2 {
- resolveNode(decl.Values, true)
- resolveDecl(decl)
- }
- }
- }
-
- if phase >= 5 {
- syntax.Crawl(p.file, func(n syntax.Node) bool {
- if name, ok := n.(*syntax.Name); ok {
- if obj, ok := info.Uses[name]; ok {
- resolveObj(name.Pos(), obj)
- }
- }
- return false
- })
- }
- }
-
- for i := 0; i < len(later); i++ {
- resolveNode(later[i], true)
- }
- later = nil
- }
-
- return objs
-}
-
// typeExprEndPos returns the position that noder would leave base.Pos
// after parsing the given type expression.
func typeExprEndPos(expr0 syntax.Expr) syntax.Pos {
@@ -320,131 +79,3 @@ func lastFieldType(fields []*syntax.Field) syntax.Expr {
}
return fields[len(fields)-1].Type
}
-
-// sumPos returns the position that noder.sum would produce for
-// constant expression x.
-func sumPos(x syntax.Expr) syntax.Pos {
- orig := x
- for {
- switch x1 := x.(type) {
- case *syntax.BasicLit:
- assert(x1.Kind == syntax.StringLit)
- return x1.Pos()
- case *syntax.Operation:
- assert(x1.Op == syntax.Add && x1.Y != nil)
- if r, ok := x1.Y.(*syntax.BasicLit); ok {
- assert(r.Kind == syntax.StringLit)
- x = x1.X
- continue
- }
- }
- return orig.Pos()
- }
-}
-
-// funcParamsEndPos returns the value of base.Pos left by noder after
-// processing a function signature.
-func funcParamsEndPos(fn *ir.Func) src.XPos {
- sig := fn.Nname.Type()
-
- fields := sig.Results().FieldSlice()
- if len(fields) == 0 {
- fields = sig.Params().FieldSlice()
- if len(fields) == 0 {
- fields = sig.Recvs().FieldSlice()
- if len(fields) == 0 {
- if fn.OClosure != nil {
- return fn.Nname.Ntype.Pos()
- }
- return fn.Pos()
- }
- }
- }
-
- return fields[len(fields)-1].Pos
-}
-
-type dupTypes struct {
- origs map[types2.Type]types2.Type
-}
-
-func (d *dupTypes) orig(t types2.Type) types2.Type {
- if orig, ok := d.origs[t]; ok {
- return orig
- }
- return t
-}
-
-func (d *dupTypes) add(t, orig types2.Type) {
- if t == orig {
- return
- }
-
- if d.origs == nil {
- d.origs = make(map[types2.Type]types2.Type)
- }
- assert(d.origs[t] == nil)
- d.origs[t] = orig
-
- switch t := t.(type) {
- case *types2.Pointer:
- orig := orig.(*types2.Pointer)
- d.add(t.Elem(), orig.Elem())
-
- case *types2.Slice:
- orig := orig.(*types2.Slice)
- d.add(t.Elem(), orig.Elem())
-
- case *types2.Map:
- orig := orig.(*types2.Map)
- d.add(t.Key(), orig.Key())
- d.add(t.Elem(), orig.Elem())
-
- case *types2.Array:
- orig := orig.(*types2.Array)
- assert(t.Len() == orig.Len())
- d.add(t.Elem(), orig.Elem())
-
- case *types2.Chan:
- orig := orig.(*types2.Chan)
- assert(t.Dir() == orig.Dir())
- d.add(t.Elem(), orig.Elem())
-
- case *types2.Struct:
- orig := orig.(*types2.Struct)
- assert(t.NumFields() == orig.NumFields())
- for i := 0; i < t.NumFields(); i++ {
- d.add(t.Field(i).Type(), orig.Field(i).Type())
- }
-
- case *types2.Interface:
- orig := orig.(*types2.Interface)
- assert(t.NumExplicitMethods() == orig.NumExplicitMethods())
- assert(t.NumEmbeddeds() == orig.NumEmbeddeds())
- for i := 0; i < t.NumExplicitMethods(); i++ {
- d.add(t.ExplicitMethod(i).Type(), orig.ExplicitMethod(i).Type())
- }
- for i := 0; i < t.NumEmbeddeds(); i++ {
- d.add(t.EmbeddedType(i), orig.EmbeddedType(i))
- }
-
- case *types2.Signature:
- orig := orig.(*types2.Signature)
- assert((t.Recv() == nil) == (orig.Recv() == nil))
- if t.Recv() != nil {
- d.add(t.Recv().Type(), orig.Recv().Type())
- }
- d.add(t.Params(), orig.Params())
- d.add(t.Results(), orig.Results())
-
- case *types2.Tuple:
- orig := orig.(*types2.Tuple)
- assert(t.Len() == orig.Len())
- for i := 0; i < t.Len(); i++ {
- d.add(t.At(i).Type(), orig.At(i).Type())
- }
-
- default:
- assert(types2.Identical(t, orig))
- }
-}
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 5d17c534c1..2b1636588e 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -11,6 +11,7 @@ import (
"fmt"
"go/constant"
"internal/buildcfg"
+ "internal/pkgbits"
"strings"
"cmd/compile/internal/base"
@@ -32,7 +33,7 @@ import (
// this until after that's done.
type pkgReader struct {
- pkgDecoder
+ pkgbits.PkgDecoder
posBases []*src.PosBase
pkgs []*types.Pkg
@@ -43,15 +44,15 @@ type pkgReader struct {
newindex []int
}
-func newPkgReader(pr pkgDecoder) *pkgReader {
+func newPkgReader(pr pkgbits.PkgDecoder) *pkgReader {
return &pkgReader{
- pkgDecoder: pr,
+ PkgDecoder: pr,
- posBases: make([]*src.PosBase, pr.numElems(relocPosBase)),
- pkgs: make([]*types.Pkg, pr.numElems(relocPkg)),
- typs: make([]*types.Type, pr.numElems(relocType)),
+ posBases: make([]*src.PosBase, pr.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types.Pkg, pr.NumElems(pkgbits.RelocPkg)),
+ typs: make([]*types.Type, pr.NumElems(pkgbits.RelocType)),
- newindex: make([]int, pr.totalElems()),
+ newindex: make([]int, pr.TotalElems()),
}
}
@@ -61,21 +62,21 @@ type pkgReaderIndex struct {
dict *readerDict
}
-func (pri pkgReaderIndex) asReader(k reloc, marker syncMarker) *reader {
+func (pri pkgReaderIndex) asReader(k pkgbits.RelocKind, marker pkgbits.SyncMarker) *reader {
r := pri.pr.newReader(k, pri.idx, marker)
r.dict = pri.dict
return r
}
-func (pr *pkgReader) newReader(k reloc, idx int, marker syncMarker) *reader {
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx int, marker pkgbits.SyncMarker) *reader {
return &reader{
- decoder: pr.newDecoder(k, idx, marker),
+ Decoder: pr.NewDecoder(k, idx, marker),
p: pr,
}
}
type reader struct {
- decoder
+ pkgbits.Decoder
p *pkgReader
@@ -170,19 +171,19 @@ func (r *reader) pos() src.XPos {
}
func (r *reader) pos0() src.Pos {
- r.sync(syncPos)
- if !r.bool() {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
return src.NoPos
}
posBase := r.posBase()
- line := r.uint()
- col := r.uint()
+ line := r.Uint()
+ col := r.Uint()
return src.MakePos(posBase, line, col)
}
func (r *reader) posBase() *src.PosBase {
- return r.inlPosBase(r.p.posBaseIdx(r.reloc(relocPosBase)))
+ return r.inlPosBase(r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)))
}
func (pr *pkgReader) posBaseIdx(idx int) *src.PosBase {
@@ -190,10 +191,10 @@ func (pr *pkgReader) posBaseIdx(idx int) *src.PosBase {
return b
}
- r := pr.newReader(relocPosBase, idx, syncPosBase)
+ r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
var b *src.PosBase
- absFilename := r.string()
+ absFilename := r.String()
filename := absFilename
// For build artifact stability, the export data format only
@@ -212,12 +213,12 @@ func (pr *pkgReader) posBaseIdx(idx int) *src.PosBase {
filename = buildcfg.GOROOT + filename[len(dollarGOROOT):]
}
- if r.bool() {
+ if r.Bool() {
b = src.NewFileBase(filename, absFilename)
} else {
pos := r.pos0()
- line := r.uint()
- col := r.uint()
+ line := r.Uint()
+ col := r.Uint()
b = src.NewLinePragmaBase(pos, filename, absFilename, line, col)
}
@@ -265,8 +266,8 @@ func (r *reader) origPos(xpos src.XPos) src.XPos {
// @@@ Packages
func (r *reader) pkg() *types.Pkg {
- r.sync(syncPkg)
- return r.p.pkgIdx(r.reloc(relocPkg))
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
}
func (pr *pkgReader) pkgIdx(idx int) *types.Pkg {
@@ -274,22 +275,22 @@ func (pr *pkgReader) pkgIdx(idx int) *types.Pkg {
return pkg
}
- pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
pr.pkgs[idx] = pkg
return pkg
}
func (r *reader) doPkg() *types.Pkg {
- path := r.string()
+ path := r.String()
if path == "builtin" {
return types.BuiltinPkg
}
if path == "" {
- path = r.p.pkgPath
+ path = r.p.PkgPath()
}
- name := r.string()
- height := r.len()
+ name := r.String()
+ height := r.Len()
pkg := types.NewPkg(path, "")
@@ -321,11 +322,11 @@ func (r *reader) typWrapped(wrapped bool) *types.Type {
}
func (r *reader) typInfo() typeInfo {
- r.sync(syncType)
- if r.bool() {
- return typeInfo{idx: r.len(), derived: true}
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: r.Len(), derived: true}
}
- return typeInfo{idx: r.reloc(relocType), derived: false}
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
}
func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict, wrapped bool) *types.Type {
@@ -342,7 +343,7 @@ func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict, wrapped bool) *type
return typ
}
- r := pr.newReader(relocType, idx, syncTypeIdx)
+ r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
r.dict = dict
typ := r.doTyp()
@@ -408,38 +409,38 @@ func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict, wrapped bool) *type
}
func (r *reader) doTyp() *types.Type {
- switch tag := codeType(r.code(syncType)); tag {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
default:
panic(fmt.Sprintf("unexpected type: %v", tag))
- case typeBasic:
- return *basics[r.len()]
+ case pkgbits.TypeBasic:
+ return *basics[r.Len()]
- case typeNamed:
+ case pkgbits.TypeNamed:
obj := r.obj()
assert(obj.Op() == ir.OTYPE)
return obj.Type()
- case typeTypeParam:
- return r.dict.targs[r.len()]
+ case pkgbits.TypeTypeParam:
+ return r.dict.targs[r.Len()]
- case typeArray:
- len := int64(r.uint64())
+ case pkgbits.TypeArray:
+ len := int64(r.Uint64())
return types.NewArray(r.typ(), len)
- case typeChan:
- dir := dirs[r.len()]
+ case pkgbits.TypeChan:
+ dir := dirs[r.Len()]
return types.NewChan(r.typ(), dir)
- case typeMap:
+ case pkgbits.TypeMap:
return types.NewMap(r.typ(), r.typ())
- case typePointer:
+ case pkgbits.TypePointer:
return types.NewPtr(r.typ())
- case typeSignature:
+ case pkgbits.TypeSignature:
return r.signature(types.LocalPkg, nil)
- case typeSlice:
+ case pkgbits.TypeSlice:
return types.NewSlice(r.typ())
- case typeStruct:
+ case pkgbits.TypeStruct:
return r.structType()
- case typeInterface:
+ case pkgbits.TypeInterface:
return r.interfaceType()
}
}
@@ -447,7 +448,9 @@ func (r *reader) doTyp() *types.Type {
func (r *reader) interfaceType() *types.Type {
tpkg := types.LocalPkg // TODO(mdempsky): Remove after iexport is gone.
- nmethods, nembeddeds := r.len(), r.len()
+ nmethods, nembeddeds := r.Len(), r.Len()
+ implicit := nmethods == 0 && nembeddeds == 1 && r.Bool()
+ assert(!implicit) // implicit interfaces only appear in constraints
fields := make([]*types.Field, nmethods+nembeddeds)
methods, embeddeds := fields[:nmethods], fields[nmethods:]
@@ -471,14 +474,14 @@ func (r *reader) interfaceType() *types.Type {
func (r *reader) structType() *types.Type {
tpkg := types.LocalPkg // TODO(mdempsky): Remove after iexport is gone.
- fields := make([]*types.Field, r.len())
+ fields := make([]*types.Field, r.Len())
for i := range fields {
pos := r.pos()
pkg, sym := r.selector()
tpkg = pkg
ftyp := r.typ()
- tag := r.string()
- embedded := r.bool()
+ tag := r.String()
+ embedded := r.Bool()
f := types.NewField(pos, sym, ftyp)
f.Note = tag
@@ -491,11 +494,11 @@ func (r *reader) structType() *types.Type {
}
func (r *reader) signature(tpkg *types.Pkg, recv *types.Field) *types.Type {
- r.sync(syncSignature)
+ r.Sync(pkgbits.SyncSignature)
params := r.params(&tpkg)
results := r.params(&tpkg)
- if r.bool() { // variadic
+ if r.Bool() { // variadic
params[len(params)-1].SetIsDDD(true)
}
@@ -503,8 +506,8 @@ func (r *reader) signature(tpkg *types.Pkg, recv *types.Field) *types.Type {
}
func (r *reader) params(tpkg **types.Pkg) []*types.Field {
- r.sync(syncParams)
- fields := make([]*types.Field, r.len())
+ r.Sync(pkgbits.SyncParams)
+ fields := make([]*types.Field, r.Len())
for i := range fields {
*tpkg, fields[i] = r.param()
}
@@ -512,7 +515,7 @@ func (r *reader) params(tpkg **types.Pkg) []*types.Field {
}
func (r *reader) param() (*types.Pkg, *types.Field) {
- r.sync(syncParam)
+ r.Sync(pkgbits.SyncParam)
pos := r.pos()
pkg, sym := r.localIdent()
@@ -526,10 +529,10 @@ func (r *reader) param() (*types.Pkg, *types.Field) {
var objReader = map[*types.Sym]pkgReaderIndex{}
func (r *reader) obj() ir.Node {
- r.sync(syncObject)
+ r.Sync(pkgbits.SyncObject)
- if r.bool() {
- idx := r.len()
+ if r.Bool() {
+ idx := r.Len()
obj := r.dict.funcsObj[idx]
if obj == nil {
fn := r.dict.funcs[idx]
@@ -545,9 +548,9 @@ func (r *reader) obj() ir.Node {
return obj
}
- idx := r.reloc(relocObj)
+ idx := r.Reloc(pkgbits.RelocObj)
- explicits := make([]*types.Type, r.len())
+ explicits := make([]*types.Type, r.Len())
for i := range explicits {
explicits[i] = r.typ()
}
@@ -561,11 +564,11 @@ func (r *reader) obj() ir.Node {
}
func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node {
- rname := pr.newReader(relocName, idx, syncObject1)
+ rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
_, sym := rname.qualifiedIdent()
- tag := codeObj(rname.code(syncCodeObj))
+ tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
- if tag == objStub {
+ if tag == pkgbits.ObjStub {
assert(!sym.IsBlank())
switch sym.Pkg {
case types.BuiltinPkg, types.UnsafePkg:
@@ -583,8 +586,8 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
dict := pr.objDictIdx(sym, idx, implicits, explicits)
- r := pr.newReader(relocObj, idx, syncObject1)
- rext := pr.newReader(relocObjExt, idx, syncObject1)
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ rext := pr.newReader(pkgbits.RelocObjExt, idx, pkgbits.SyncObject1)
r.dict = dict
rext.dict = dict
@@ -616,21 +619,21 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
default:
panic("unexpected object")
- case objAlias:
+ case pkgbits.ObjAlias:
name := do(ir.OTYPE, false)
setType(name, r.typ())
name.SetAlias(true)
return name
- case objConst:
+ case pkgbits.ObjConst:
name := do(ir.OLITERAL, false)
typ := r.typ()
- val := FixValue(typ, r.value())
+ val := FixValue(typ, r.Value())
setType(name, typ)
setValue(name, val)
return name
- case objFunc:
+ case pkgbits.ObjFunc:
if sym.Name == "init" {
sym = renameinit()
}
@@ -643,7 +646,7 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
rext.funcExt(name)
return name
- case objType:
+ case pkgbits.ObjType:
name := do(ir.OTYPE, true)
typ := types.NewNamed(name)
setType(name, typ)
@@ -657,7 +660,7 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
typ.SetUnderlying(r.typWrapped(false))
types.ResumeCheckSize()
- methods := make([]*types.Field, r.len())
+ methods := make([]*types.Field, r.Len())
for i := range methods {
methods[i] = r.method(rext)
}
@@ -669,7 +672,7 @@ func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node
return name
- case objVar:
+ case pkgbits.ObjVar:
name := do(ir.ONAME, false)
setType(name, r.typ())
rext.varExt(name)
@@ -700,12 +703,12 @@ func (r *reader) mangle(sym *types.Sym) *types.Sym {
}
func (pr *pkgReader) objDictIdx(sym *types.Sym, idx int, implicits, explicits []*types.Type) *readerDict {
- r := pr.newReader(relocObjDict, idx, syncObject1)
+ r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
var dict readerDict
- nimplicits := r.len()
- nexplicits := r.len()
+ nimplicits := r.Len()
+ nexplicits := r.Len()
if nimplicits > len(implicits) || nexplicits != len(explicits) {
base.Fatalf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
@@ -717,25 +720,25 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx int, implicits, explicits []
// For stenciling, we can just skip over the type parameters.
for range dict.targs[dict.implicits:] {
// Skip past bounds without actually evaluating them.
- r.sync(syncType)
- if r.bool() {
- r.len()
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ r.Len()
} else {
- r.reloc(relocType)
+ r.Reloc(pkgbits.RelocType)
}
}
- dict.derived = make([]derivedInfo, r.len())
+ dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]*types.Type, len(dict.derived))
for i := range dict.derived {
- dict.derived[i] = derivedInfo{r.reloc(relocType), r.bool()}
+ dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
}
- dict.funcs = make([]objInfo, r.len())
+ dict.funcs = make([]objInfo, r.Len())
dict.funcsObj = make([]ir.Node, len(dict.funcs))
for i := range dict.funcs {
- objIdx := r.reloc(relocObj)
- targs := make([]typeInfo, r.len())
+ objIdx := r.Reloc(pkgbits.RelocObj)
+ targs := make([]typeInfo, r.Len())
for j := range targs {
targs[j] = r.typInfo()
}
@@ -746,7 +749,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx int, implicits, explicits []
}
func (r *reader) typeParamNames() {
- r.sync(syncTypeParamNames)
+ r.Sync(pkgbits.SyncTypeParamNames)
for range r.dict.targs[r.dict.implicits:] {
r.pos()
@@ -755,7 +758,7 @@ func (r *reader) typeParamNames() {
}
func (r *reader) method(rext *reader) *types.Field {
- r.sync(syncMethod)
+ r.Sync(pkgbits.SyncMethod)
pos := r.pos()
pkg, sym := r.selector()
r.typeParamNames()
@@ -780,27 +783,27 @@ func (r *reader) method(rext *reader) *types.Field {
}
func (r *reader) qualifiedIdent() (pkg *types.Pkg, sym *types.Sym) {
- r.sync(syncSym)
+ r.Sync(pkgbits.SyncSym)
pkg = r.pkg()
- if name := r.string(); name != "" {
+ if name := r.String(); name != "" {
sym = pkg.Lookup(name)
}
return
}
func (r *reader) localIdent() (pkg *types.Pkg, sym *types.Sym) {
- r.sync(syncLocalIdent)
+ r.Sync(pkgbits.SyncLocalIdent)
pkg = r.pkg()
- if name := r.string(); name != "" {
+ if name := r.String(); name != "" {
sym = pkg.Lookup(name)
}
return
}
func (r *reader) selector() (origPkg *types.Pkg, sym *types.Sym) {
- r.sync(syncSelector)
+ r.Sync(pkgbits.SyncSelector)
origPkg = r.pkg()
- name := r.string()
+ name := r.String()
pkg := origPkg
if types.IsExported(name) {
pkg = types.LocalPkg
@@ -820,7 +823,7 @@ func (dict *readerDict) hasTypeParams() bool {
// @@@ Compiler extensions
func (r *reader) funcExt(name *ir.Name) {
- r.sync(syncFuncExt)
+ r.Sync(pkgbits.SyncFuncExt)
name.Class = 0 // so MarkFunc doesn't complain
ir.MarkFunc(name)
@@ -848,31 +851,31 @@ func (r *reader) funcExt(name *ir.Name) {
typecheck.Func(fn)
- if r.bool() {
- fn.ABI = obj.ABI(r.uint64())
+ if r.Bool() {
+ fn.ABI = obj.ABI(r.Uint64())
// Escape analysis.
for _, fs := range &types.RecvsParams {
for _, f := range fs(name.Type()).FieldSlice() {
- f.Note = r.string()
+ f.Note = r.String()
}
}
- if r.bool() {
+ if r.Bool() {
fn.Inl = &ir.Inline{
- Cost: int32(r.len()),
- CanDelayResults: r.bool(),
+ Cost: int32(r.Len()),
+ CanDelayResults: r.Bool(),
}
r.addBody(name.Func)
}
} else {
r.addBody(name.Func)
}
- r.sync(syncEOF)
+ r.Sync(pkgbits.SyncEOF)
}
func (r *reader) typeExt(name *ir.Name) {
- r.sync(syncTypeExt)
+ r.Sync(pkgbits.SyncTypeExt)
typ := name.Type()
@@ -891,30 +894,30 @@ func (r *reader) typeExt(name *ir.Name) {
typ.SetNotInHeap(true)
}
- typecheck.SetBaseTypeIndex(typ, r.int64(), r.int64())
+ typecheck.SetBaseTypeIndex(typ, r.Int64(), r.Int64())
}
func (r *reader) varExt(name *ir.Name) {
- r.sync(syncVarExt)
+ r.Sync(pkgbits.SyncVarExt)
r.linkname(name)
}
func (r *reader) linkname(name *ir.Name) {
assert(name.Op() == ir.ONAME)
- r.sync(syncLinkname)
+ r.Sync(pkgbits.SyncLinkname)
- if idx := r.int64(); idx >= 0 {
+ if idx := r.Int64(); idx >= 0 {
lsym := name.Linksym()
lsym.SymIdx = int32(idx)
lsym.Set(obj.AttrIndexed, true)
} else {
- name.Sym().Linkname = r.string()
+ name.Sym().Linkname = r.String()
}
}
func (r *reader) pragmaFlag() ir.PragmaFlag {
- r.sync(syncPragma)
- return ir.PragmaFlag(r.int())
+ r.Sync(pkgbits.SyncPragma)
+ return ir.PragmaFlag(r.Int())
}
// @@@ Function bodies
@@ -933,7 +936,7 @@ var todoBodies []*ir.Func
var todoBodiesDone = false
func (r *reader) addBody(fn *ir.Func) {
- pri := pkgReaderIndex{r.p, r.reloc(relocBody), r.dict}
+ pri := pkgReaderIndex{r.p, r.Reloc(pkgbits.RelocBody), r.dict}
bodyReader[fn] = pri
if fn.Nname.Defn == nil {
@@ -951,7 +954,7 @@ func (r *reader) addBody(fn *ir.Func) {
}
func (pri pkgReaderIndex) funcBody(fn *ir.Func) {
- r := pri.asReader(relocBody, syncFuncBody)
+ r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
r.funcBody(fn)
}
@@ -962,17 +965,13 @@ func (r *reader) funcBody(fn *ir.Func) {
ir.WithFunc(fn, func() {
r.funcargs(fn)
- if !r.bool() {
+ if !r.Bool() {
return
}
body := r.stmts()
if body == nil {
- pos := src.NoXPos
- if quirksMode() {
- pos = funcParamsEndPos(fn)
- }
- body = []ir.Node{typecheck.Stmt(ir.NewBlockStmt(pos, nil))}
+ body = []ir.Node{typecheck.Stmt(ir.NewBlockStmt(src.NoXPos, nil))}
}
fn.Body = body
fn.Endlineno = r.pos()
@@ -1038,9 +1037,9 @@ func (r *reader) funcarg(param *types.Field, sym *types.Sym, ctxt ir.Class) {
func (r *reader) addLocal(name *ir.Name, ctxt ir.Class) {
assert(ctxt == ir.PAUTO || ctxt == ir.PPARAM || ctxt == ir.PPARAMOUT)
- r.sync(syncAddLocal)
- if enableSync {
- want := r.int()
+ r.Sync(pkgbits.SyncAddLocal)
+ if pkgbits.EnableSync {
+ want := r.Int()
if have := len(r.locals); have != want {
base.FatalfAt(name.Pos(), "locals table has desynced")
}
@@ -1081,15 +1080,15 @@ func (r *reader) addLocal(name *ir.Name, ctxt ir.Class) {
}
func (r *reader) useLocal() *ir.Name {
- r.sync(syncUseObjLocal)
- if r.bool() {
- return r.locals[r.len()]
+ r.Sync(pkgbits.SyncUseObjLocal)
+ if r.Bool() {
+ return r.locals[r.Len()]
}
- return r.closureVars[r.len()]
+ return r.closureVars[r.Len()]
}
func (r *reader) openScope() {
- r.sync(syncOpenScope)
+ r.Sync(pkgbits.SyncOpenScope)
pos := r.pos()
if base.Flag.Dwarf {
@@ -1099,7 +1098,7 @@ func (r *reader) openScope() {
}
func (r *reader) closeScope() {
- r.sync(syncCloseScope)
+ r.Sync(pkgbits.SyncCloseScope)
r.lastCloseScopePos = r.pos()
r.closeAnotherScope()
@@ -1110,7 +1109,7 @@ func (r *reader) closeScope() {
// "if" statements, as their implicit blocks always end at the same
// position as an explicit block.
func (r *reader) closeAnotherScope() {
- r.sync(syncCloseAnotherScope)
+ r.Sync(pkgbits.SyncCloseAnotherScope)
if base.Flag.Dwarf {
scopeVars := r.scopeVars[len(r.scopeVars)-1]
@@ -1177,11 +1176,11 @@ func (r *reader) stmts() []ir.Node {
assert(ir.CurFunc == r.curfn)
var res ir.Nodes
- r.sync(syncStmts)
+ r.Sync(pkgbits.SyncStmts)
for {
- tag := codeStmt(r.code(syncStmt1))
+ tag := codeStmt(r.Code(pkgbits.SyncStmt1))
if tag == stmtEnd {
- r.sync(syncStmtsEnd)
+ r.Sync(pkgbits.SyncStmtsEnd)
return res
}
@@ -1291,27 +1290,15 @@ func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node {
case stmtSwitch:
return r.switchStmt(label)
-
- case stmtTypeDeclHack:
- // fake "type _ = int" declaration to prevent inlining in quirks mode.
- assert(quirksMode())
-
- name := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.BlankNode.Sym())
- name.SetAlias(true)
- setType(name, types.Types[types.TINT])
-
- n := ir.NewDecl(src.NoXPos, ir.ODCLTYPE, name)
- n.SetTypecheck(1)
- return n
}
}
func (r *reader) assignList() ([]*ir.Name, []ir.Node) {
- lhs := make([]ir.Node, r.len())
+ lhs := make([]ir.Node, r.Len())
var names []*ir.Name
for i := range lhs {
- if r.bool() {
+ if r.Bool() {
pos := r.pos()
_, sym := r.localIdent()
typ := r.typ()
@@ -1331,7 +1318,7 @@ func (r *reader) assignList() ([]*ir.Name, []ir.Node) {
}
func (r *reader) blockStmt() []ir.Node {
- r.sync(syncBlockStmt)
+ r.Sync(pkgbits.SyncBlockStmt)
r.openScope()
stmts := r.stmts()
r.closeScope()
@@ -1339,11 +1326,11 @@ func (r *reader) blockStmt() []ir.Node {
}
func (r *reader) forStmt(label *types.Sym) ir.Node {
- r.sync(syncForStmt)
+ r.Sync(pkgbits.SyncForStmt)
r.openScope()
- if r.bool() {
+ if r.Bool() {
pos := r.pos()
// TODO(mdempsky): After quirks mode is gone, swap these
@@ -1379,7 +1366,7 @@ func (r *reader) forStmt(label *types.Sym) ir.Node {
}
func (r *reader) ifStmt() ir.Node {
- r.sync(syncIfStmt)
+ r.Sync(pkgbits.SyncIfStmt)
r.openScope()
pos := r.pos()
init := r.stmts()
@@ -1393,10 +1380,10 @@ func (r *reader) ifStmt() ir.Node {
}
func (r *reader) selectStmt(label *types.Sym) ir.Node {
- r.sync(syncSelectStmt)
+ r.Sync(pkgbits.SyncSelectStmt)
pos := r.pos()
- clauses := make([]*ir.CommClause, r.len())
+ clauses := make([]*ir.CommClause, r.Len())
for i := range clauses {
if i > 0 {
r.closeScope()
@@ -1418,19 +1405,19 @@ func (r *reader) selectStmt(label *types.Sym) ir.Node {
}
func (r *reader) switchStmt(label *types.Sym) ir.Node {
- r.sync(syncSwitchStmt)
+ r.Sync(pkgbits.SyncSwitchStmt)
r.openScope()
pos := r.pos()
init := r.stmt()
var tag ir.Node
- if r.bool() {
+ if r.Bool() {
pos := r.pos()
var ident *ir.Ident
- if r.bool() {
+ if r.Bool() {
pos := r.pos()
- sym := typecheck.Lookup(r.string())
+ sym := typecheck.Lookup(r.String())
ident = ir.NewIdent(pos, sym)
}
x := r.expr()
@@ -1444,7 +1431,7 @@ func (r *reader) switchStmt(label *types.Sym) ir.Node {
tswitch = nil
}
- clauses := make([]*ir.CaseClause, r.len())
+ clauses := make([]*ir.CaseClause, r.Len())
for i := range clauses {
if i > 0 {
r.closeScope()
@@ -1483,8 +1470,8 @@ func (r *reader) switchStmt(label *types.Sym) ir.Node {
}
func (r *reader) label() *types.Sym {
- r.sync(syncLabel)
- name := r.string()
+ r.Sync(pkgbits.SyncLabel)
+ name := r.String()
if r.inlCall != nil {
name = fmt.Sprintf("~%s·%d", name, inlgen)
}
@@ -1492,8 +1479,8 @@ func (r *reader) label() *types.Sym {
}
func (r *reader) optLabel() *types.Sym {
- r.sync(syncOptLabel)
- if r.bool() {
+ r.Sync(pkgbits.SyncOptLabel)
+ if r.Bool() {
return r.label()
}
return nil
@@ -1526,7 +1513,7 @@ func (r *reader) expr() (res ir.Node) {
}
}()
- switch tag := codeExpr(r.code(syncExpr)); tag {
+ switch tag := codeExpr(r.Code(pkgbits.SyncExpr)); tag {
default:
panic("unhandled expression")
@@ -1555,9 +1542,9 @@ func (r *reader) expr() (res ir.Node) {
case exprConst:
pos := r.pos()
typ := r.typ()
- val := FixValue(typ, r.value())
+ val := FixValue(typ, r.Value())
op := r.op()
- orig := r.string()
+ orig := r.String()
return typecheck.Expr(OrigConst(pos, typ, val, op, orig))
case exprCompLit:
@@ -1636,14 +1623,14 @@ func (r *reader) expr() (res ir.Node) {
case exprCall:
fun := r.expr()
- if r.bool() { // method call
+ if r.Bool() { // method call
pos := r.pos()
_, sym := r.selector()
fun = typecheck.Callee(ir.NewSelectorExpr(pos, ir.OXDOT, fun, sym))
}
pos := r.pos()
args := r.exprs()
- dots := r.bool()
+ dots := r.Bool()
return typecheck.Call(pos, fun, args, dots)
case exprConvert:
@@ -1655,7 +1642,7 @@ func (r *reader) expr() (res ir.Node) {
}
func (r *reader) compLit() ir.Node {
- r.sync(syncCompLit)
+ r.Sync(pkgbits.SyncCompLit)
pos := r.pos()
typ0 := r.typ()
@@ -1668,14 +1655,14 @@ func (r *reader) compLit() ir.Node {
}
isStruct := typ.Kind() == types.TSTRUCT
- elems := make([]ir.Node, r.len())
+ elems := make([]ir.Node, r.Len())
for i := range elems {
elemp := &elems[i]
if isStruct {
- sk := ir.NewStructKeyExpr(r.pos(), typ.Field(r.len()), nil)
+ sk := ir.NewStructKeyExpr(r.pos(), typ.Field(r.Len()), nil)
*elemp, elemp = sk, &sk.Value
- } else if r.bool() {
+ } else if r.Bool() {
kv := ir.NewKeyExpr(r.pos(), r.expr(), nil)
*elemp, elemp = kv, &kv.Value
}
@@ -1700,7 +1687,7 @@ func wrapName(pos src.XPos, x ir.Node) ir.Node {
break
}
fallthrough
- case ir.ONAME, ir.ONONAME, ir.OPACK, ir.ONIL:
+ case ir.ONAME, ir.ONONAME, ir.ONIL:
p := ir.NewParenExpr(pos, x)
p.SetImplicit(true)
return p
@@ -1709,29 +1696,22 @@ func wrapName(pos src.XPos, x ir.Node) ir.Node {
}
func (r *reader) funcLit() ir.Node {
- r.sync(syncFuncLit)
+ r.Sync(pkgbits.SyncFuncLit)
pos := r.pos()
- typPos := r.pos()
xtype2 := r.signature(types.LocalPkg, nil)
opos := pos
- if quirksMode() {
- opos = r.origPos(pos)
- }
fn := ir.NewClosureFunc(opos, r.curfn != nil)
clo := fn.OClosure
ir.NameClosure(clo, r.curfn)
setType(fn.Nname, xtype2)
- if quirksMode() {
- fn.Nname.Ntype = ir.TypeNodeAt(typPos, xtype2)
- }
typecheck.Func(fn)
setType(clo, fn.Type())
- fn.ClosureVars = make([]*ir.Name, 0, r.len())
+ fn.ClosureVars = make([]*ir.Name, 0, r.Len())
for len(fn.ClosureVars) < cap(fn.ClosureVars) {
ir.NewClosureVar(r.pos(), fn, r.useLocal())
}
@@ -1743,13 +1723,13 @@ func (r *reader) funcLit() ir.Node {
}
func (r *reader) exprList() []ir.Node {
- r.sync(syncExprList)
+ r.Sync(pkgbits.SyncExprList)
return r.exprs()
}
func (r *reader) exprs() []ir.Node {
- r.sync(syncExprs)
- nodes := make([]ir.Node, r.len())
+ r.Sync(pkgbits.SyncExprs)
+ nodes := make([]ir.Node, r.Len())
if len(nodes) == 0 {
return nil // TODO(mdempsky): Unclear if this matters.
}
@@ -1760,45 +1740,28 @@ func (r *reader) exprs() []ir.Node {
}
func (r *reader) op() ir.Op {
- r.sync(syncOp)
- return ir.Op(r.len())
+ r.Sync(pkgbits.SyncOp)
+ return ir.Op(r.Len())
}
// @@@ Package initialization
func (r *reader) pkgInit(self *types.Pkg, target *ir.Package) {
- if quirksMode() {
- for i, n := 0, r.len(); i < n; i++ {
- // Eagerly register position bases, so their filenames are
- // assigned stable indices.
- posBase := r.posBase()
- _ = base.Ctxt.PosTable.XPos(src.MakePos(posBase, 0, 0))
- }
-
- for i, n := 0, r.len(); i < n; i++ {
- // Eagerly resolve imported objects, so any filenames registered
- // in the process are assigned stable indices too.
- _, sym := r.qualifiedIdent()
- typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
- assert(sym.Def != nil)
- }
- }
-
- cgoPragmas := make([][]string, r.len())
+ cgoPragmas := make([][]string, r.Len())
for i := range cgoPragmas {
- cgoPragmas[i] = r.strings()
+ cgoPragmas[i] = r.Strings()
}
target.CgoPragmas = cgoPragmas
r.pkgDecls(target)
- r.sync(syncEOF)
+ r.Sync(pkgbits.SyncEOF)
}
func (r *reader) pkgDecls(target *ir.Package) {
- r.sync(syncDecls)
+ r.Sync(pkgbits.SyncDecls)
for {
- switch code := codeDecl(r.code(syncDecl)); code {
+ switch code := codeDecl(r.Code(pkgbits.SyncDecl)); code {
default:
panic(fmt.Sprintf("unhandled decl: %v", code))
@@ -1840,11 +1803,11 @@ func (r *reader) pkgDecls(target *ir.Package) {
}
}
- if n := r.len(); n > 0 {
+ if n := r.Len(); n > 0 {
assert(len(names) == 1)
embeds := make([]ir.Embed, n)
for i := range embeds {
- embeds[i] = ir.Embed{Pos: r.pos(), Patterns: r.strings()}
+ embeds[i] = ir.Embed{Pos: r.pos(), Patterns: r.Strings()}
}
names[0].Embed = &embeds
target.Embeds = append(target.Embeds, names[0])
@@ -1857,10 +1820,10 @@ func (r *reader) pkgDecls(target *ir.Package) {
}
func (r *reader) pkgObjs(target *ir.Package) []*ir.Name {
- r.sync(syncDeclNames)
- nodes := make([]*ir.Name, r.len())
+ r.Sync(pkgbits.SyncDeclNames)
+ nodes := make([]*ir.Name, r.Len())
for i := range nodes {
- r.sync(syncDeclName)
+ r.Sync(pkgbits.SyncDeclName)
name := r.obj().(*ir.Name)
nodes[i] = name
@@ -1925,7 +1888,7 @@ func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExp
expandInline(fn, pri)
}
- r := pri.asReader(relocBody, syncFuncBody)
+ r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
// TODO(mdempsky): This still feels clumsy. Can we do better?
tmpfn := ir.NewFunc(fn.Pos())
@@ -1949,7 +1912,7 @@ func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExp
r.funcargs(fn)
- assert(r.bool()) // have body
+ assert(r.Bool()) // have body
r.delayResults = fn.Inl.CanDelayResults
r.retlabel = typecheck.AutoLabel(".i")
@@ -2027,17 +1990,6 @@ func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExp
body := ir.Nodes(r.curfn.Body)
- // Quirk: If deadcode elimination turned a non-empty function into
- // an empty one, we need to set the position for the empty block
- // left behind to the inlined position for src.NoXPos, so that
- // an empty string gets added into the DWARF file name listing at
- // the appropriate index.
- if quirksMode() && len(body) == 1 {
- if block, ok := body[0].(*ir.BlockStmt); ok && len(block.List) == 0 {
- block.SetPos(r.updatePos(src.NoXPos))
- }
- }
-
// Quirkish: We need to eagerly prune variables added during
// inlining, but removed by deadcode.FuncBody above. Unused
// variables will get removed during stack frame layout anyway, but
@@ -2120,7 +2072,7 @@ func expandInline(fn *ir.Func, pri pkgReaderIndex) {
tmpfn.ClosureVars = fn.ClosureVars
{
- r := pri.asReader(relocBody, syncFuncBody)
+ r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
setType(tmpfn.Nname, fn.Type())
// Don't change parameter's Sym/Nname fields.
@@ -2218,8 +2170,8 @@ func (r *reader) importedDef() bool {
}
func MakeWrappers(target *ir.Package) {
- // Only unified IR in non-quirks mode emits its own wrappers.
- if base.Debug.Unified == 0 || quirksMode() {
+ // Only unified IR emits its own wrappers.
+ if base.Debug.Unified == 0 {
return
}
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index 50b6c0efcd..9d17d5ffd1 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -410,7 +410,8 @@ func (g *genInst) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
fn, formalParams, formalResults := startClosure(pos, outer, typ)
// This is the dictionary we want to use.
- // It may be a constant, or it may be a dictionary acquired from the outer function's dictionary.
+ // It may be a constant, it may be the outer functions's dictionary, or it may be
+ // a subdictionary acquired from the outer function's dictionary.
// For the latter, dictVar is a variable in the outer function's scope, set to the subdictionary
// read from the outer function's dictionary.
var dictVar *ir.Name
@@ -640,6 +641,11 @@ func (g *genInst) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMe
// over any pointer)
recvType := nameNode.Type().Recv().Type
recvType = deref(recvType)
+ if recvType.IsFullyInstantiated() {
+ // Get the type of the base generic type, so we get
+ // its original typeparams.
+ recvType = recvType.OrigSym().Def.(*ir.Name).Type()
+ }
tparams = recvType.RParams()
} else {
fields := nameNode.Type().TParams().Fields().Slice()
@@ -656,11 +662,9 @@ func (g *genInst) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMe
s1 := make([]*types.Type, len(shapes))
for i, t := range shapes {
var tparam *types.Type
- if tparams[i].Kind() == types.TTYPEPARAM {
- // Shapes are grouped differently for structural types, so we
- // pass the type param to Shapify(), so we can distinguish.
- tparam = tparams[i]
- }
+ // Shapes are grouped differently for structural types, so we
+ // pass the type param to Shapify(), so we can distinguish.
+ tparam = tparams[i]
if !t.IsShape() {
s1[i] = typecheck.Shapify(t, i, tparam)
} else {
@@ -1055,8 +1059,6 @@ func (subst *subster) node(n ir.Node) ir.Node {
// Transform the conversion, now that we know the
// type argument.
m = transformConvCall(call)
- // CONVIFACE transformation was already done in noder2
- assert(m.Op() != ir.OCONVIFACE)
case ir.OMETHVALUE, ir.OMETHEXPR:
// Redo the transformation of OXDOT, now that we
@@ -1076,14 +1078,7 @@ func (subst *subster) node(n ir.Node) ir.Node {
case ir.ONAME:
name := call.X.Name()
if name.BuiltinOp != ir.OXXX {
- switch name.BuiltinOp {
- case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.ODELETE, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- // Transform these builtins now that we
- // know the type of the args.
- m = transformBuiltin(call)
- default:
- base.FatalfAt(call.Pos(), "Unexpected builtin op")
- }
+ m = transformBuiltin(call)
} else {
// This is the case of a function value that was a
// type parameter (implied to be a function via a
@@ -1154,6 +1149,7 @@ func (subst *subster) node(n ir.Node) ir.Node {
newfn.Dcl = append(newfn.Dcl, ldict)
as := ir.NewAssignStmt(x.Pos(), ldict, cdict)
as.SetTypecheck(1)
+ ldict.Defn = as
newfn.Body.Append(as)
// Create inst info for the instantiated closure. The dict
diff --git a/src/cmd/compile/internal/noder/sync.go b/src/cmd/compile/internal/noder/sync.go
deleted file mode 100644
index 7af558f8b2..0000000000
--- a/src/cmd/compile/internal/noder/sync.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// UNREVIEWED
-
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
- "fmt"
- "strings"
-)
-
-// enableSync controls whether sync markers are written into unified
-// IR's export data format and also whether they're expected when
-// reading them back in. They're inessential to the correct
-// functioning of unified IR, but are helpful during development to
-// detect mistakes.
-//
-// When sync is enabled, writer stack frames will also be included in
-// the export data. Currently, a fixed number of frames are included,
-// controlled by -d=syncframes (default 0).
-const enableSync = true
-
-// fmtFrames formats a backtrace for reporting reader/writer desyncs.
-func fmtFrames(pcs ...uintptr) []string {
- res := make([]string, 0, len(pcs))
- walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
- // Trim package from function name. It's just redundant noise.
- name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
-
- res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
- })
- return res
-}
-
-type frameVisitor func(file string, line int, name string, offset uintptr)
-
-// syncMarker is an enum type that represents markers that may be
-// written to export data to ensure the reader and writer stay
-// synchronized.
-type syncMarker int
-
-//go:generate stringer -type=syncMarker -trimprefix=sync
-
-// TODO(mdempsky): Cleanup unneeded sync markers.
-
-// TODO(mdempsky): Split these markers into public/stable markers, and
-// private ones. Also, trim unused ones.
-const (
- _ syncMarker = iota
- syncNode
- syncBool
- syncInt64
- syncUint64
- syncString
- syncPos
- syncPkg
- syncSym
- syncSelector
- syncKind
- syncType
- syncTypePkg
- syncSignature
- syncParam
- syncOp
- syncObject
- syncExpr
- syncStmt
- syncDecl
- syncConstDecl
- syncFuncDecl
- syncTypeDecl
- syncVarDecl
- syncPragma
- syncValue
- syncEOF
- syncMethod
- syncFuncBody
- syncUse
- syncUseObj
- syncObjectIdx
- syncTypeIdx
- syncBOF
- syncEntry
- syncOpenScope
- syncCloseScope
- syncGlobal
- syncLocal
- syncDefine
- syncDefLocal
- syncUseLocal
- syncDefGlobal
- syncUseGlobal
- syncTypeParams
- syncUseLabel
- syncDefLabel
- syncFuncLit
- syncCommonFunc
- syncBodyRef
- syncLinksymExt
- syncHack
- syncSetlineno
- syncName
- syncImportDecl
- syncDeclNames
- syncDeclName
- syncExprList
- syncExprs
- syncWrapname
- syncTypeExpr
- syncTypeExprOrNil
- syncChanDir
- syncParams
- syncCloseAnotherScope
- syncSum
- syncUnOp
- syncBinOp
- syncStructType
- syncInterfaceType
- syncPackname
- syncEmbedded
- syncStmts
- syncStmtsFall
- syncStmtFall
- syncBlockStmt
- syncIfStmt
- syncForStmt
- syncSwitchStmt
- syncRangeStmt
- syncCaseClause
- syncCommClause
- syncSelectStmt
- syncDecls
- syncLabeledStmt
- syncCompLit
-
- sync1
- sync2
- sync3
- sync4
-
- syncN
- syncDefImplicit
- syncUseName
- syncUseObjLocal
- syncAddLocal
- syncBothSignature
- syncSetUnderlying
- syncLinkname
- syncStmt1
- syncStmtsEnd
- syncDeclare
- syncTopDecls
- syncTopConstDecl
- syncTopFuncDecl
- syncTopTypeDecl
- syncTopVarDecl
- syncObject1
- syncAddBody
- syncLabel
- syncFuncExt
- syncMethExt
- syncOptLabel
- syncScalar
- syncStmtDecls
- syncDeclLocal
- syncObjLocal
- syncObjLocal1
- syncDeclareLocal
- syncPublic
- syncPrivate
- syncRelocs
- syncReloc
- syncUseReloc
- syncVarExt
- syncPkgDef
- syncTypeExt
- syncVal
- syncCodeObj
- syncPosBase
- syncLocalIdent
- syncTypeParamNames
- syncTypeParamBounds
- syncImplicitTypes
- syncObjectName
-)
diff --git a/src/cmd/compile/internal/noder/syncmarker_string.go b/src/cmd/compile/internal/noder/syncmarker_string.go
deleted file mode 100644
index 655cafc950..0000000000
--- a/src/cmd/compile/internal/noder/syncmarker_string.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Code generated by "stringer -type=syncMarker -trimprefix=sync"; DO NOT EDIT.
-
-package noder
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[syncNode-1]
- _ = x[syncBool-2]
- _ = x[syncInt64-3]
- _ = x[syncUint64-4]
- _ = x[syncString-5]
- _ = x[syncPos-6]
- _ = x[syncPkg-7]
- _ = x[syncSym-8]
- _ = x[syncSelector-9]
- _ = x[syncKind-10]
- _ = x[syncType-11]
- _ = x[syncTypePkg-12]
- _ = x[syncSignature-13]
- _ = x[syncParam-14]
- _ = x[syncOp-15]
- _ = x[syncObject-16]
- _ = x[syncExpr-17]
- _ = x[syncStmt-18]
- _ = x[syncDecl-19]
- _ = x[syncConstDecl-20]
- _ = x[syncFuncDecl-21]
- _ = x[syncTypeDecl-22]
- _ = x[syncVarDecl-23]
- _ = x[syncPragma-24]
- _ = x[syncValue-25]
- _ = x[syncEOF-26]
- _ = x[syncMethod-27]
- _ = x[syncFuncBody-28]
- _ = x[syncUse-29]
- _ = x[syncUseObj-30]
- _ = x[syncObjectIdx-31]
- _ = x[syncTypeIdx-32]
- _ = x[syncBOF-33]
- _ = x[syncEntry-34]
- _ = x[syncOpenScope-35]
- _ = x[syncCloseScope-36]
- _ = x[syncGlobal-37]
- _ = x[syncLocal-38]
- _ = x[syncDefine-39]
- _ = x[syncDefLocal-40]
- _ = x[syncUseLocal-41]
- _ = x[syncDefGlobal-42]
- _ = x[syncUseGlobal-43]
- _ = x[syncTypeParams-44]
- _ = x[syncUseLabel-45]
- _ = x[syncDefLabel-46]
- _ = x[syncFuncLit-47]
- _ = x[syncCommonFunc-48]
- _ = x[syncBodyRef-49]
- _ = x[syncLinksymExt-50]
- _ = x[syncHack-51]
- _ = x[syncSetlineno-52]
- _ = x[syncName-53]
- _ = x[syncImportDecl-54]
- _ = x[syncDeclNames-55]
- _ = x[syncDeclName-56]
- _ = x[syncExprList-57]
- _ = x[syncExprs-58]
- _ = x[syncWrapname-59]
- _ = x[syncTypeExpr-60]
- _ = x[syncTypeExprOrNil-61]
- _ = x[syncChanDir-62]
- _ = x[syncParams-63]
- _ = x[syncCloseAnotherScope-64]
- _ = x[syncSum-65]
- _ = x[syncUnOp-66]
- _ = x[syncBinOp-67]
- _ = x[syncStructType-68]
- _ = x[syncInterfaceType-69]
- _ = x[syncPackname-70]
- _ = x[syncEmbedded-71]
- _ = x[syncStmts-72]
- _ = x[syncStmtsFall-73]
- _ = x[syncStmtFall-74]
- _ = x[syncBlockStmt-75]
- _ = x[syncIfStmt-76]
- _ = x[syncForStmt-77]
- _ = x[syncSwitchStmt-78]
- _ = x[syncRangeStmt-79]
- _ = x[syncCaseClause-80]
- _ = x[syncCommClause-81]
- _ = x[syncSelectStmt-82]
- _ = x[syncDecls-83]
- _ = x[syncLabeledStmt-84]
- _ = x[syncCompLit-85]
- _ = x[sync1-86]
- _ = x[sync2-87]
- _ = x[sync3-88]
- _ = x[sync4-89]
- _ = x[syncN-90]
- _ = x[syncDefImplicit-91]
- _ = x[syncUseName-92]
- _ = x[syncUseObjLocal-93]
- _ = x[syncAddLocal-94]
- _ = x[syncBothSignature-95]
- _ = x[syncSetUnderlying-96]
- _ = x[syncLinkname-97]
- _ = x[syncStmt1-98]
- _ = x[syncStmtsEnd-99]
- _ = x[syncDeclare-100]
- _ = x[syncTopDecls-101]
- _ = x[syncTopConstDecl-102]
- _ = x[syncTopFuncDecl-103]
- _ = x[syncTopTypeDecl-104]
- _ = x[syncTopVarDecl-105]
- _ = x[syncObject1-106]
- _ = x[syncAddBody-107]
- _ = x[syncLabel-108]
- _ = x[syncFuncExt-109]
- _ = x[syncMethExt-110]
- _ = x[syncOptLabel-111]
- _ = x[syncScalar-112]
- _ = x[syncStmtDecls-113]
- _ = x[syncDeclLocal-114]
- _ = x[syncObjLocal-115]
- _ = x[syncObjLocal1-116]
- _ = x[syncDeclareLocal-117]
- _ = x[syncPublic-118]
- _ = x[syncPrivate-119]
- _ = x[syncRelocs-120]
- _ = x[syncReloc-121]
- _ = x[syncUseReloc-122]
- _ = x[syncVarExt-123]
- _ = x[syncPkgDef-124]
- _ = x[syncTypeExt-125]
- _ = x[syncVal-126]
- _ = x[syncCodeObj-127]
- _ = x[syncPosBase-128]
- _ = x[syncLocalIdent-129]
- _ = x[syncTypeParamNames-130]
- _ = x[syncTypeParamBounds-131]
- _ = x[syncImplicitTypes-132]
- _ = x[syncObjectName-133]
-}
-
-const _syncMarker_name = "NodeBoolInt64Uint64StringPosPkgSymSelectorKindTypeTypePkgSignatureParamOpObjectExprStmtDeclConstDeclFuncDeclTypeDeclVarDeclPragmaValueEOFMethodFuncBodyUseUseObjObjectIdxTypeIdxBOFEntryOpenScopeCloseScopeGlobalLocalDefineDefLocalUseLocalDefGlobalUseGlobalTypeParamsUseLabelDefLabelFuncLitCommonFuncBodyRefLinksymExtHackSetlinenoNameImportDeclDeclNamesDeclNameExprListExprsWrapnameTypeExprTypeExprOrNilChanDirParamsCloseAnotherScopeSumUnOpBinOpStructTypeInterfaceTypePacknameEmbeddedStmtsStmtsFallStmtFallBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtCompLit1234NDefImplicitUseNameUseObjLocalAddLocalBothSignatureSetUnderlyingLinknameStmt1StmtsEndDeclareTopDeclsTopConstDeclTopFuncDeclTopTypeDeclTopVarDeclObject1AddBodyLabelFuncExtMethExtOptLabelScalarStmtDeclsDeclLocalObjLocalObjLocal1DeclareLocalPublicPrivateRelocsRelocUseRelocVarExtPkgDefTypeExtValCodeObjPosBaseLocalIdentTypeParamNamesTypeParamBoundsImplicitTypesObjectName"
-
-var _syncMarker_index = [...]uint16{0, 4, 8, 13, 19, 25, 28, 31, 34, 42, 46, 50, 57, 66, 71, 73, 79, 83, 87, 91, 100, 108, 116, 123, 129, 134, 137, 143, 151, 154, 160, 169, 176, 179, 184, 193, 203, 209, 214, 220, 228, 236, 245, 254, 264, 272, 280, 287, 297, 304, 314, 318, 327, 331, 341, 350, 358, 366, 371, 379, 387, 400, 407, 413, 430, 433, 437, 442, 452, 465, 473, 481, 486, 495, 503, 512, 518, 525, 535, 544, 554, 564, 574, 579, 590, 597, 598, 599, 600, 601, 602, 613, 620, 631, 639, 652, 665, 673, 678, 686, 693, 701, 713, 724, 735, 745, 752, 759, 764, 771, 778, 786, 792, 801, 810, 818, 827, 839, 845, 852, 858, 863, 871, 877, 883, 890, 893, 900, 907, 917, 931, 946, 959, 969}
-
-func (i syncMarker) String() string {
- i -= 1
- if i < 0 || i >= syncMarker(len(_syncMarker_index)-1) {
- return "syncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
- }
- return _syncMarker_name[_syncMarker_index[i]:_syncMarker_index[i+1]]
-}
diff --git a/src/cmd/compile/internal/noder/transform.go b/src/cmd/compile/internal/noder/transform.go
index 5f1f41163b..208630271d 100644
--- a/src/cmd/compile/internal/noder/transform.go
+++ b/src/cmd/compile/internal/noder/transform.go
@@ -1046,13 +1046,7 @@ func transformCompLit(n *ir.CompLitExpr) (res ir.Node) {
kv := l.(*ir.KeyExpr)
key := kv.Key
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
s := key.Sym()
- if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
- s = typecheck.Lookup(s.Name)
- }
if types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
// Exported field names should always have
// local pkg. We only need to do this
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index ec0012db4c..ac82f2df03 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -10,11 +10,13 @@ import (
"bytes"
"fmt"
"internal/goversion"
+ "internal/pkgbits"
"io"
"runtime"
"sort"
"cmd/compile/internal/base"
+ "cmd/compile/internal/importer"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
@@ -72,18 +74,14 @@ var localPkgReader *pkgReader
func unified(noders []*noder) {
inline.NewInline = InlineCall
- if !quirksMode() {
- writeNewExportFunc = writeNewExport
- } else if base.Flag.G != 0 {
- base.Errorf("cannot use -G and -d=quirksmode together")
- }
+ writeNewExportFunc = writeNewExport
newReadImportFunc = func(data string, pkg1 *types.Pkg, ctxt *types2.Context, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
- pr := newPkgDecoder(pkg1.Path, data)
+ pr := pkgbits.NewPkgDecoder(pkg1.Path, data)
// Read package descriptors for both types2 and compiler backend.
readPackage(newPkgReader(pr), pkg1)
- pkg2 = readPackage2(ctxt, packages, pr)
+ pkg2 = importer.ReadPackage(ctxt, packages, pr)
return
}
@@ -102,10 +100,10 @@ func unified(noders []*noder) {
typecheck.TypecheckAllowed = true
- localPkgReader = newPkgReader(newPkgDecoder(types.LocalPkg.Path, data))
+ localPkgReader = newPkgReader(pkgbits.NewPkgDecoder(types.LocalPkg.Path, data))
readPackage(localPkgReader, types.LocalPkg)
- r := localPkgReader.newReader(relocMeta, privateRootIdx, syncPrivate)
+ r := localPkgReader.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)
r.pkgInit(types.LocalPkg, target)
// Type-check any top-level assignments. We ignore non-assignments
@@ -166,36 +164,36 @@ func writePkgStub(noders []*noder) string {
pw.collectDecls(noders)
- publicRootWriter := pw.newWriter(relocMeta, syncPublic)
- privateRootWriter := pw.newWriter(relocMeta, syncPrivate)
+ publicRootWriter := pw.newWriter(pkgbits.RelocMeta, pkgbits.SyncPublic)
+ privateRootWriter := pw.newWriter(pkgbits.RelocMeta, pkgbits.SyncPrivate)
- assert(publicRootWriter.idx == publicRootIdx)
- assert(privateRootWriter.idx == privateRootIdx)
+ assert(publicRootWriter.Idx == pkgbits.PublicRootIdx)
+ assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx)
{
w := publicRootWriter
w.pkg(pkg)
- w.bool(false) // has init; XXX
+ w.Bool(false) // has init; XXX
scope := pkg.Scope()
names := scope.Names()
- w.len(len(names))
+ w.Len(len(names))
for _, name := range scope.Names() {
w.obj(scope.Lookup(name), nil)
}
- w.sync(syncEOF)
- w.flush()
+ w.Sync(pkgbits.SyncEOF)
+ w.Flush()
}
{
w := privateRootWriter
w.pkgInit(noders)
- w.flush()
+ w.Flush()
}
var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
- pw.dump(&sb)
+ pw.DumpTo(&sb)
// At this point, we're done with types2. Make sure the package is
// garbage collected.
@@ -239,26 +237,26 @@ func freePackage(pkg *types2.Package) {
}
func readPackage(pr *pkgReader, importpkg *types.Pkg) {
- r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
assert(pkg == importpkg)
- if r.bool() {
+ if r.Bool() {
sym := pkg.Lookup(".inittask")
task := ir.NewNameAt(src.NoXPos, sym)
task.Class = ir.PEXTERN
sym.Def = task
}
- for i, n := 0, r.len(); i < n; i++ {
- r.sync(syncObject)
- assert(!r.bool())
- idx := r.reloc(relocObj)
- assert(r.len() == 0)
+ for i, n := 0, r.Len(); i < n; i++ {
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ idx := r.Reloc(pkgbits.RelocObj)
+ assert(r.Len() == 0)
- path, name, code := r.p.peekObj(idx)
- if code != objStub {
+ path, name, code := r.p.PeekObj(idx)
+ if code != pkgbits.ObjStub {
objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil}
}
}
@@ -266,42 +264,42 @@ func readPackage(pr *pkgReader, importpkg *types.Pkg) {
func writeNewExport(out io.Writer) {
l := linker{
- pw: newPkgEncoder(),
+ pw: pkgbits.NewPkgEncoder(base.Debug.SyncFrames),
pkgs: make(map[string]int),
decls: make(map[*types.Sym]int),
}
- publicRootWriter := l.pw.newEncoder(relocMeta, syncPublic)
- assert(publicRootWriter.idx == publicRootIdx)
+ publicRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic)
+ assert(publicRootWriter.Idx == pkgbits.PublicRootIdx)
var selfPkgIdx int
{
pr := localPkgReader
- r := pr.newDecoder(relocMeta, publicRootIdx, syncPublic)
+ r := pr.NewDecoder(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
- r.sync(syncPkg)
- selfPkgIdx = l.relocIdx(pr, relocPkg, r.reloc(relocPkg))
+ r.Sync(pkgbits.SyncPkg)
+ selfPkgIdx = l.relocIdx(pr, pkgbits.RelocPkg, r.Reloc(pkgbits.RelocPkg))
- r.bool() // has init
+ r.Bool() // has init
- for i, n := 0, r.len(); i < n; i++ {
- r.sync(syncObject)
- assert(!r.bool())
- idx := r.reloc(relocObj)
- assert(r.len() == 0)
+ for i, n := 0, r.Len(); i < n; i++ {
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ idx := r.Reloc(pkgbits.RelocObj)
+ assert(r.Len() == 0)
- xpath, xname, xtag := pr.peekObj(idx)
- assert(xpath == pr.pkgPath)
- assert(xtag != objStub)
+ xpath, xname, xtag := pr.PeekObj(idx)
+ assert(xpath == pr.PkgPath())
+ assert(xtag != pkgbits.ObjStub)
if types.IsExported(xname) {
- l.relocIdx(pr, relocObj, idx)
+ l.relocIdx(pr, pkgbits.RelocObj, idx)
}
}
- r.sync(syncEOF)
+ r.Sync(pkgbits.SyncEOF)
}
{
@@ -313,22 +311,22 @@ func writeNewExport(out io.Writer) {
w := publicRootWriter
- w.sync(syncPkg)
- w.reloc(relocPkg, selfPkgIdx)
+ w.Sync(pkgbits.SyncPkg)
+ w.Reloc(pkgbits.RelocPkg, selfPkgIdx)
- w.bool(typecheck.Lookup(".inittask").Def != nil)
+ w.Bool(typecheck.Lookup(".inittask").Def != nil)
- w.len(len(idxs))
+ w.Len(len(idxs))
for _, idx := range idxs {
- w.sync(syncObject)
- w.bool(false)
- w.reloc(relocObj, idx)
- w.len(0)
+ w.Sync(pkgbits.SyncObject)
+ w.Bool(false)
+ w.Reloc(pkgbits.RelocObj, idx)
+ w.Len(0)
}
- w.sync(syncEOF)
- w.flush()
+ w.Sync(pkgbits.SyncEOF)
+ w.Flush()
}
- l.pw.dump(out)
+ l.pw.DumpTo(out)
}
diff --git a/src/cmd/compile/internal/noder/unified_test.go b/src/cmd/compile/internal/noder/unified_test.go
deleted file mode 100644
index d7334df282..0000000000
--- a/src/cmd/compile/internal/noder/unified_test.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder_test
-
-import (
- "encoding/json"
- "flag"
- exec "internal/execabs"
- "os"
- "reflect"
- "runtime"
- "strings"
- "testing"
-)
-
-var (
- flagCmp = flag.Bool("cmp", false, "enable TestUnifiedCompare")
- flagPkgs = flag.String("pkgs", "std", "list of packages to compare (ignored in -short mode)")
- flagAll = flag.Bool("all", false, "enable testing of all GOOS/GOARCH targets")
- flagParallel = flag.Bool("parallel", false, "test GOOS/GOARCH targets in parallel")
-)
-
-// TestUnifiedCompare implements a test similar to running:
-//
-// $ go build -toolexec="toolstash -cmp" std
-//
-// The -pkgs flag controls the list of packages tested.
-//
-// By default, only the native GOOS/GOARCH target is enabled. The -all
-// flag enables testing of non-native targets. The -parallel flag
-// additionally enables testing of targets in parallel.
-//
-// Caution: Testing all targets is very resource intensive! On an IBM
-// P920 (dual Intel Xeon Gold 6154 CPUs; 36 cores, 192GB RAM), testing
-// all targets in parallel takes about 5 minutes. Using the 'go test'
-// command's -run flag for subtest matching is recommended for less
-// powerful machines.
-func TestUnifiedCompare(t *testing.T) {
- // TODO(mdempsky): Either re-enable or delete. Disabled for now to
- // avoid impeding others' forward progress.
- if !*flagCmp {
- t.Skip("skipping TestUnifiedCompare (use -cmp to enable)")
- }
-
- targets, err := exec.Command("go", "tool", "dist", "list").Output()
- if err != nil {
- t.Fatal(err)
- }
-
- for _, target := range strings.Fields(string(targets)) {
- t.Run(target, func(t *testing.T) {
- parts := strings.Split(target, "/")
- goos, goarch := parts[0], parts[1]
-
- if !(*flagAll || goos == runtime.GOOS && goarch == runtime.GOARCH) {
- t.Skip("skipping non-native target (use -all to enable)")
- }
- if *flagParallel {
- t.Parallel()
- }
-
- pkgs1 := loadPackages(t, goos, goarch, "-d=unified=0 -d=inlfuncswithclosures=0 -d=unifiedquirks=1 -G=0")
- pkgs2 := loadPackages(t, goos, goarch, "-d=unified=1 -d=inlfuncswithclosures=0 -d=unifiedquirks=1 -G=0")
-
- if len(pkgs1) != len(pkgs2) {
- t.Fatalf("length mismatch: %v != %v", len(pkgs1), len(pkgs2))
- }
-
- for i := range pkgs1 {
- pkg1 := pkgs1[i]
- pkg2 := pkgs2[i]
-
- path := pkg1.ImportPath
- if path != pkg2.ImportPath {
- t.Fatalf("mismatched paths: %q != %q", path, pkg2.ImportPath)
- }
-
- // Packages that don't have any source files (e.g., packages
- // unsafe, embed/internal/embedtest, and cmd/internal/moddeps).
- if pkg1.Export == "" && pkg2.Export == "" {
- continue
- }
-
- if pkg1.BuildID == pkg2.BuildID {
- t.Errorf("package %q: build IDs unexpectedly matched", path)
- }
-
- // Unlike toolstash -cmp, we're comparing the same compiler
- // binary against itself, just with different flags. So we
- // don't need to worry about skipping over mismatched version
- // strings, but we do need to account for differing build IDs.
- //
- // Fortunately, build IDs are cryptographic 256-bit hashes,
- // and cmd/go provides us with them up front. So we can just
- // use them as delimeters to split the files, and then check
- // that the substrings are all equal.
- file1 := strings.Split(readFile(t, pkg1.Export), pkg1.BuildID)
- file2 := strings.Split(readFile(t, pkg2.Export), pkg2.BuildID)
- if !reflect.DeepEqual(file1, file2) {
- t.Errorf("package %q: compile output differs", path)
- }
- }
- })
- }
-}
-
-type pkg struct {
- ImportPath string
- Export string
- BuildID string
- Incomplete bool
-}
-
-func loadPackages(t *testing.T, goos, goarch, gcflags string) []pkg {
- args := []string{"list", "-e", "-export", "-json", "-gcflags=all=" + gcflags, "--"}
- if testing.Short() {
- t.Log("short testing mode; only testing package runtime")
- args = append(args, "runtime")
- } else {
- args = append(args, strings.Fields(*flagPkgs)...)
- }
-
- cmd := exec.Command("go", args...)
- cmd.Env = append(os.Environ(), "GOOS="+goos, "GOARCH="+goarch)
- cmd.Stderr = os.Stderr
- t.Logf("running %v", cmd)
- stdout, err := cmd.StdoutPipe()
- if err != nil {
- t.Fatal(err)
- }
- if err := cmd.Start(); err != nil {
- t.Fatal(err)
- }
-
- var res []pkg
- for dec := json.NewDecoder(stdout); dec.More(); {
- var pkg pkg
- if err := dec.Decode(&pkg); err != nil {
- t.Fatal(err)
- }
- if pkg.Incomplete {
- t.Fatalf("incomplete package: %q", pkg.ImportPath)
- }
- res = append(res, pkg)
- }
- if err := cmd.Wait(); err != nil {
- t.Fatal(err)
- }
- return res
-}
-
-func readFile(t *testing.T, name string) string {
- buf, err := os.ReadFile(name)
- if err != nil {
- t.Fatal(err)
- }
- return string(buf)
-}
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index 933f577825..59e9409b97 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -8,7 +8,7 @@ package noder
import (
"fmt"
- "go/constant"
+ "internal/pkgbits"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -17,7 +17,7 @@ import (
)
type pkgWriter struct {
- pkgEncoder
+ pkgbits.PkgEncoder
m posMap
curpkg *types2.Package
@@ -33,13 +33,11 @@ type pkgWriter struct {
linknames map[types2.Object]string
cgoPragmas [][]string
-
- dups dupTypes
}
func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter {
return &pkgWriter{
- pkgEncoder: newPkgEncoder(),
+ PkgEncoder: pkgbits.NewPkgEncoder(base.Debug.SyncFrames),
m: m,
curpkg: pkg,
@@ -73,7 +71,7 @@ func (pw *pkgWriter) unexpected(what string, p poser) {
type writer struct {
p *pkgWriter
- encoder
+ pkgbits.Encoder
// TODO(mdempsky): We should be able to prune localsIdx whenever a
// scope closes, and then maybe we can just use the same map for
@@ -144,9 +142,9 @@ func (info objInfo) equals(other objInfo) bool {
return true
}
-func (pw *pkgWriter) newWriter(k reloc, marker syncMarker) *writer {
+func (pw *pkgWriter) newWriter(k pkgbits.RelocKind, marker pkgbits.SyncMarker) *writer {
return &writer{
- encoder: pw.newEncoder(k, marker),
+ Encoder: pw.NewEncoder(k, marker),
p: pw,
}
}
@@ -154,23 +152,23 @@ func (pw *pkgWriter) newWriter(k reloc, marker syncMarker) *writer {
// @@@ Positions
func (w *writer) pos(p poser) {
- w.sync(syncPos)
+ w.Sync(pkgbits.SyncPos)
pos := p.Pos()
// TODO(mdempsky): Track down the remaining cases here and fix them.
- if !w.bool(pos.IsKnown()) {
+ if !w.Bool(pos.IsKnown()) {
return
}
// TODO(mdempsky): Delta encoding. Also, if there's a b-side, update
// its position base too (but not vice versa!).
w.posBase(pos.Base())
- w.uint(pos.Line())
- w.uint(pos.Col())
+ w.Uint(pos.Line())
+ w.Uint(pos.Col())
}
func (w *writer) posBase(b *syntax.PosBase) {
- w.reloc(relocPosBase, w.p.posBaseIdx(b))
+ w.Reloc(pkgbits.RelocPosBase, w.p.posBaseIdx(b))
}
func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) int {
@@ -178,25 +176,25 @@ func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) int {
return idx
}
- w := pw.newWriter(relocPosBase, syncPosBase)
- w.p.posBasesIdx[b] = w.idx
+ w := pw.newWriter(pkgbits.RelocPosBase, pkgbits.SyncPosBase)
+ w.p.posBasesIdx[b] = w.Idx
- w.string(trimFilename(b))
+ w.String(trimFilename(b))
- if !w.bool(b.IsFileBase()) {
+ if !w.Bool(b.IsFileBase()) {
w.pos(b)
- w.uint(b.Line())
- w.uint(b.Col())
+ w.Uint(b.Line())
+ w.Uint(b.Col())
}
- return w.flush()
+ return w.Flush()
}
// @@@ Packages
func (w *writer) pkg(pkg *types2.Package) {
- w.sync(syncPkg)
- w.reloc(relocPkg, w.p.pkgIdx(pkg))
+ w.Sync(pkgbits.SyncPkg)
+ w.Reloc(pkgbits.RelocPkg, w.p.pkgIdx(pkg))
}
func (pw *pkgWriter) pkgIdx(pkg *types2.Package) int {
@@ -204,27 +202,27 @@ func (pw *pkgWriter) pkgIdx(pkg *types2.Package) int {
return idx
}
- w := pw.newWriter(relocPkg, syncPkgDef)
- pw.pkgsIdx[pkg] = w.idx
+ w := pw.newWriter(pkgbits.RelocPkg, pkgbits.SyncPkgDef)
+ pw.pkgsIdx[pkg] = w.Idx
if pkg == nil {
- w.string("builtin")
+ w.String("builtin")
} else {
var path string
if pkg != w.p.curpkg {
path = pkg.Path()
}
- w.string(path)
- w.string(pkg.Name())
- w.len(pkg.Height())
+ w.String(path)
+ w.String(pkg.Name())
+ w.Len(pkg.Height())
- w.len(len(pkg.Imports()))
+ w.Len(len(pkg.Imports()))
for _, imp := range pkg.Imports() {
w.pkg(imp)
}
}
- return w.flush()
+ return w.Flush()
}
// @@@ Types
@@ -236,12 +234,12 @@ func (w *writer) typ(typ types2.Type) {
}
func (w *writer) typInfo(info typeInfo) {
- w.sync(syncType)
- if w.bool(info.derived) {
- w.len(info.idx)
+ w.Sync(pkgbits.SyncType)
+ if w.Bool(info.derived) {
+ w.Len(info.idx)
w.derived = true
} else {
- w.reloc(relocType, info.idx)
+ w.Reloc(pkgbits.RelocType, info.idx)
}
}
@@ -251,10 +249,6 @@ func (w *writer) typInfo(info typeInfo) {
// typIdx also reports whether typ is a derived type; that is, whether
// its identity depends on type parameters.
func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
- if quirksMode() {
- typ = pw.dups.orig(typ)
- }
-
if idx, ok := pw.typsIdx[typ]; ok {
return typeInfo{idx: idx, derived: false}
}
@@ -264,7 +258,7 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
}
}
- w := pw.newWriter(relocType, syncTypeIdx)
+ w := pw.newWriter(pkgbits.RelocType, pkgbits.SyncTypeIdx)
w.dict = dict
switch typ := typ.(type) {
@@ -277,15 +271,15 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
base.Fatalf("unexpected types2.Invalid")
case types2.Typ[kind] == typ:
- w.code(typeBasic)
- w.len(int(kind))
+ w.Code(pkgbits.TypeBasic)
+ w.Len(int(kind))
default:
// Handle "byte" and "rune" as references to their TypeName.
obj := types2.Universe.Lookup(typ.Name())
assert(obj.Type() == typ)
- w.code(typeNamed)
+ w.Code(pkgbits.TypeNamed)
w.obj(obj, nil)
}
@@ -301,7 +295,7 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
orig = orig.Origin()
}
- w.code(typeNamed)
+ w.Code(pkgbits.TypeNamed)
w.obj(orig.Obj(), typ.TypeArgs())
case *types2.TypeParam:
@@ -316,91 +310,100 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
}()
w.derived = true
- w.code(typeTypeParam)
- w.len(index)
+ w.Code(pkgbits.TypeTypeParam)
+ w.Len(index)
case *types2.Array:
- w.code(typeArray)
- w.uint64(uint64(typ.Len()))
+ w.Code(pkgbits.TypeArray)
+ w.Uint64(uint64(typ.Len()))
w.typ(typ.Elem())
case *types2.Chan:
- w.code(typeChan)
- w.len(int(typ.Dir()))
+ w.Code(pkgbits.TypeChan)
+ w.Len(int(typ.Dir()))
w.typ(typ.Elem())
case *types2.Map:
- w.code(typeMap)
+ w.Code(pkgbits.TypeMap)
w.typ(typ.Key())
w.typ(typ.Elem())
case *types2.Pointer:
- w.code(typePointer)
+ w.Code(pkgbits.TypePointer)
w.typ(typ.Elem())
case *types2.Signature:
base.Assertf(typ.TypeParams() == nil, "unexpected type params: %v", typ)
- w.code(typeSignature)
+ w.Code(pkgbits.TypeSignature)
w.signature(typ)
case *types2.Slice:
- w.code(typeSlice)
+ w.Code(pkgbits.TypeSlice)
w.typ(typ.Elem())
case *types2.Struct:
- w.code(typeStruct)
+ w.Code(pkgbits.TypeStruct)
w.structType(typ)
case *types2.Interface:
if typ == anyTypeName.Type() {
- w.code(typeNamed)
+ w.Code(pkgbits.TypeNamed)
w.obj(anyTypeName, nil)
break
}
- w.code(typeInterface)
+ w.Code(pkgbits.TypeInterface)
w.interfaceType(typ)
case *types2.Union:
- w.code(typeUnion)
+ w.Code(pkgbits.TypeUnion)
w.unionType(typ)
}
if w.derived {
idx := len(dict.derived)
- dict.derived = append(dict.derived, derivedInfo{idx: w.flush()})
+ dict.derived = append(dict.derived, derivedInfo{idx: w.Flush()})
dict.derivedIdx[typ] = idx
return typeInfo{idx: idx, derived: true}
}
- pw.typsIdx[typ] = w.idx
- return typeInfo{idx: w.flush(), derived: false}
+ pw.typsIdx[typ] = w.Idx
+ return typeInfo{idx: w.Flush(), derived: false}
}
func (w *writer) structType(typ *types2.Struct) {
- w.len(typ.NumFields())
+ w.Len(typ.NumFields())
for i := 0; i < typ.NumFields(); i++ {
f := typ.Field(i)
w.pos(f)
w.selector(f)
w.typ(f.Type())
- w.string(typ.Tag(i))
- w.bool(f.Embedded())
+ w.String(typ.Tag(i))
+ w.Bool(f.Embedded())
}
}
func (w *writer) unionType(typ *types2.Union) {
- w.len(typ.Len())
+ w.Len(typ.Len())
for i := 0; i < typ.Len(); i++ {
t := typ.Term(i)
- w.bool(t.Tilde())
+ w.Bool(t.Tilde())
w.typ(t.Type())
}
}
func (w *writer) interfaceType(typ *types2.Interface) {
- w.len(typ.NumExplicitMethods())
- w.len(typ.NumEmbeddeds())
+ w.Len(typ.NumExplicitMethods())
+ w.Len(typ.NumEmbeddeds())
+
+ if typ.NumExplicitMethods() == 0 && typ.NumEmbeddeds() == 1 {
+ w.Bool(typ.IsImplicit())
+ } else {
+ // Implicit interfaces always have 0 explicit methods and 1
+ // embedded type, so we skip writing out the implicit flag
+ // otherwise as a space optimization.
+ assert(!typ.IsImplicit())
+ }
for i := 0; i < typ.NumExplicitMethods(); i++ {
m := typ.ExplicitMethod(i)
@@ -418,22 +421,22 @@ func (w *writer) interfaceType(typ *types2.Interface) {
}
func (w *writer) signature(sig *types2.Signature) {
- w.sync(syncSignature)
+ w.Sync(pkgbits.SyncSignature)
w.params(sig.Params())
w.params(sig.Results())
- w.bool(sig.Variadic())
+ w.Bool(sig.Variadic())
}
func (w *writer) params(typ *types2.Tuple) {
- w.sync(syncParams)
- w.len(typ.Len())
+ w.Sync(pkgbits.SyncParams)
+ w.Len(typ.Len())
for i := 0; i < typ.Len(); i++ {
w.param(typ.At(i))
}
}
func (w *writer) param(param *types2.Var) {
- w.sync(syncParam)
+ w.Sync(pkgbits.SyncParam)
w.pos(param)
w.localIdent(param)
w.typ(param.Type())
@@ -462,9 +465,9 @@ func (w *writer) obj(obj types2.Object, explicits *types2.TypeList) {
// TODO(mdempsky): Push up into expr; this shouldn't appear
// outside of expression context.
- w.sync(syncObject)
- w.bool(true)
- w.len(idx)
+ w.Sync(pkgbits.SyncObject)
+ w.Bool(true)
+ w.Len(idx)
return
}
@@ -478,11 +481,11 @@ func (w *writer) obj(obj types2.Object, explicits *types2.TypeList) {
}
}
- w.sync(syncObject)
- w.bool(false)
- w.reloc(relocObj, info.idx)
+ w.Sync(pkgbits.SyncObject)
+ w.Bool(false)
+ w.Reloc(pkgbits.RelocObj, info.idx)
- w.len(len(info.explicits))
+ w.Len(len(info.explicits))
for _, info := range info.explicits {
w.typInfo(info)
}
@@ -503,36 +506,36 @@ func (pw *pkgWriter) objIdx(obj types2.Object) int {
dict.implicits = decl.implicits
}
- w := pw.newWriter(relocObj, syncObject1)
- wext := pw.newWriter(relocObjExt, syncObject1)
- wname := pw.newWriter(relocName, syncObject1)
- wdict := pw.newWriter(relocObjDict, syncObject1)
+ w := pw.newWriter(pkgbits.RelocObj, pkgbits.SyncObject1)
+ wext := pw.newWriter(pkgbits.RelocObjExt, pkgbits.SyncObject1)
+ wname := pw.newWriter(pkgbits.RelocName, pkgbits.SyncObject1)
+ wdict := pw.newWriter(pkgbits.RelocObjDict, pkgbits.SyncObject1)
- pw.globalsIdx[obj] = w.idx // break cycles
- assert(wext.idx == w.idx)
- assert(wname.idx == w.idx)
- assert(wdict.idx == w.idx)
+ pw.globalsIdx[obj] = w.Idx // break cycles
+ assert(wext.Idx == w.Idx)
+ assert(wname.Idx == w.Idx)
+ assert(wdict.Idx == w.Idx)
w.dict = dict
wext.dict = dict
code := w.doObj(wext, obj)
- w.flush()
- wext.flush()
+ w.Flush()
+ wext.Flush()
wname.qualifiedIdent(obj)
- wname.code(code)
- wname.flush()
+ wname.Code(code)
+ wname.Flush()
wdict.objDict(obj, w.dict)
- wdict.flush()
+ wdict.Flush()
- return w.idx
+ return w.Idx
}
-func (w *writer) doObj(wext *writer, obj types2.Object) codeObj {
+func (w *writer) doObj(wext *writer, obj types2.Object) pkgbits.CodeObj {
if obj.Pkg() != w.p.curpkg {
- return objStub
+ return pkgbits.ObjStub
}
switch obj := obj.(type) {
@@ -543,8 +546,8 @@ func (w *writer) doObj(wext *writer, obj types2.Object) codeObj {
case *types2.Const:
w.pos(obj)
w.typ(obj.Type())
- w.value(obj.Val())
- return objConst
+ w.Value(obj.Val())
+ return pkgbits.ObjConst
case *types2.Func:
decl, ok := w.p.funDecls[obj]
@@ -556,7 +559,7 @@ func (w *writer) doObj(wext *writer, obj types2.Object) codeObj {
w.signature(sig)
w.pos(decl)
wext.funcExt(obj)
- return objFunc
+ return pkgbits.ObjFunc
case *types2.TypeName:
decl, ok := w.p.typDecls[obj]
@@ -565,7 +568,7 @@ func (w *writer) doObj(wext *writer, obj types2.Object) codeObj {
if obj.IsAlias() {
w.pos(obj)
w.typ(obj.Type())
- return objAlias
+ return pkgbits.ObjAlias
}
named := obj.Type().(*types2.Named)
@@ -576,18 +579,18 @@ func (w *writer) doObj(wext *writer, obj types2.Object) codeObj {
wext.typeExt(obj)
w.typExpr(decl.Type)
- w.len(named.NumMethods())
+ w.Len(named.NumMethods())
for i := 0; i < named.NumMethods(); i++ {
w.method(wext, named.Method(i))
}
- return objType
+ return pkgbits.ObjType
case *types2.Var:
w.pos(obj)
w.typ(obj.Type())
wext.varExt(obj)
- return objVar
+ return pkgbits.ObjVar
}
}
@@ -607,27 +610,27 @@ func (w *writer) objDict(obj types2.Object, dict *writerDict) {
w.dict = dict // TODO(mdempsky): This is a bit sketchy.
- w.len(len(dict.implicits))
+ w.Len(len(dict.implicits))
tparams := objTypeParams(obj)
ntparams := tparams.Len()
- w.len(ntparams)
+ w.Len(ntparams)
for i := 0; i < ntparams; i++ {
w.typ(tparams.At(i).Constraint())
}
nderived := len(dict.derived)
- w.len(nderived)
+ w.Len(nderived)
for _, typ := range dict.derived {
- w.reloc(relocType, typ.idx)
- w.bool(typ.needed)
+ w.Reloc(pkgbits.RelocType, typ.idx)
+ w.Bool(typ.needed)
}
nfuncs := len(dict.funcs)
- w.len(nfuncs)
+ w.Len(nfuncs)
for _, fn := range dict.funcs {
- w.reloc(relocObj, fn.idx)
- w.len(len(fn.explicits))
+ w.Reloc(pkgbits.RelocObj, fn.idx)
+ w.Len(len(fn.explicits))
for _, targ := range fn.explicits {
w.typInfo(targ)
}
@@ -638,7 +641,7 @@ func (w *writer) objDict(obj types2.Object, dict *writerDict) {
}
func (w *writer) typeParamNames(tparams *types2.TypeParamList) {
- w.sync(syncTypeParamNames)
+ w.Sync(pkgbits.SyncTypeParamNames)
ntparams := tparams.Len()
for i := 0; i < ntparams; i++ {
@@ -653,7 +656,7 @@ func (w *writer) method(wext *writer, meth *types2.Func) {
assert(ok)
sig := meth.Type().(*types2.Signature)
- w.sync(syncMethod)
+ w.Sync(pkgbits.SyncMethod)
w.pos(meth)
w.selector(meth)
w.typeParamNames(sig.RecvTypeParams())
@@ -667,7 +670,7 @@ func (w *writer) method(wext *writer, meth *types2.Func) {
// qualifiedIdent writes out the name of an object declared at package
// scope. (For now, it's also used to refer to local defined types.)
func (w *writer) qualifiedIdent(obj types2.Object) {
- w.sync(syncSym)
+ w.Sync(pkgbits.SyncSym)
name := obj.Name()
if isDefinedType(obj) && obj.Pkg() == w.p.curpkg {
@@ -681,7 +684,7 @@ func (w *writer) qualifiedIdent(obj types2.Object) {
}
w.pkg(obj.Pkg())
- w.string(name)
+ w.String(name)
}
// TODO(mdempsky): We should be able to omit pkg from both localIdent
@@ -694,17 +697,17 @@ func (w *writer) qualifiedIdent(obj types2.Object) {
// particular function).
func (w *writer) localIdent(obj types2.Object) {
assert(!isGlobal(obj))
- w.sync(syncLocalIdent)
+ w.Sync(pkgbits.SyncLocalIdent)
w.pkg(obj.Pkg())
- w.string(obj.Name())
+ w.String(obj.Name())
}
// selector writes the name of a field or method (i.e., objects that
// can only be accessed using selector expressions).
func (w *writer) selector(obj types2.Object) {
- w.sync(syncSelector)
+ w.Sync(pkgbits.SyncSelector)
w.pkg(obj.Pkg())
- w.string(obj.Name())
+ w.String(obj.Name())
}
// @@@ Compiler extensions
@@ -740,56 +743,56 @@ func (w *writer) funcExt(obj *types2.Func) {
body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, block, w.dict)
assert(len(closureVars) == 0)
- w.sync(syncFuncExt)
+ w.Sync(pkgbits.SyncFuncExt)
w.pragmaFlag(pragma)
w.linkname(obj)
- w.bool(false) // stub extension
- w.reloc(relocBody, body)
- w.sync(syncEOF)
+ w.Bool(false) // stub extension
+ w.Reloc(pkgbits.RelocBody, body)
+ w.Sync(pkgbits.SyncEOF)
}
func (w *writer) typeExt(obj *types2.TypeName) {
decl, ok := w.p.typDecls[obj]
assert(ok)
- w.sync(syncTypeExt)
+ w.Sync(pkgbits.SyncTypeExt)
w.pragmaFlag(asPragmaFlag(decl.Pragma))
// No LSym.SymIdx info yet.
- w.int64(-1)
- w.int64(-1)
+ w.Int64(-1)
+ w.Int64(-1)
}
func (w *writer) varExt(obj *types2.Var) {
- w.sync(syncVarExt)
+ w.Sync(pkgbits.SyncVarExt)
w.linkname(obj)
}
func (w *writer) linkname(obj types2.Object) {
- w.sync(syncLinkname)
- w.int64(-1)
- w.string(w.p.linknames[obj])
+ w.Sync(pkgbits.SyncLinkname)
+ w.Int64(-1)
+ w.String(w.p.linknames[obj])
}
func (w *writer) pragmaFlag(p ir.PragmaFlag) {
- w.sync(syncPragma)
- w.int(int(p))
+ w.Sync(pkgbits.SyncPragma)
+ w.Int(int(p))
}
// @@@ Function bodies
func (pw *pkgWriter) bodyIdx(pkg *types2.Package, sig *types2.Signature, block *syntax.BlockStmt, dict *writerDict) (idx int, closureVars []posObj) {
- w := pw.newWriter(relocBody, syncFuncBody)
+ w := pw.newWriter(pkgbits.RelocBody, pkgbits.SyncFuncBody)
w.dict = dict
w.funcargs(sig)
- if w.bool(block != nil) {
+ if w.Bool(block != nil) {
w.stmts(block.List)
w.pos(block.Rbrace)
}
- return w.flush(), w.closureVars
+ return w.Flush(), w.closureVars
}
func (w *writer) funcargs(sig *types2.Signature) {
@@ -813,10 +816,10 @@ func (w *writer) funcarg(param *types2.Var, result bool) {
}
func (w *writer) addLocal(obj *types2.Var) {
- w.sync(syncAddLocal)
+ w.Sync(pkgbits.SyncAddLocal)
idx := len(w.localsIdx)
- if enableSync {
- w.int(idx)
+ if pkgbits.EnableSync {
+ w.Int(idx)
}
if w.localsIdx == nil {
w.localsIdx = make(map[*types2.Var]int)
@@ -825,10 +828,10 @@ func (w *writer) addLocal(obj *types2.Var) {
}
func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) {
- w.sync(syncUseObjLocal)
+ w.Sync(pkgbits.SyncUseObjLocal)
- if idx, ok := w.localsIdx[obj]; w.bool(ok) {
- w.len(idx)
+ if idx, ok := w.localsIdx[obj]; w.Bool(ok) {
+ w.Len(idx)
return
}
@@ -841,22 +844,22 @@ func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) {
w.closureVars = append(w.closureVars, posObj{pos, obj})
w.closureVarsIdx[obj] = idx
}
- w.len(idx)
+ w.Len(idx)
}
func (w *writer) openScope(pos syntax.Pos) {
- w.sync(syncOpenScope)
+ w.Sync(pkgbits.SyncOpenScope)
w.pos(pos)
}
func (w *writer) closeScope(pos syntax.Pos) {
- w.sync(syncCloseScope)
+ w.Sync(pkgbits.SyncCloseScope)
w.pos(pos)
w.closeAnotherScope()
}
func (w *writer) closeAnotherScope() {
- w.sync(syncCloseAnotherScope)
+ w.Sync(pkgbits.SyncCloseAnotherScope)
}
// @@@ Statements
@@ -870,12 +873,12 @@ func (w *writer) stmt(stmt syntax.Stmt) {
}
func (w *writer) stmts(stmts []syntax.Stmt) {
- w.sync(syncStmts)
+ w.Sync(pkgbits.SyncStmts)
for _, stmt := range stmts {
w.stmt1(stmt)
}
- w.code(stmtEnd)
- w.sync(syncStmtsEnd)
+ w.Code(stmtEnd)
+ w.Sync(pkgbits.SyncStmtsEnd)
}
func (w *writer) stmt1(stmt syntax.Stmt) {
@@ -889,37 +892,37 @@ func (w *writer) stmt1(stmt syntax.Stmt) {
case *syntax.AssignStmt:
switch {
case stmt.Rhs == nil:
- w.code(stmtIncDec)
+ w.Code(stmtIncDec)
w.op(binOps[stmt.Op])
w.expr(stmt.Lhs)
w.pos(stmt)
case stmt.Op != 0 && stmt.Op != syntax.Def:
- w.code(stmtAssignOp)
+ w.Code(stmtAssignOp)
w.op(binOps[stmt.Op])
w.expr(stmt.Lhs)
w.pos(stmt)
w.expr(stmt.Rhs)
default:
- w.code(stmtAssign)
+ w.Code(stmtAssign)
w.pos(stmt)
w.exprList(stmt.Rhs)
w.assignList(stmt.Lhs)
}
case *syntax.BlockStmt:
- w.code(stmtBlock)
+ w.Code(stmtBlock)
w.blockStmt(stmt)
case *syntax.BranchStmt:
- w.code(stmtBranch)
+ w.Code(stmtBranch)
w.pos(stmt)
w.op(branchOps[stmt.Tok])
w.optLabel(stmt.Label)
case *syntax.CallStmt:
- w.code(stmtCall)
+ w.Code(stmtCall)
w.pos(stmt)
w.op(callOps[stmt.Tok])
w.expr(stmt.Call)
@@ -930,54 +933,54 @@ func (w *writer) stmt1(stmt syntax.Stmt) {
}
case *syntax.ExprStmt:
- w.code(stmtExpr)
+ w.Code(stmtExpr)
w.expr(stmt.X)
case *syntax.ForStmt:
- w.code(stmtFor)
+ w.Code(stmtFor)
w.forStmt(stmt)
case *syntax.IfStmt:
- w.code(stmtIf)
+ w.Code(stmtIf)
w.ifStmt(stmt)
case *syntax.LabeledStmt:
- w.code(stmtLabel)
+ w.Code(stmtLabel)
w.pos(stmt)
w.label(stmt.Label)
w.stmt1(stmt.Stmt)
case *syntax.ReturnStmt:
- w.code(stmtReturn)
+ w.Code(stmtReturn)
w.pos(stmt)
w.exprList(stmt.Results)
case *syntax.SelectStmt:
- w.code(stmtSelect)
+ w.Code(stmtSelect)
w.selectStmt(stmt)
case *syntax.SendStmt:
- w.code(stmtSend)
+ w.Code(stmtSend)
w.pos(stmt)
w.expr(stmt.Chan)
w.expr(stmt.Value)
case *syntax.SwitchStmt:
- w.code(stmtSwitch)
+ w.Code(stmtSwitch)
w.switchStmt(stmt)
}
}
func (w *writer) assignList(expr syntax.Expr) {
exprs := unpackListExpr(expr)
- w.len(len(exprs))
+ w.Len(len(exprs))
for _, expr := range exprs {
if name, ok := expr.(*syntax.Name); ok && name.Value != "_" {
if obj, ok := w.p.info.Defs[name]; ok {
obj := obj.(*types2.Var)
- w.bool(true)
+ w.Bool(true)
w.pos(obj)
w.localIdent(obj)
w.typ(obj.Type())
@@ -989,7 +992,7 @@ func (w *writer) assignList(expr syntax.Expr) {
}
}
- w.bool(false)
+ w.Bool(false)
w.expr(expr)
}
}
@@ -999,37 +1002,10 @@ func (w *writer) declStmt(decl syntax.Decl) {
default:
w.p.unexpected("declaration", decl)
- case *syntax.ConstDecl:
-
- case *syntax.TypeDecl:
- // Quirk: The legacy inliner doesn't support inlining functions
- // with type declarations. Unified IR doesn't have any need to
- // write out type declarations explicitly (they're always looked
- // up via global index tables instead), so we just write out a
- // marker so the reader knows to synthesize a fake declaration to
- // prevent inlining.
- if quirksMode() {
- w.code(stmtTypeDeclHack)
- }
+ case *syntax.ConstDecl, *syntax.TypeDecl:
case *syntax.VarDecl:
- values := unpackListExpr(decl.Values)
-
- // Quirk: When N variables are declared with N initialization
- // values, we need to decompose that into N interleaved
- // declarations+initializations, because it leads to different
- // (albeit semantically equivalent) code generation.
- if quirksMode() && len(decl.NameList) == len(values) {
- for i, name := range decl.NameList {
- w.code(stmtAssign)
- w.pos(decl)
- w.exprList(values[i])
- w.assignList(name)
- }
- break
- }
-
- w.code(stmtAssign)
+ w.Code(stmtAssign)
w.pos(decl)
w.exprList(decl.Values)
w.assignList(namesAsExpr(decl.NameList))
@@ -1037,17 +1013,17 @@ func (w *writer) declStmt(decl syntax.Decl) {
}
func (w *writer) blockStmt(stmt *syntax.BlockStmt) {
- w.sync(syncBlockStmt)
+ w.Sync(pkgbits.SyncBlockStmt)
w.openScope(stmt.Pos())
w.stmts(stmt.List)
w.closeScope(stmt.Rbrace)
}
func (w *writer) forStmt(stmt *syntax.ForStmt) {
- w.sync(syncForStmt)
+ w.Sync(pkgbits.SyncForStmt)
w.openScope(stmt.Pos())
- if rang, ok := stmt.Init.(*syntax.RangeClause); w.bool(ok) {
+ if rang, ok := stmt.Init.(*syntax.RangeClause); w.Bool(ok) {
w.pos(rang)
w.expr(rang.X)
w.assignList(rang.Lhs)
@@ -1063,7 +1039,7 @@ func (w *writer) forStmt(stmt *syntax.ForStmt) {
}
func (w *writer) ifStmt(stmt *syntax.IfStmt) {
- w.sync(syncIfStmt)
+ w.Sync(pkgbits.SyncIfStmt)
w.openScope(stmt.Pos())
w.pos(stmt)
w.stmt(stmt.Init)
@@ -1074,10 +1050,10 @@ func (w *writer) ifStmt(stmt *syntax.IfStmt) {
}
func (w *writer) selectStmt(stmt *syntax.SelectStmt) {
- w.sync(syncSelectStmt)
+ w.Sync(pkgbits.SyncSelectStmt)
w.pos(stmt)
- w.len(len(stmt.Body))
+ w.Len(len(stmt.Body))
for i, clause := range stmt.Body {
if i > 0 {
w.closeScope(clause.Pos())
@@ -1094,24 +1070,24 @@ func (w *writer) selectStmt(stmt *syntax.SelectStmt) {
}
func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
- w.sync(syncSwitchStmt)
+ w.Sync(pkgbits.SyncSwitchStmt)
w.openScope(stmt.Pos())
w.pos(stmt)
w.stmt(stmt.Init)
- if guard, ok := stmt.Tag.(*syntax.TypeSwitchGuard); w.bool(ok) {
+ if guard, ok := stmt.Tag.(*syntax.TypeSwitchGuard); w.Bool(ok) {
w.pos(guard)
- if tag := guard.Lhs; w.bool(tag != nil) {
+ if tag := guard.Lhs; w.Bool(tag != nil) {
w.pos(tag)
- w.string(tag.Value)
+ w.String(tag.Value)
}
w.expr(guard.X)
} else {
w.expr(stmt.Tag)
}
- w.len(len(stmt.Body))
+ w.Len(len(stmt.Body))
for i, clause := range stmt.Body {
if i > 0 {
w.closeScope(clause.Pos())
@@ -1148,15 +1124,15 @@ func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
}
func (w *writer) label(label *syntax.Name) {
- w.sync(syncLabel)
+ w.Sync(pkgbits.SyncLabel)
// TODO(mdempsky): Replace label strings with dense indices.
- w.string(label.Value)
+ w.String(label.Value)
}
func (w *writer) optLabel(label *syntax.Name) {
- w.sync(syncOptLabel)
- if w.bool(label != nil) {
+ w.Sync(pkgbits.SyncOptLabel)
+ if w.Bool(label != nil) {
w.label(label)
}
}
@@ -1178,41 +1154,28 @@ func (w *writer) expr(expr syntax.Expr) {
}
if tv.IsType() {
- w.code(exprType)
+ w.Code(exprType)
w.typ(tv.Type)
return
}
if tv.Value != nil {
- pos := expr.Pos()
- if quirksMode() {
- if obj != nil {
- // Quirk: IR (and thus iexport) doesn't track position
- // information for uses of declared objects.
- pos = syntax.Pos{}
- } else if tv.Value.Kind() == constant.String {
- // Quirk: noder.sum picks a particular position for certain
- // string concatenations.
- pos = sumPos(expr)
- }
- }
-
- w.code(exprConst)
- w.pos(pos)
+ w.Code(exprConst)
+ w.pos(expr.Pos())
w.typ(tv.Type)
- w.value(tv.Value)
+ w.Value(tv.Value)
// TODO(mdempsky): These details are only important for backend
// diagnostics. Explore writing them out separately.
w.op(constExprOp(expr))
- w.string(syntax.String(expr))
+ w.String(syntax.String(expr))
return
}
}
if obj != nil {
if isGlobal(obj) {
- w.code(exprName)
+ w.Code(exprName)
w.obj(obj, targs)
return
}
@@ -1221,7 +1184,7 @@ func (w *writer) expr(expr syntax.Expr) {
assert(!obj.IsField())
assert(targs.Len() == 0)
- w.code(exprLocal)
+ w.Code(exprLocal)
w.useLocal(expr.Pos(), obj)
return
}
@@ -1231,25 +1194,25 @@ func (w *writer) expr(expr syntax.Expr) {
w.p.unexpected("expression", expr)
case nil: // absent slice index, for condition, or switch tag
- w.code(exprNone)
+ w.Code(exprNone)
case *syntax.Name:
assert(expr.Value == "_")
- w.code(exprBlank)
+ w.Code(exprBlank)
case *syntax.CompositeLit:
- w.code(exprCompLit)
+ w.Code(exprCompLit)
w.compLit(expr)
case *syntax.FuncLit:
- w.code(exprFuncLit)
+ w.Code(exprFuncLit)
w.funcLit(expr)
case *syntax.SelectorExpr:
sel, ok := w.p.info.Selections[expr]
assert(ok)
- w.code(exprSelector)
+ w.Code(exprSelector)
w.expr(expr.X)
w.pos(expr)
w.selector(sel.Obj())
@@ -1258,13 +1221,13 @@ func (w *writer) expr(expr syntax.Expr) {
tv, ok := w.p.info.Types[expr.Index]
assert(ok && tv.IsValue())
- w.code(exprIndex)
+ w.Code(exprIndex)
w.expr(expr.X)
w.pos(expr)
w.expr(expr.Index)
case *syntax.SliceExpr:
- w.code(exprSlice)
+ w.Code(exprSlice)
w.expr(expr.X)
w.pos(expr)
for _, n := range &expr.Index {
@@ -1272,21 +1235,21 @@ func (w *writer) expr(expr syntax.Expr) {
}
case *syntax.AssertExpr:
- w.code(exprAssert)
+ w.Code(exprAssert)
w.expr(expr.X)
w.pos(expr)
w.expr(expr.Type)
case *syntax.Operation:
if expr.Y == nil {
- w.code(exprUnaryOp)
+ w.Code(exprUnaryOp)
w.op(unOps[expr.Op])
w.pos(expr)
w.expr(expr.X)
break
}
- w.code(exprBinaryOp)
+ w.Code(exprBinaryOp)
w.op(binOps[expr.Op])
w.expr(expr.X)
w.pos(expr)
@@ -1299,7 +1262,7 @@ func (w *writer) expr(expr syntax.Expr) {
assert(len(expr.ArgList) == 1)
assert(!expr.HasDots)
- w.code(exprConvert)
+ w.Code(exprConvert)
w.typ(tv.Type)
w.pos(expr)
w.expr(expr.ArgList[0])
@@ -1310,7 +1273,7 @@ func (w *writer) expr(expr syntax.Expr) {
if selector, ok := unparen(expr.Fun).(*syntax.SelectorExpr); ok {
if sel, ok := w.p.info.Selections[selector]; ok && sel.Kind() == types2.MethodVal {
w.expr(selector.X)
- w.bool(true) // method call
+ w.Bool(true) // method call
w.pos(selector)
w.selector(sel.Obj())
return
@@ -1318,14 +1281,14 @@ func (w *writer) expr(expr syntax.Expr) {
}
w.expr(expr.Fun)
- w.bool(false) // not a method call (i.e., normal function call)
+ w.Bool(false) // not a method call (i.e., normal function call)
}
- w.code(exprCall)
+ w.Code(exprCall)
writeFunExpr()
w.pos(expr)
w.exprs(expr.ArgList)
- w.bool(expr.HasDots)
+ w.Bool(expr.HasDots)
}
}
@@ -1333,30 +1296,30 @@ func (w *writer) compLit(lit *syntax.CompositeLit) {
tv, ok := w.p.info.Types[lit]
assert(ok)
- w.sync(syncCompLit)
+ w.Sync(pkgbits.SyncCompLit)
w.pos(lit)
w.typ(tv.Type)
typ := tv.Type
- if ptr, ok := types2.StructuralType(typ).(*types2.Pointer); ok {
+ if ptr, ok := types2.CoreType(typ).(*types2.Pointer); ok {
typ = ptr.Elem()
}
- str, isStruct := types2.StructuralType(typ).(*types2.Struct)
+ str, isStruct := types2.CoreType(typ).(*types2.Struct)
- w.len(len(lit.ElemList))
+ w.Len(len(lit.ElemList))
for i, elem := range lit.ElemList {
if isStruct {
if kv, ok := elem.(*syntax.KeyValueExpr); ok {
// use position of expr.Key rather than of elem (which has position of ':')
w.pos(kv.Key)
- w.len(fieldIndex(w.p.info, str, kv.Key.(*syntax.Name)))
+ w.Len(fieldIndex(w.p.info, str, kv.Key.(*syntax.Name)))
elem = kv.Value
} else {
w.pos(elem)
- w.len(i)
+ w.Len(i)
}
} else {
- if kv, ok := elem.(*syntax.KeyValueExpr); w.bool(ok) {
+ if kv, ok := elem.(*syntax.KeyValueExpr); w.Bool(ok) {
// use position of expr.Key rather than of elem (which has position of ':')
w.pos(kv.Key)
w.expr(kv.Key)
@@ -1375,21 +1338,17 @@ func (w *writer) funcLit(expr *syntax.FuncLit) {
body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, expr.Body, w.dict)
- w.sync(syncFuncLit)
+ w.Sync(pkgbits.SyncFuncLit)
w.pos(expr)
- w.pos(expr.Type) // for QuirksMode
w.signature(sig)
- w.len(len(closureVars))
+ w.Len(len(closureVars))
for _, cv := range closureVars {
w.pos(cv.pos)
- if quirksMode() {
- cv.pos = expr.Body.Rbrace
- }
w.useLocal(cv.pos, cv.obj)
}
- w.reloc(relocBody, body)
+ w.Reloc(pkgbits.RelocBody, body)
}
type posObj struct {
@@ -1398,7 +1357,7 @@ type posObj struct {
}
func (w *writer) exprList(expr syntax.Expr) {
- w.sync(syncExprList)
+ w.Sync(pkgbits.SyncExprList)
w.exprs(unpackListExpr(expr))
}
@@ -1407,8 +1366,8 @@ func (w *writer) exprs(exprs []syntax.Expr) {
assert(exprs == nil)
}
- w.sync(syncExprs)
- w.len(len(exprs))
+ w.Sync(pkgbits.SyncExprs)
+ w.Len(len(exprs))
for _, expr := range exprs {
w.expr(expr)
}
@@ -1419,8 +1378,8 @@ func (w *writer) op(op ir.Op) {
// export data more stable against internal refactorings, but low
// priority at the moment.
assert(op != 0)
- w.sync(syncOp)
- w.len(int(op))
+ w.Sync(pkgbits.SyncOp)
+ w.Len(int(op))
}
func (w *writer) needType(typ types2.Type) {
@@ -1538,21 +1497,6 @@ func (c *declCollector) Visit(n syntax.Node) syntax.Visitor {
}
}
- // Workaround for #46208. For variable declarations that
- // declare multiple variables and have an explicit type
- // expression, the type expression is evaluated multiple
- // times. This affects toolstash -cmp, because iexport is
- // sensitive to *types.Type pointer identity.
- if quirksMode() && n.Type != nil {
- tv, ok := pw.info.Types[n.Type]
- assert(ok)
- assert(tv.IsType())
- for _, name := range n.NameList {
- obj := pw.info.Defs[name].(*types2.Var)
- pw.dups.add(obj.Type(), tv.Type)
- }
- }
-
case *syntax.BlockStmt:
if !c.withinFunc {
copy := *c
@@ -1621,34 +1565,20 @@ func (pw *pkgWriter) checkPragmas(p syntax.Pragma, allowed ir.PragmaFlag, embedO
}
func (w *writer) pkgInit(noders []*noder) {
- if quirksMode() {
- posBases := posBasesOf(noders)
- w.len(len(posBases))
- for _, posBase := range posBases {
- w.posBase(posBase)
- }
-
- objs := importedObjsOf(w.p.curpkg, w.p.info, noders)
- w.len(len(objs))
- for _, obj := range objs {
- w.qualifiedIdent(obj)
- }
- }
-
- w.len(len(w.p.cgoPragmas))
+ w.Len(len(w.p.cgoPragmas))
for _, cgoPragma := range w.p.cgoPragmas {
- w.strings(cgoPragma)
+ w.Strings(cgoPragma)
}
- w.sync(syncDecls)
+ w.Sync(pkgbits.SyncDecls)
for _, p := range noders {
for _, decl := range p.file.DeclList {
w.pkgDecl(decl)
}
}
- w.code(declEnd)
+ w.Code(declEnd)
- w.sync(syncEOF)
+ w.Sync(pkgbits.SyncEOF)
}
func (w *writer) pkgDecl(decl syntax.Decl) {
@@ -1659,7 +1589,7 @@ func (w *writer) pkgDecl(decl syntax.Decl) {
case *syntax.ImportDecl:
case *syntax.ConstDecl:
- w.code(declOther)
+ w.Code(declOther)
w.pkgObjs(decl.NameList...)
case *syntax.FuncDecl:
@@ -1675,13 +1605,13 @@ func (w *writer) pkgDecl(decl syntax.Decl) {
}
if recv := sig.Recv(); recv != nil {
- w.code(declMethod)
+ w.Code(declMethod)
w.typ(recvBase(recv))
w.selector(obj)
break
}
- w.code(declFunc)
+ w.Code(declFunc)
w.pkgObjs(decl.Name)
case *syntax.TypeDecl:
@@ -1709,11 +1639,11 @@ func (w *writer) pkgDecl(decl syntax.Decl) {
}
}
- w.code(declOther)
+ w.Code(declOther)
w.pkgObjs(decl.Name)
case *syntax.VarDecl:
- w.code(declVar)
+ w.Code(declVar)
w.pos(decl)
w.pkgObjs(decl.NameList...)
w.exprList(decl.Values)
@@ -1722,23 +1652,23 @@ func (w *writer) pkgDecl(decl syntax.Decl) {
if p, ok := decl.Pragma.(*pragmas); ok {
embeds = p.Embeds
}
- w.len(len(embeds))
+ w.Len(len(embeds))
for _, embed := range embeds {
w.pos(embed.Pos)
- w.strings(embed.Patterns)
+ w.Strings(embed.Patterns)
}
}
}
func (w *writer) pkgObjs(names ...*syntax.Name) {
- w.sync(syncDeclNames)
- w.len(len(names))
+ w.Sync(pkgbits.SyncDeclNames)
+ w.Len(len(names))
for _, name := range names {
obj, ok := w.p.info.Defs[name]
assert(ok)
- w.sync(syncDeclName)
+ w.Sync(pkgbits.SyncDeclName)
w.obj(obj, nil)
}
}
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index 3ae6422bf9..7877be3336 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -46,10 +46,9 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
func ginsnop(pp *objw.Progs) *obj.Prog {
+ // Generate the preferred hardware nop: ori 0,0,0
p := pp.Prog(ppc64.AOR)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = ppc64.REG_R0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REG_R0
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0}
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: ppc64.REG_R0}
return p
}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index e76f9c5f3c..c809f4658d 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -1424,9 +1424,7 @@ func WriteBasicTypes() {
}
writeType(types.NewPtr(types.Types[types.TSTRING]))
writeType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
- if base.Flag.G > 0 {
- writeType(types.AnyType)
- }
+ writeType(types.AnyType)
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
@@ -1457,7 +1455,7 @@ func WriteBasicTypes() {
type typeAndStr struct {
t *types.Type
- short string // "short" here means NameString
+ short string // "short" here means TypeSymName
regular string
}
@@ -1853,8 +1851,8 @@ func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSy
}
newnam.SetSiggen(true)
- // Except in quirks mode, unified IR creates its own wrappers.
- if base.Debug.Unified != 0 && base.Debug.UnifiedQuirks == 0 {
+ // Unified IR creates its own wrappers.
+ if base.Debug.Unified != 0 {
return lsym
}
diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go
index c0ccdb1c93..a76358967d 100644
--- a/src/cmd/compile/internal/ssa/debug_lines_test.go
+++ b/src/cmd/compile/internal/ssa/debug_lines_test.go
@@ -78,7 +78,7 @@ func TestDebugLinesPushback(t *testing.T) {
// Unified mangles differently
fn = "(*List[int]).PushBack"
}
- testDebugLines(t, "-N -l -G=3", "pushback.go", fn, []int{17, 18, 19, 20, 21, 22, 24}, true)
+ testDebugLines(t, "-N -l", "pushback.go", fn, []int{17, 18, 19, 20, 21, 22, 24}, true)
}
}
@@ -97,7 +97,7 @@ func TestDebugLinesConvert(t *testing.T) {
// Unified mangles differently
fn = "G[int]"
}
- testDebugLines(t, "-N -l -G=3", "convertline.go", fn, []int{9, 10, 11}, true)
+ testDebugLines(t, "-N -l", "convertline.go", fn, []int{9, 10, 11}, true)
}
}
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 3379e1dac5..96b24a6380 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -256,7 +256,7 @@
(Leq64F ...) => (FLED ...)
(Leq32F ...) => (FLES ...)
-(EqPtr x y) => (SEQZ (SUB <x.Type> x y))
+(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
(Eq32 x y) => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
@@ -264,7 +264,7 @@
(Eq64F ...) => (FEQD ...)
(Eq32F ...) => (FEQS ...)
-(NeqPtr x y) => (SNEZ (SUB <x.Type> x y))
+(NeqPtr x y) => (SNEZ (SUB <typ.Uintptr> x y))
(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
(Neq32 x y) => (SNEZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Neq16 x y) => (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
index d2719eb8a1..ee884ca761 100644
--- a/src/cmd/compile/internal/ssa/poset.go
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -906,7 +906,7 @@ func (po *poset) Ordered(n1, n2 *Value) bool {
return i1 != i2 && po.reaches(i1, i2, true)
}
-// Ordered reports whether n1<=n2. It returns false either when it is
+// OrderedOrEqual reports whether n1<=n2. It returns false either when it is
// certain that n1<=n2 is false, or if there is not enough information
// to tell.
// Complexity is O(n).
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 885bbaf4a1..a67d13e0da 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -1124,13 +1124,14 @@ func rewriteValueRISCV64_OpEqPtr(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ typ := &b.Func.Config.Types
// match: (EqPtr x y)
- // result: (SEQZ (SUB <x.Type> x y))
+ // result: (SEQZ (SUB <typ.Uintptr> x y))
for {
x := v_0
y := v_1
v.reset(OpRISCV64SEQZ)
- v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Uintptr)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@@ -2673,13 +2674,14 @@ func rewriteValueRISCV64_OpNeqPtr(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ typ := &b.Func.Config.Types
// match: (NeqPtr x y)
- // result: (SNEZ (SUB <x.Type> x y))
+ // result: (SNEZ (SUB <typ.Uintptr> x y))
for {
x := v_0
y := v_1
v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Uintptr)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 0b54925696..60747d93ca 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -2382,7 +2382,7 @@ func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
- return s.newValue1(ssa.OpCopy, tt, v)
+ return s.newValue1(ssa.OpCvtBoolToUint8, tt, v)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
@@ -6768,6 +6768,34 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
return x
}
+// for wrapper, emit info of wrapped function.
+func emitWrappedFuncInfo(e *ssafn, pp *objw.Progs) {
+ if base.Ctxt.Flag_linkshared {
+ // Relative reference (SymPtrOff) to another shared object doesn't work.
+ // Unfortunate.
+ return
+ }
+
+ wfn := e.curfn.WrappedFunc
+ if wfn == nil {
+ return
+ }
+
+ wsym := wfn.Linksym()
+ x := base.Ctxt.LookupInit(fmt.Sprintf("%s.wrapinfo", wsym.Name), func(x *obj.LSym) {
+ objw.SymPtrOff(x, 0, wsym)
+ x.Set(obj.AttrContentAddressable, true)
+ })
+ e.curfn.LSym.Func().WrapInfo = x
+
+ // Emit a funcdata pointing at the wrap info data.
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_WrapInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+}
+
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *objw.Progs) {
var s State
@@ -6790,6 +6818,8 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
p.To.Sym = openDeferInfo
}
+ emitWrappedFuncInfo(e, pp)
+
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
s.pp = pp
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
index 636199de47..d183425724 100644
--- a/src/cmd/compile/internal/staticinit/sched.go
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -521,7 +521,6 @@ func AnySideEffects(n ir.Node) bool {
case ir.ONAME,
ir.ONONAME,
ir.OTYPE,
- ir.OPACK,
ir.OLITERAL,
ir.ONIL,
ir.OADD,
diff --git a/src/cmd/compile/internal/test/testdata/ptrsort.go b/src/cmd/compile/internal/test/testdata/ptrsort.go
index 6cc7ba4851..d26ba581d9 100644
--- a/src/cmd/compile/internal/test/testdata/ptrsort.go
+++ b/src/cmd/compile/internal/test/testdata/ptrsort.go
@@ -6,7 +6,7 @@ package main
import (
"fmt"
- "./mysort"
+ "cmd/compile/internal/test/testdata/mysort"
)
type MyString struct {
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
index 68ab05a538..d1eec6d322 100644
--- a/src/cmd/compile/internal/typecheck/dcl.go
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -70,14 +70,6 @@ func Declare(n *ir.Name, ctxt ir.Class) {
n.SetFrameOffset(0)
}
- if s.Block == types.Block {
- // functype will print errors about duplicate function arguments.
- // Don't repeat the error here.
- if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT {
- Redeclared(n.Pos(), s, "in this block")
- }
- }
-
s.Block = types.Block
s.Lastlineno = base.Pos
s.Def = n
@@ -103,38 +95,6 @@ func Export(n *ir.Name) {
Target.Exports = append(Target.Exports, n)
}
-// Redeclared emits a diagnostic about symbol s being redeclared at pos.
-func Redeclared(pos src.XPos, s *types.Sym, where string) {
- if !s.Lastlineno.IsKnown() {
- var pkgName *ir.PkgName
- if s.Def == nil {
- for id, pkg := range DotImportRefs {
- if id.Sym().Name == s.Name {
- pkgName = pkg
- break
- }
- }
- } else {
- pkgName = DotImportRefs[s.Def.(*ir.Ident)]
- }
- base.ErrorfAt(pos, "%v redeclared %s\n"+
- "\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path)
- } else {
- prevPos := s.Lastlineno
-
- // When an import and a declaration collide in separate files,
- // present the import as the "redeclared", because the declaration
- // is visible where the import is, but not vice versa.
- // See issue 4510.
- if s.Def == nil {
- pos, prevPos = prevPos, pos
- }
-
- base.ErrorfAt(pos, "%v redeclared %s\n"+
- "\t%v: previous declaration", s, where, base.FmtPos(prevPos))
- }
-}
-
// declare the function proper
// and declare the arguments.
// called in extern-declaration context
@@ -171,90 +131,6 @@ func CheckFuncStack() {
}
}
-// Add a method, declared as a function.
-// - msym is the method symbol
-// - t is function type (with receiver)
-// Returns a pointer to the existing or added Field; or nil if there's an error.
-func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
- if msym == nil {
- base.Fatalf("no method symbol")
- }
-
- // get parent type sym
- rf := t.Recv() // ptr to this structure
- if rf == nil {
- base.Errorf("missing receiver")
- return nil
- }
-
- mt := types.ReceiverBaseType(rf.Type)
- if mt == nil || mt.Sym() == nil {
- pa := rf.Type
- t := pa
- if t != nil && t.IsPtr() {
- if t.Sym() != nil {
- base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
- return nil
- }
- t = t.Elem()
- }
-
- switch {
- case t == nil || t.Broke():
- // rely on typecheck having complained before
- case t.Sym() == nil:
- base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t)
- case t.IsPtr():
- base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
- case t.IsInterface():
- base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t)
- default:
- // Should have picked off all the reasons above,
- // but just in case, fall back to generic error.
- base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t)
- }
- return nil
- }
-
- if local && mt.Sym().Pkg != types.LocalPkg {
- base.Errorf("cannot define new methods on non-local type %v", mt)
- return nil
- }
-
- if msym.IsBlank() {
- return nil
- }
-
- if mt.IsStruct() {
- for _, f := range mt.Fields().Slice() {
- if f.Sym == msym {
- base.Errorf("type %v has both field and method named %v", mt, msym)
- f.SetBroke(true)
- return nil
- }
- }
- }
-
- for _, f := range mt.Methods().Slice() {
- if msym.Name != f.Sym.Name {
- continue
- }
- // types.Identical only checks that incoming and result parameters match,
- // so explicitly check that the receiver parameters match too.
- if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
- base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
- }
- return f
- }
-
- f := types.NewField(base.Pos, msym, t)
- f.Nname = n.Nname
- f.SetNointerface(nointerface)
-
- mt.Methods().Append(f)
- return f
-}
-
func autoexport(n *ir.Name, ctxt ir.Class) {
if n.Sym().Pkg != types.LocalPkg {
return
@@ -455,13 +331,6 @@ func autotmpname(n int) string {
// Add a preceding . to avoid clashing with legal names.
prefix := ".autotmp_%d"
- // In quirks mode, pad out the number to stabilize variable
- // sorting. This ensures autotmps 8 and 9 sort the same way even
- // if they get renumbered to 9 and 10, respectively.
- if base.Debug.UnifiedQuirks != 0 {
- prefix = ".autotmp_%06d"
- }
-
s = fmt.Sprintf(prefix, n)
autotmpnames[n] = s
}
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index eb316d33db..0fe8f91696 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -220,21 +220,6 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
ir.SetPos(n.Ntype)
- // Need to handle [...]T arrays specially.
- if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil {
- array.Elem = typecheckNtype(array.Elem)
- elemType := array.Elem.Type()
- if elemType == nil {
- n.SetType(nil)
- return n
- }
- length := typecheckarraylit(elemType, -1, n.List, "array literal")
- n.SetOp(ir.OARRAYLIT)
- n.SetType(types.NewArray(elemType, length))
- n.Ntype = nil
- return n
- }
-
n.Ntype = typecheckNtype(n.Ntype)
t := n.Ntype.Type()
if t == nil {
@@ -375,13 +360,7 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
func tcStructLitKey(typ *types.Type, kv *ir.KeyExpr) *ir.StructKeyExpr {
key := kv.Key
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
sym := key.Sym()
- if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
- sym = Lookup(sym.Name)
- }
// An OXDOT uses the Sym field to hold
// the field to the right of the dot,
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index 57b15b7a2b..c6fd273bd1 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -302,20 +302,6 @@ func tcFunc(n *ir.Func) {
}
n.Nname = AssignExpr(n.Nname).(*ir.Name)
- t := n.Nname.Type()
- if t == nil {
- return
- }
- rcvr := t.Recv()
- if rcvr != nil && n.Shortname != nil {
- m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0)
- if m == nil {
- return
- }
-
- n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname))
- Declare(n.Nname, ir.PFUNC)
- }
}
// tcCall typechecks an OCALL node.
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index ae3c41ca04..fe0c80ac58 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -607,7 +607,7 @@ func (p *iexporter) doDecl(n *ir.Name) {
// Do same for ComparableType as for ErrorType.
underlying = types.ComparableType
}
- if base.Flag.G > 0 && underlying == types.AnyType.Underlying() {
+ if underlying == types.AnyType.Underlying() {
// Do same for AnyType as for ErrorType.
underlying = types.AnyType
}
@@ -621,12 +621,7 @@ func (p *iexporter) doDecl(n *ir.Name) {
break
}
- // Sort methods, for consistency with types2.
- methods := append([]*types.Field(nil), t.Methods().Slice()...)
- if base.Debug.UnifiedQuirks != 0 {
- sort.Sort(types.MethodsByName(methods))
- }
-
+ methods := t.Methods().Slice()
w.uint64(uint64(len(methods)))
for _, m := range methods {
w.pos(m.Pos)
@@ -954,7 +949,6 @@ func (w *exportWriter) startType(k itag) {
func (w *exportWriter) doTyp(t *types.Type) {
s := t.Sym()
if s != nil && t.OrigSym() != nil {
- assert(base.Flag.G > 0)
// This is an instantiated type - could be a re-instantiation like
// Value[T2] or a full instantiation like Value[int].
if strings.Index(s.Name, "[") < 0 {
@@ -979,7 +973,6 @@ func (w *exportWriter) doTyp(t *types.Type) {
// type, rather than a defined type with typeparam underlying type, like:
// type orderedAbs[T any] T
if t.IsTypeParam() && t.Underlying() == t {
- assert(base.Flag.G > 0)
if s.Pkg == types.BuiltinPkg || s.Pkg == types.UnsafePkg {
base.Fatalf("builtin type missing from typIndex: %v", t)
}
@@ -1052,14 +1045,6 @@ func (w *exportWriter) doTyp(t *types.Type) {
}
}
- // Sort methods and embedded types, for consistency with types2.
- // Note: embedded types may be anonymous, and types2 sorts them
- // with sort.Stable too.
- if base.Debug.UnifiedQuirks != 0 {
- sort.Sort(types.MethodsByName(methods))
- sort.Stable(types.EmbeddedsByName(embeddeds))
- }
-
w.startType(interfaceType)
w.setPkg(t.Pkg(), true)
@@ -1077,7 +1062,6 @@ func (w *exportWriter) doTyp(t *types.Type) {
}
case types.TUNION:
- assert(base.Flag.G > 0)
// TODO(danscales): possibly put out the tilde bools in more
// compact form.
w.startType(unionType)
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
index bc34d3933a..ef91f550a5 100644
--- a/src/cmd/compile/internal/typecheck/iimport.go
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -354,15 +354,18 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
// declaration before recursing.
n := importtype(pos, sym)
t := n.Type()
+
+ // Because of recursion, we need to defer width calculations and
+ // instantiations on intermediate types until the top-level type is
+ // fully constructed. Note that we can have recursion via type
+ // constraints.
+ types.DeferCheckSize()
+ deferDoInst()
if tag == 'U' {
rparams := r.typeList()
t.SetRParams(rparams)
}
- // We also need to defer width calculations until
- // after the underlying type has been assigned.
- types.DeferCheckSize()
- deferDoInst()
underlying := r.typ()
t.SetUnderlying(underlying)
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
index 9892471142..bc39015846 100644
--- a/src/cmd/compile/internal/typecheck/subr.go
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -22,10 +22,6 @@ func AssignConv(n ir.Node, t *types.Type, context string) ir.Node {
return assignconvfn(n, t, func() string { return context })
}
-// DotImportRefs maps idents introduced by importDot back to the
-// ir.PkgName they were dot-imported through.
-var DotImportRefs map[*ir.Ident]*ir.PkgName
-
// LookupNum looks up the symbol starting with prefix and ending with
// the decimal n. If prefix is too long, LookupNum panics.
func LookupNum(prefix string, n int) *types.Sym {
@@ -1424,6 +1420,68 @@ func genericTypeName(sym *types.Sym) string {
return sym.Name[0:strings.Index(sym.Name, "[")]
}
+// getShapes appends the list of the shape types that are used within type t to
+// listp. The type traversal is simplified for two reasons: (1) we can always stop a
+// type traversal when t.HasShape() is false; and (2) shape types can't appear inside
+// a named type, except for the type args of a generic type. So, the traversal will
+// always stop before we have to deal with recursive types.
+func getShapes(t *types.Type, listp *[]*types.Type) {
+ if !t.HasShape() {
+ return
+ }
+ if t.IsShape() {
+ *listp = append(*listp, t)
+ return
+ }
+
+ if t.Sym() != nil {
+ // A named type can't have shapes in it, except for type args of a
+ // generic type. We will have to deal with this differently once we
+ // alloc local types in generic functions (#47631).
+ for _, rparam := range t.RParams() {
+ getShapes(rparam, listp)
+ }
+ return
+ }
+
+ switch t.Kind() {
+ case types.TARRAY, types.TPTR, types.TSLICE, types.TCHAN:
+ getShapes(t.Elem(), listp)
+
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+
+ case types.TFUNC:
+ for _, f := range t.Recvs().FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+ for _, f := range t.Params().FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+ for _, f := range t.Results().FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+ for _, f := range t.TParams().FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+
+ case types.TINTER:
+ for _, f := range t.Methods().Slice() {
+ getShapes(f.Type, listp)
+ }
+
+ case types.TMAP:
+ getShapes(t.Key(), listp)
+ getShapes(t.Elem(), listp)
+
+ default:
+ panic(fmt.Sprintf("Bad type in getShapes: %v", t.Kind()))
+ }
+
+}
+
// Shapify takes a concrete type and a type param index, and returns a GCshape type that can
// be used in place of the input type and still generate identical code.
// No methods are added - all methods calls directly on a shape should
@@ -1432,9 +1490,9 @@ func genericTypeName(sym *types.Sym) string {
// For now, we only consider two types to have the same shape, if they have exactly
// the same underlying type or they are both pointer types.
//
-// tparam is the associated typeparam. If there is a structural type for
-// the associated type param (not common), then a pointer type t is mapped to its
-// underlying type, rather than being merged with other pointers.
+// tparam is the associated typeparam - it must be a TTYPEPARAM type. If there is a
+// structural type for the associated type param (not common), then a pointer type t
+// is mapped to its underlying type, rather than being merged with other pointers.
//
// Shape types are also distinguished by the index of the type in a type param/arg
// list. We need to do this so we can distinguish and substitute properly for two
@@ -1442,6 +1500,30 @@ func genericTypeName(sym *types.Sym) string {
// instantiation.
func Shapify(t *types.Type, index int, tparam *types.Type) *types.Type {
assert(!t.IsShape())
+ if t.HasShape() {
+ // We are sometimes dealing with types from a shape instantiation
+ // that were constructed from existing shape types, so t may
+ // sometimes have shape types inside it. In that case, we find all
+ // those shape types with getShapes() and replace them with their
+ // underlying type.
+ //
+ // If we don't do this, we may create extra unneeded shape types that
+ // have these other shape types embedded in them. This may lead to
+ // generating extra shape instantiations, and a mismatch between the
+ // instantiations that we used in generating dictionaries and the
+ // instantiations that are actually called. (#51303).
+ list := []*types.Type{}
+ getShapes(t, &list)
+ list2 := make([]*types.Type, len(list))
+ for i, shape := range list {
+ list2[i] = shape.Underlying()
+ }
+ ts := Tsubster{
+ Tparams: list,
+ Targs: list2,
+ }
+ t = ts.Typ(t)
+ }
// Map all types with the same underlying type to the same shape.
u := t.Underlying()
diff --git a/src/cmd/compile/internal/typecheck/type.go b/src/cmd/compile/internal/typecheck/type.go
index c4c1ef58ca..4ef2cbd55f 100644
--- a/src/cmd/compile/internal/typecheck/type.go
+++ b/src/cmd/compile/internal/typecheck/type.go
@@ -5,72 +5,11 @@
package typecheck
import (
- "go/constant"
-
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
-// tcArrayType typechecks an OTARRAY node.
-func tcArrayType(n *ir.ArrayType) ir.Node {
- n.Elem = typecheckNtype(n.Elem)
- if n.Elem.Type() == nil {
- return n
- }
- if n.Len == nil { // [...]T
- if !n.Diag() {
- n.SetDiag(true)
- base.Errorf("use of [...] array outside of array literal")
- }
- return n
- }
- n.Len = indexlit(Expr(n.Len))
- size := n.Len
- if ir.ConstType(size) != constant.Int {
- switch {
- case size.Type() == nil:
- // Error already reported elsewhere.
- case size.Type().IsInteger() && size.Op() != ir.OLITERAL:
- base.Errorf("non-constant array bound %v", size)
- default:
- base.Errorf("invalid array bound %v", size)
- }
- return n
- }
-
- v := size.Val()
- if ir.ConstOverflow(v, types.Types[types.TINT]) {
- base.Errorf("array bound is too large")
- return n
- }
-
- if constant.Sign(v) < 0 {
- base.Errorf("array bound must be non-negative")
- return n
- }
-
- bound, _ := constant.Int64Val(v)
- t := types.NewArray(n.Elem.Type(), bound)
- n.SetOTYPE(t)
- types.CheckSize(t)
- return n
-}
-
-// tcChanType typechecks an OTCHAN node.
-func tcChanType(n *ir.ChanType) ir.Node {
- n.Elem = typecheckNtype(n.Elem)
- l := n.Elem
- if l.Type() == nil {
- return n
- }
- if l.Type().NotInHeap() {
- base.Errorf("chan of incomplete (or unallocatable) type not allowed")
- }
- n.SetOTYPE(types.NewChan(l.Type(), n.Dir))
- return n
-}
-
// tcFuncType typechecks an OTFUNC node.
func tcFuncType(n *ir.FuncType) ir.Node {
misc := func(f *types.Field, nf *ir.Field) {
@@ -97,71 +36,6 @@ func tcFuncType(n *ir.FuncType) ir.Node {
return n
}
-// tcInterfaceType typechecks an OTINTER node.
-func tcInterfaceType(n *ir.InterfaceType) ir.Node {
- if len(n.Methods) == 0 {
- n.SetOTYPE(types.Types[types.TINTER])
- return n
- }
-
- lno := base.Pos
- methods := tcFields(n.Methods, nil)
- base.Pos = lno
-
- n.SetOTYPE(types.NewInterface(types.LocalPkg, methods, false))
- return n
-}
-
-// tcMapType typechecks an OTMAP node.
-func tcMapType(n *ir.MapType) ir.Node {
- n.Key = typecheckNtype(n.Key)
- n.Elem = typecheckNtype(n.Elem)
- l := n.Key
- r := n.Elem
- if l.Type() == nil || r.Type() == nil {
- return n
- }
- if l.Type().NotInHeap() {
- base.Errorf("incomplete (or unallocatable) map key not allowed")
- }
- if r.Type().NotInHeap() {
- base.Errorf("incomplete (or unallocatable) map value not allowed")
- }
- n.SetOTYPE(types.NewMap(l.Type(), r.Type()))
- mapqueue = append(mapqueue, n) // check map keys when all types are settled
- return n
-}
-
-// tcSliceType typechecks an OTSLICE node.
-func tcSliceType(n *ir.SliceType) ir.Node {
- n.Elem = typecheckNtype(n.Elem)
- if n.Elem.Type() == nil {
- return n
- }
- t := types.NewSlice(n.Elem.Type())
- n.SetOTYPE(t)
- types.CheckSize(t)
- return n
-}
-
-// tcStructType typechecks an OTSTRUCT node.
-func tcStructType(n *ir.StructType) ir.Node {
- lno := base.Pos
-
- fields := tcFields(n.Fields, func(f *types.Field, nf *ir.Field) {
- if nf.Embedded {
- checkembeddedtype(f.Type)
- f.Embedded = 1
- }
- f.Note = nf.Note
- })
- checkdupfields("field", fields)
-
- base.Pos = lno
- n.SetOTYPE(types.NewStruct(types.LocalPkg, fields))
- return n
-}
-
// tcField typechecks a generic Field.
// misc can be provided to handle specialized typechecking.
func tcField(n *ir.Field, misc func(*types.Field, *ir.Field)) *types.Field {
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index f6be298667..71a7841684 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -145,13 +145,6 @@ func Resolve(n ir.Node) (res ir.Node) {
}
if sym := n.Sym(); sym.Pkg != types.LocalPkg {
- // We might have an ir.Ident from oldname or importDot.
- if id, ok := n.(*ir.Ident); ok {
- if pkgName := DotImportRefs[id]; pkgName != nil {
- pkgName.Used = true
- }
- }
-
return expandDecl(n)
}
@@ -297,7 +290,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
if n.Typecheck() == 1 || n.Typecheck() == 3 {
switch n.Op() {
- case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK:
+ case ir.ONAME, ir.OTYPE, ir.OLITERAL:
break
default:
@@ -529,43 +522,14 @@ func typecheck1(n ir.Node, top int) ir.Node {
// type already set
return n
- case ir.OPACK:
- n := n.(*ir.PkgName)
- base.Errorf("use of package %v without selector", n.Sym())
- n.SetDiag(true)
- return n
-
// types (ODEREF is with exprs)
case ir.OTYPE:
return n
- case ir.OTSLICE:
- n := n.(*ir.SliceType)
- return tcSliceType(n)
-
- case ir.OTARRAY:
- n := n.(*ir.ArrayType)
- return tcArrayType(n)
-
- case ir.OTMAP:
- n := n.(*ir.MapType)
- return tcMapType(n)
-
- case ir.OTCHAN:
- n := n.(*ir.ChanType)
- return tcChanType(n)
-
- case ir.OTSTRUCT:
- n := n.(*ir.StructType)
- return tcStructType(n)
-
- case ir.OTINTER:
- n := n.(*ir.InterfaceType)
- return tcInterfaceType(n)
-
case ir.OTFUNC:
n := n.(*ir.FuncType)
return tcFuncType(n)
+
// type or expr
case ir.ODEREF:
n := n.(*ir.StarExpr)
@@ -1729,18 +1693,6 @@ func stringtoruneslit(n *ir.ConvExpr) ir.Node {
return Expr(nn)
}
-var mapqueue []*ir.MapType
-
-func CheckMapKeys() {
- for _, n := range mapqueue {
- k := n.Type().MapType().Key
- if !k.Broke() && !types.IsComparable(k) {
- base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
- }
- }
- mapqueue = nil
-}
-
func typecheckdeftype(n *ir.Name) {
if base.EnableTrace && base.Flag.LowerT {
defer tracePrint("typecheckdeftype", n)(nil)
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index e1b395559a..a42d97cd31 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -72,6 +72,7 @@ const (
fmtDebug
fmtTypeID
fmtTypeIDName
+ fmtTypeIDHash
)
// Sym
@@ -144,10 +145,21 @@ func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
if q := pkgqual(s.Pkg, verb, mode); q != "" {
b.WriteString(q)
b.WriteByte('.')
- if mode == fmtTypeIDName {
+ switch mode {
+ case fmtTypeIDName:
// If name is a generic instantiation, it might have local package placeholders
// in it. Replace those placeholders with the package name. See issue 49547.
name = strings.Replace(name, LocalPkg.Prefix, q, -1)
+ case fmtTypeIDHash:
+ // If name is a generic instantiation, don't hash the instantiating types.
+ // This isn't great, but it is safe. If we hash the instantiating types, then
+ // we need to make sure they have just the package name. At this point, they
+ // either have "", or the whole package path, and it is hard to reconcile
+ // the two without depending on -p (which we might do someday).
+ // See issue 51250.
+ if i := strings.Index(name, "["); i >= 0 {
+ name = name[:i]
+ }
}
}
b.WriteString(name)
@@ -157,6 +169,9 @@ func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
// symbols from the given package in the given mode.
// If it returns the empty string, no qualification is needed.
func pkgqual(pkg *Pkg, verb rune, mode fmtMode) string {
+ if pkg == nil {
+ return ""
+ }
if verb != 'S' {
switch mode {
case fmtGo: // This is for the user
@@ -173,7 +188,7 @@ func pkgqual(pkg *Pkg, verb rune, mode fmtMode) string {
case fmtDebug:
return pkg.Name
- case fmtTypeIDName:
+ case fmtTypeIDName, fmtTypeIDHash:
// dcommontype, typehash
return pkg.Name
@@ -331,7 +346,7 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
if t == AnyType || t == ByteType || t == RuneType {
// in %-T mode collapse predeclared aliases with their originals.
switch mode {
- case fmtTypeIDName, fmtTypeID:
+ case fmtTypeIDName, fmtTypeIDHash, fmtTypeID:
t = Types[t.Kind()]
default:
sconv2(b, t.Sym(), 'S', mode)
@@ -422,7 +437,7 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
case TPTR:
b.WriteByte('*')
switch mode {
- case fmtTypeID, fmtTypeIDName:
+ case fmtTypeID, fmtTypeIDName, fmtTypeIDHash:
if verb == 'S' {
tconv2(b, t.Elem(), 'S', mode, visited)
return
@@ -484,7 +499,7 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
case IsExported(f.Sym.Name):
sconv2(b, f.Sym, 'S', mode)
default:
- if mode != fmtTypeIDName {
+ if mode != fmtTypeIDName && mode != fmtTypeIDHash {
mode = fmtTypeID
}
sconv2(b, f.Sym, 'v', mode)
@@ -554,7 +569,7 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
b.WriteByte(byte(open))
fieldVerb := 'v'
switch mode {
- case fmtTypeID, fmtTypeIDName, fmtGo:
+ case fmtTypeID, fmtTypeIDName, fmtTypeIDHash, fmtGo:
// no argument names on function signature, and no "noescape"/"nosplit" tags
fieldVerb = 'S'
}
@@ -657,7 +672,7 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
// Compute tsym, the symbol that would normally be used as
// the field name when embedding f.Type.
- // TODO(mdempsky): Check for other occurences of this logic
+ // TODO(mdempsky): Check for other occurrences of this logic
// and deduplicate.
typ := f.Type
if typ.IsPtr() {
@@ -688,7 +703,7 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
if name == ".F" {
name = "F" // Hack for toolstash -cmp.
}
- if !IsExported(name) && mode != fmtTypeIDName {
+ if !IsExported(name) && mode != fmtTypeIDName && mode != fmtTypeIDHash {
name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg)
}
} else {
@@ -756,7 +771,7 @@ func FmtConst(v constant.Value, sharp bool) string {
// TypeHash computes a hash value for type t to use in type switch statements.
func TypeHash(t *Type) uint32 {
- p := t.NameString()
+ p := tconv(t, 0, fmtTypeIDHash)
// Using MD5 is overkill, but reduces accidental collisions.
h := md5.Sum([]byte(p))
diff --git a/src/cmd/compile/internal/types/universe.go b/src/cmd/compile/internal/types/universe.go
index 55ed7bd6d0..4dff4548da 100644
--- a/src/cmd/compile/internal/types/universe.go
+++ b/src/cmd/compile/internal/types/universe.go
@@ -115,10 +115,6 @@ func InitTypes(defTypeName func(sym *Sym, typ *Type) Object) {
AnyType.SetUnderlying(NewInterface(BuiltinPkg, []*Field{}, false))
ResumeCheckSize()
- if base.Flag.G == 0 {
- ComparableType.Sym().Def = nil
- }
-
Types[TUNSAFEPTR] = defBasic(TUNSAFEPTR, UnsafePkg, "Pointer")
Types[TBLANK] = newType(TBLANK)
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index ee4f275bc0..6230c58401 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -421,9 +421,15 @@ func (conf *Config) Check(path string, files []*syntax.File, info *Info) (*Packa
}
// AssertableTo reports whether a value of type V can be asserted to have type T.
+// The behavior of AssertableTo is undefined if V is a generalized interface; i.e.,
+// an interface that may only be used as a type constraint in Go code.
func AssertableTo(V *Interface, T Type) bool {
- m, _ := (*Checker)(nil).assertableTo(V, T)
- return m == nil
+ // Checker.newAssertableTo suppresses errors for invalid types, so we need special
+ // handling here.
+ if T.Underlying() == Typ[Invalid] {
+ return false
+ }
+ return (*Checker)(nil).newAssertableTo(V, T) == nil
}
// AssignableTo reports whether a value of type V is assignable to a variable of type T.
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 094374f7f1..5c38c59c80 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -474,52 +474,54 @@ func TestInstanceInfo(t *testing.T) {
// `func(float64)`,
// },
- {`package s1; func f[T any, P interface{~*T}](x T) {}; func _(x string) { f(x) }`,
+ {`package s1; func f[T any, P interface{*T}](x T) {}; func _(x string) { f(x) }`,
`f`,
[]string{`string`, `*string`},
`func(x string)`,
},
- {`package s2; func f[T any, P interface{~*T}](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s2; func f[T any, P interface{*T}](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `*int`},
`func(x []int)`,
},
- {`package s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s3; type C[T any] interface{chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`},
`func(x []int)`,
},
- {`package s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s4; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func(x []int)`,
},
- {`package t1; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = f[string] }`,
+ {`package t1; func f[T any, P interface{*T}]() T { panic(0) }; func _() { _ = f[string] }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {`package t2; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = (f[string]) }`,
+ {`package t2; func f[T any, P interface{*T}]() T { panic(0) }; func _() { _ = (f[string]) }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {`package t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
+ {`package t3; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
},
- {`package t4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
+ {`package t4; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = (f[int]) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
},
- {`package i0; import lib "generic_lib"; func _() { lib.F(42) }`,
+
+ {`package i0; import "lib"; func _() { lib.F(42) }`,
`F`,
[]string{`int`},
`func(int)`,
},
+
{`package type0; type T[P interface{~int}] struct{ x P }; var _ T[int]`,
`T`,
[]string{`int`},
@@ -540,7 +542,7 @@ func TestInstanceInfo(t *testing.T) {
[]string{`[]int`, `int`},
`struct{x []int; y int}`,
},
- {`package type4; import lib "generic_lib"; var _ lib.T[int]`,
+ {`package type4; import "lib"; var _ lib.T[int]`,
`T`,
[]string{`int`},
`[]int`,
@@ -548,7 +550,7 @@ func TestInstanceInfo(t *testing.T) {
}
for _, test := range tests {
- const lib = `package generic_lib
+ const lib = `package lib
func F[P any](P) {}
@@ -1697,7 +1699,7 @@ func F(){
var F = /*F=func:12*/ F /*F=var:17*/ ; _ = F
var a []int
- for i, x := range /*i=undef*/ /*x=var:16*/ a /*i=var:20*/ /*x=var:20*/ { _ = i; _ = x }
+ for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x }
var i interface{}
switch y := i.(type) { /*y=undef*/
@@ -2313,27 +2315,27 @@ type Bad Bad // invalid type
conf := Config{Error: func(error) {}}
pkg, _ := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil)
- scope := pkg.Scope()
+ lookup := func(tname string) Type { return pkg.Scope().Lookup(tname).Type() }
var (
- EmptyIface = scope.Lookup("EmptyIface").Type().Underlying().(*Interface)
- I = scope.Lookup("I").Type().(*Named)
+ EmptyIface = lookup("EmptyIface").Underlying().(*Interface)
+ I = lookup("I").(*Named)
II = I.Underlying().(*Interface)
- C = scope.Lookup("C").Type().(*Named)
+ C = lookup("C").(*Named)
CI = C.Underlying().(*Interface)
- Integer = scope.Lookup("Integer").Type().Underlying().(*Interface)
- EmptyTypeSet = scope.Lookup("EmptyTypeSet").Type().Underlying().(*Interface)
- N1 = scope.Lookup("N1").Type()
+ Integer = lookup("Integer").Underlying().(*Interface)
+ EmptyTypeSet = lookup("EmptyTypeSet").Underlying().(*Interface)
+ N1 = lookup("N1")
N1p = NewPointer(N1)
- N2 = scope.Lookup("N2").Type()
+ N2 = lookup("N2")
N2p = NewPointer(N2)
- N3 = scope.Lookup("N3").Type()
- N4 = scope.Lookup("N4").Type()
- Bad = scope.Lookup("Bad").Type()
+ N3 = lookup("N3")
+ N4 = lookup("N4")
+ Bad = lookup("Bad")
)
tests := []struct {
- t Type
- i *Interface
+ V Type
+ T *Interface
want bool
}{
{I, II, true},
@@ -2364,8 +2366,20 @@ type Bad Bad // invalid type
}
for _, test := range tests {
- if got := Implements(test.t, test.i); got != test.want {
- t.Errorf("Implements(%s, %s) = %t, want %t", test.t, test.i, got, test.want)
+ if got := Implements(test.V, test.T); got != test.want {
+ t.Errorf("Implements(%s, %s) = %t, want %t", test.V, test.T, got, test.want)
+ }
+
+ // The type assertion x.(T) is valid if T is an interface or if T implements the type of x.
+ // The assertion is never valid if T is a bad type.
+ V := test.T
+ T := test.V
+ want := false
+ if _, ok := T.Underlying().(*Interface); (ok || Implements(T, V)) && T != Bad {
+ want = true
+ }
+ if got := AssertableTo(V, T); got != want {
+ t.Errorf("AssertableTo(%s, %s) = %t, want %t", V, T, got, want)
}
}
}
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
index 936930f0b1..d88b03748f 100644
--- a/src/cmd/compile/internal/types2/assignments.go
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -294,15 +294,14 @@ func (check *Checker) typesSummary(list []Type, variadic bool) string {
return "(" + strings.Join(res, ", ") + ")"
}
-func (check *Checker) assignError(rhs []syntax.Expr, nvars, nvals int) {
- measure := func(x int, unit string) string {
- s := fmt.Sprintf("%d %s", x, unit)
- if x != 1 {
- s += "s"
- }
- return s
+func measure(x int, unit string) string {
+ if x != 1 {
+ unit += "s"
}
+ return fmt.Sprintf("%d %s", x, unit)
+}
+func (check *Checker) assignError(rhs []syntax.Expr, nvars, nvals int) {
vars := measure(nvars, "variable")
vals := measure(nvals, "value")
rhs0 := rhs[0]
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index f9db07fdea..428897c628 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -82,10 +82,24 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// of S and the respective parameter passing rules apply."
S := x.typ
var T Type
- if s, _ := structuralType(S).(*Slice); s != nil {
+ if s, _ := coreType(S).(*Slice); s != nil {
T = s.elem
} else {
- check.errorf(x, invalidArg+"%s is not a slice", x)
+ var cause string
+ switch {
+ case x.isNil():
+ cause = "have untyped nil"
+ case isTypeParam(S):
+ if u := coreType(S); u != nil {
+ cause = check.sprintf("%s has core type %s", x, u)
+ } else {
+ cause = check.sprintf("%s has no core type", x)
+ }
+ default:
+ cause = check.sprintf("have %s", x)
+ }
+ // don't use invalidArg prefix here as it would repeat "argument" in the error message
+ check.errorf(x, "first argument to append must be a slice; %s", cause)
return
}
@@ -101,7 +115,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
if x.mode == invalid {
return
}
- if t := structuralString(x.typ); t != nil && isString(t) {
+ if t := coreString(x.typ); t != nil && isString(t) {
if check.Types != nil {
sig := makeSig(S, S, x.typ)
sig.variadic = true
@@ -331,14 +345,14 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
case _Copy:
// copy(x, y []T) int
- dst, _ := structuralType(x.typ).(*Slice)
+ dst, _ := coreType(x.typ).(*Slice)
var y operand
arg(&y, 1)
if y.mode == invalid {
return
}
- src0 := structuralString(y.typ)
+ src0 := coreString(y.typ)
if src0 != nil && isString(src0) {
src0 = NewSlice(universeByte)
}
@@ -472,13 +486,13 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
}
var min int // minimum number of arguments
- switch structuralType(T).(type) {
+ switch coreType(T).(type) {
case *Slice:
min = 2
case *Map, *Chan:
min = 1
case nil:
- check.errorf(arg0, invalidArg+"cannot make %s: no structural type", arg0)
+ check.errorf(arg0, invalidArg+"cannot make %s: no core type", arg0)
return
default:
check.errorf(arg0, invalidArg+"cannot make %s; type must be slice, map, or channel", arg0)
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index 15a42ca3dc..d12ee49adb 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -168,7 +168,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
cgocall := x.mode == cgofunc
// a type parameter may be "called" if all types have the same signature
- sig, _ := structuralType(x.typ).(*Signature)
+ sig, _ := coreType(x.typ).(*Signature)
if sig == nil {
check.errorf(x, invalidOp+"cannot call non-function %s", x)
x.mode = invalid
@@ -525,7 +525,11 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
}
check.exprOrType(x, e.X, false)
- if x.mode == invalid {
+ switch x.mode {
+ case builtin:
+ check.errorf(e.Pos(), "cannot select on %s", x)
+ goto Error
+ case invalid:
goto Error
}
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index bfed16993b..4ec6a7b4fd 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -18,19 +18,6 @@ var nopos syntax.Pos
// debugging/development support
const debug = false // leave on during development
-// If forceStrict is set, the type-checker enforces additional
-// rules not specified by the Go 1 spec, but which will
-// catch guaranteed run-time errors if the respective
-// code is executed. In other words, programs passing in
-// strict mode are Go 1 compliant, but not all Go 1 programs
-// will pass in strict mode. The additional rules are:
-//
-// - A type assertion x.(T) where T is an interface type
-// is invalid if any (statically known) method that exists
-// for both x and T have different signatures.
-//
-const forceStrict = false
-
// exprInfo stores information about an untyped expression.
type exprInfo struct {
isLhs bool // expression is lhs operand of a shift with delayed type-check
@@ -139,7 +126,7 @@ type Checker struct {
untyped map[syntax.Expr]exprInfo // map of expressions without final type
delayed []action // stack of delayed action segments; segments are processed in FIFO order
objPath []Object // path of object dependencies during type inference (for cycle reporting)
- defTypes []*Named // defined types created during type checking, for final validation.
+ cleaners []cleaner // list of types that may need a final cleanup at the end of type-checking
// environment within which the current object is type-checked (valid only
// for the duration of type-checking a specific object)
@@ -218,6 +205,16 @@ func (check *Checker) pop() Object {
return obj
}
+type cleaner interface {
+ cleanup()
+}
+
+// needsCleanup records objects/types that implement the cleanup method
+// which will be called at the end of type-checking.
+func (check *Checker) needsCleanup(c cleaner) {
+ check.cleaners = append(check.cleaners, c)
+}
+
// NewChecker returns a new Checker instance for a given package.
// Package files may be added incrementally via checker.Files.
func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
@@ -260,6 +257,8 @@ func (check *Checker) initFiles(files []*syntax.File) {
check.methods = nil
check.untyped = nil
check.delayed = nil
+ check.objPath = nil
+ check.cleaners = nil
// determine package name and collect valid files
pkg := check.pkg
@@ -328,8 +327,8 @@ func (check *Checker) checkFiles(files []*syntax.File) (err error) {
print("== processDelayed ==")
check.processDelayed(0) // incl. all functions
- print("== expandDefTypes ==")
- check.expandDefTypes()
+ print("== cleanup ==")
+ check.cleanup()
print("== initOrder ==")
check.initOrder()
@@ -357,7 +356,6 @@ func (check *Checker) checkFiles(files []*syntax.File) (err error) {
check.recvTParamMap = nil
check.brokenAliases = nil
check.unionTypeSets = nil
- check.defTypes = nil
check.ctxt = nil
// TODO(gri) There's more memory we should release at this point.
@@ -385,27 +383,13 @@ func (check *Checker) processDelayed(top int) {
check.delayed = check.delayed[:top]
}
-func (check *Checker) expandDefTypes() {
- // Ensure that every defined type created in the course of type-checking has
- // either non-*Named underlying, or is unresolved.
- //
- // This guarantees that we don't leak any types whose underlying is *Named,
- // because any unresolved instances will lazily compute their underlying by
- // substituting in the underlying of their origin. The origin must have
- // either been imported or type-checked and expanded here, and in either case
- // its underlying will be fully expanded.
- for i := 0; i < len(check.defTypes); i++ {
- n := check.defTypes[i]
- switch n.underlying.(type) {
- case nil:
- if n.resolver == nil {
- panic("nil underlying")
- }
- case *Named:
- n.under() // n.under may add entries to check.defTypes
- }
- n.check = nil
+// cleanup runs cleanup for all collected cleaners.
+func (check *Checker) cleanup() {
+ // Don't use a range clause since Named.cleanup may add more cleaners.
+ for i := 0; i < len(check.cleaners); i++ {
+ check.cleaners[i].cleanup()
}
+ check.cleaners = nil
}
func (check *Checker) record(x *operand) {
diff --git a/src/cmd/compile/internal/types2/compilersupport.go b/src/cmd/compile/internal/types2/compilersupport.go
index b35e752b8f..33dd8e8baa 100644
--- a/src/cmd/compile/internal/types2/compilersupport.go
+++ b/src/cmd/compile/internal/types2/compilersupport.go
@@ -19,12 +19,12 @@ func AsSignature(t Type) *Signature {
return u
}
-// If typ is a type parameter, structuralType returns the single underlying
+// If typ is a type parameter, CoreType returns the single underlying
// type of all types in the corresponding type constraint if it exists, or
// nil otherwise. If the type set contains only unrestricted and restricted
// channel types (with identical element types), the single underlying type
// is the restricted channel type if the restrictions are always the same.
-// If typ is not a type parameter, structuralType returns the underlying type.
-func StructuralType(t Type) Type {
- return structuralType(t)
+// If typ is not a type parameter, CoreType returns the underlying type.
+func CoreType(t Type) Type {
+ return coreType(t)
}
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
index 7fe1d5056b..08b3cbff29 100644
--- a/src/cmd/compile/internal/types2/conversions.go
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -49,11 +49,14 @@ func (check *Checker) conversion(x *operand, T Type) {
// have specific types, constant x cannot be
// converted.
ok = T.(*TypeParam).underIs(func(u Type) bool {
- // t is nil if there are no specific type terms
+ // u is nil if there are no specific type terms
if u == nil {
cause = check.sprintf("%s does not contain specific types", T)
return false
}
+ if isString(x.typ) && isBytesOrRunes(u) {
+ return true
+ }
if !constConvertibleTo(u, nil) {
cause = check.sprintf("cannot convert %s to %s (in %s)", x, u, T)
return false
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 0e8f5085ba..579fa55e59 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -569,7 +569,6 @@ func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Fiel
// Keep track of bounds for later validation.
var bound Type
- var bounds []Type
for i, f := range list {
// Optimization: Re-use the previous type bound if it hasn't changed.
// This also preserves the grouped output of type parameter lists
@@ -584,7 +583,6 @@ func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Fiel
check.error(f.Type, "cannot use a type parameter as constraint")
bound = Typ[Invalid]
}
- bounds = append(bounds, bound)
}
tparams[i].bound = bound
}
diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go
index 77ae75a0a2..422f520795 100644
--- a/src/cmd/compile/internal/types2/errors.go
+++ b/src/cmd/compile/internal/types2/errors.go
@@ -124,6 +124,17 @@ func sprintf(qf Qualifier, debug bool, format string, args ...interface{}) strin
}
buf.WriteByte(']')
arg = buf.String()
+ case []*TypeParam:
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(typeString(x, qf, debug)) // use typeString so we get subscripts when debugging
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
}
args[i] = arg
}
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index 4fdabe754e..c587c40f80 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -182,9 +182,9 @@ func (check *Checker) unary(x *operand, e *syntax.Operation) {
return
case syntax.Recv:
- u := structuralType(x.typ)
+ u := coreType(x.typ)
if u == nil {
- check.errorf(x, invalidOp+"cannot receive from %s: no structural type", x)
+ check.errorf(x, invalidOp+"cannot receive from %s: no core type", x)
x.mode = invalid
return
}
@@ -899,7 +899,7 @@ func (check *Checker) incomparableCause(typ Type) string {
}
// see if we can extract a more specific error
var cause string
- comparable(typ, nil, func(format string, args ...interface{}) {
+ comparable(typ, true, nil, func(format string, args ...interface{}) {
cause = check.sprintf(format, args...)
})
return cause
@@ -1359,7 +1359,11 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
case hint != nil:
// no composite literal type present - use hint (element type of enclosing type)
typ = hint
- base, _ = deref(structuralType(typ)) // *T implies &T{}
+ base, _ = deref(coreType(typ)) // *T implies &T{}
+ if base == nil {
+ check.errorf(e, "invalid composite literal element type %s: no core type", typ)
+ goto Error
+ }
default:
// TODO(gri) provide better error messages depending on context
@@ -1367,7 +1371,7 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
goto Error
}
- switch utyp := structuralType(base).(type) {
+ switch utyp := coreType(base).(type) {
case *Struct:
// Prevent crash if the struct referred to is not yet set up.
// See analogous comment for *Array.
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
index 4995d2d730..61009c121e 100644
--- a/src/cmd/compile/internal/types2/index.go
+++ b/src/cmd/compile/internal/types2/index.go
@@ -182,7 +182,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
}
if !valid {
- check.errorf(x, invalidOp+"cannot index %s", x)
+ check.errorf(e.Pos(), invalidOp+"cannot index %s", x)
x.mode = invalid
return false
}
@@ -213,9 +213,9 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
valid := false
length := int64(-1) // valid if >= 0
- switch u := structuralString(x.typ).(type) {
+ switch u := coreString(x.typ).(type) {
case nil:
- check.errorf(x, invalidOp+"cannot slice %s: %s has no structural type", x, x.typ)
+ check.errorf(x, invalidOp+"cannot slice %s: %s has no core type", x, x.typ)
x.mode = invalid
return
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index 51b26eb2aa..29633028f3 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -41,6 +41,13 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
}()
}
+ if traceInference {
+ check.dump("-- inferA %s%s ➞ %s", tparams, params, targs)
+ defer func() {
+ check.dump("=> inferA %s ➞ %s", tparams, result)
+ }()
+ }
+
// There must be at least one type parameter, and no more type arguments than type parameters.
n := len(tparams)
assert(n > 0 && len(targs) <= n)
@@ -54,6 +61,64 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
}
// len(targs) < n
+ const enableTparamRenaming = true
+ if enableTparamRenaming {
+ // For the purpose of type inference we must differentiate type parameters
+ // occurring in explicit type or value function arguments from the type
+ // parameters we are solving for via unification, because they may be the
+ // same in self-recursive calls. For example:
+ //
+ // func f[P *Q, Q any](p P, q Q) {
+ // f(p)
+ // }
+ //
+ // In this example, the fact that the P used in the instantation f[P] has
+ // the same pointer identity as the P we are trying to solve for via
+ // unification is coincidental: there is nothing special about recursive
+ // calls that should cause them to conflate the identity of type arguments
+ // with type parameters. To put it another way: any such self-recursive
+ // call is equivalent to a mutually recursive call, which does not run into
+ // any problems of type parameter identity. For example, the following code
+ // is equivalent to the code above.
+ //
+ // func f[P interface{*Q}, Q any](p P, q Q) {
+ // f2(p)
+ // }
+ //
+ // func f2[P interface{*Q}, Q any](p P, q Q) {
+ // f(p)
+ // }
+ //
+ // We can turn the first example into the second example by renaming type
+ // parameters in the original signature to give them a new identity. As an
+ // optimization, we do this only for self-recursive calls.
+
+ // We can detect if we are in a self-recursive call by comparing the
+ // identity of the first type parameter in the current function with the
+ // first type parameter in tparams. This works because type parameters are
+ // unique to their type parameter list.
+ selfRecursive := check.sig != nil && check.sig.tparams.Len() > 0 && tparams[0] == check.sig.tparams.At(0)
+
+ if selfRecursive {
+ // In self-recursive inference, rename the type parameters with new type
+ // parameters that are the same but for their pointer identity.
+ tparams2 := make([]*TypeParam, len(tparams))
+ for i, tparam := range tparams {
+ tname := NewTypeName(tparam.Obj().Pos(), tparam.Obj().Pkg(), tparam.Obj().Name(), nil)
+ tparams2[i] = NewTypeParam(tname, nil)
+ tparams2[i].index = tparam.index // == i
+ }
+
+ renameMap := makeRenameMap(tparams, tparams2)
+ for i, tparam := range tparams {
+ tparams2[i].bound = check.subst(pos, tparam.bound, renameMap, nil)
+ }
+
+ tparams = tparams2
+ params = check.subst(pos, params, renameMap, nil).(*Tuple)
+ }
+ }
+
// If we have more than 2 arguments, we may have arguments with named and unnamed types.
// If that is the case, permutate params and args such that the arguments with named
// types are first in the list. This doesn't affect type inference if all types are taken
@@ -403,6 +468,13 @@ func (w *tpWalker) isParameterizedTypeList(list []Type) bool {
func (check *Checker) inferB(pos syntax.Pos, tparams []*TypeParam, targs []Type) (types []Type, index int) {
assert(len(tparams) >= len(targs) && len(targs) > 0)
+ if traceInference {
+ check.dump("-- inferB %s ➞ %s", tparams, targs)
+ defer func() {
+ check.dump("=> inferB %s ➞ %s", tparams, types)
+ }()
+ }
+
// Setup bidirectional unification between constraints
// and the corresponding type arguments (which may be nil!).
u := newUnifier(false)
@@ -416,27 +488,88 @@ func (check *Checker) inferB(pos syntax.Pos, tparams []*TypeParam, targs []Type)
}
}
- // If a constraint has a structural type, unify the corresponding type parameter with it.
- for _, tpar := range tparams {
- sbound := structuralType(tpar)
- if sbound != nil {
- // If the structural type is the underlying type of a single
- // defined type in the constraint, use that defined type instead.
- if named, _ := tpar.singleType().(*Named); named != nil {
- sbound = named
- }
- if !u.unify(tpar, sbound) {
- // TODO(gri) improve error message by providing the type arguments
- // which we know already
- check.errorf(pos, "%s does not match %s", tpar, sbound)
- return nil, 0
+ // Repeatedly apply constraint type inference as long as
+ // there are still unknown type arguments and progress is
+ // being made.
+ //
+ // This is an O(n^2) algorithm where n is the number of
+ // type parameters: if there is progress (and iteration
+ // continues), at least one type argument is inferred
+ // per iteration and we have a doubly nested loop.
+ // In practice this is not a problem because the number
+ // of type parameters tends to be very small (< 5 or so).
+ // (It should be possible for unification to efficiently
+ // signal newly inferred type arguments; then the loops
+ // here could handle the respective type parameters only,
+ // but that will come at a cost of extra complexity which
+ // may not be worth it.)
+ for n := u.x.unknowns(); n > 0; {
+ nn := n
+
+ for i, tpar := range tparams {
+ // If there is a core term (i.e., a core type with tilde information)
+ // unify the type parameter with the core type.
+ if core, single := coreTerm(tpar); core != nil {
+ // A type parameter can be unified with its core type in two cases.
+ tx := u.x.at(i)
+ switch {
+ case tx != nil:
+ // The corresponding type argument tx is known.
+ // In this case, if the core type has a tilde, the type argument's underlying
+ // type must match the core type, otherwise the type argument and the core type
+ // must match.
+ // If tx is an external type parameter, don't consider its underlying type
+ // (which is an interface). Core type unification will attempt to unify against
+ // core.typ.
+ // Note also that even with inexact unification we cannot leave away the under
+ // call here because it's possible that both tx and core.typ are named types,
+ // with under(tx) being a (named) basic type matching core.typ. Such cases do
+ // not match with inexact unification.
+ if core.tilde && !isTypeParam(tx) {
+ tx = under(tx)
+ }
+ if !u.unify(tx, core.typ) {
+ // TODO(gri) improve error message by providing the type arguments
+ // which we know already
+ // Don't use term.String() as it always qualifies types, even if they
+ // are in the current package.
+ tilde := ""
+ if core.tilde {
+ tilde = "~"
+ }
+ check.errorf(pos, "%s does not match %s%s", tpar, tilde, core.typ)
+ return nil, 0
+ }
+
+ case single && !core.tilde:
+ // The corresponding type argument tx is unknown and there's a single
+ // specific type and no tilde.
+ // In this case the type argument must be that single type; set it.
+ u.x.set(i, core.typ)
+
+ default:
+ // Unification is not possible and no progress was made.
+ continue
+ }
+
+ // The number of known type arguments may have changed.
+ nn = u.x.unknowns()
+ if nn == 0 {
+ break // all type arguments are known
+ }
}
}
+
+ assert(nn <= n)
+ if nn == n {
+ break // no progress
+ }
+ n = nn
}
// u.x.types() now contains the incoming type arguments plus any additional type
- // arguments which were inferred from structural types. The newly inferred non-
- // nil entries may still contain references to other type parameters.
+ // arguments which were inferred from core terms. The newly inferred non-nil
+ // entries may still contain references to other type parameters.
// For instance, for [A any, B interface{ []C }, C interface{ *A }], if A == int
// was given, unification produced the type list [int, []C, *A]. We eliminate the
// remaining type parameters by substituting the type parameters in this type list
@@ -504,8 +637,8 @@ func (check *Checker) inferB(pos syntax.Pos, tparams []*TypeParam, targs []Type)
}
// Once nothing changes anymore, we may still have type parameters left;
- // e.g., a structural constraint *P may match a type parameter Q but we
- // don't have any type arguments to fill in for *P or Q (issue #45548).
+ // e.g., a constraint with core type *P may match a type parameter Q but
+ // we don't have any type arguments to fill in for *P or Q (issue #45548).
// Don't let such inferences escape, instead nil them out.
for i, typ := range types {
if typ != nil && isParameterized(tparams, typ) {
@@ -525,6 +658,42 @@ func (check *Checker) inferB(pos syntax.Pos, tparams []*TypeParam, targs []Type)
return
}
+// If the type parameter has a single specific type S, coreTerm returns (S, true).
+// Otherwise, if tpar has a core type T, it returns a term corresponding to that
+// core type and false. In that case, if any term of tpar has a tilde, the core
+// term has a tilde. In all other cases coreTerm returns (nil, false).
+func coreTerm(tpar *TypeParam) (*term, bool) {
+ n := 0
+ var single *term // valid if n == 1
+ var tilde bool
+ tpar.is(func(t *term) bool {
+ if t == nil {
+ assert(n == 0)
+ return false // no terms
+ }
+ n++
+ single = t
+ if t.tilde {
+ tilde = true
+ }
+ return true
+ })
+ if n == 1 {
+ if debug {
+ assert(debug && under(single.typ) == coreType(tpar))
+ }
+ return single, true
+ }
+ if typ := coreType(tpar); typ != nil {
+ // A core type is always an underlying type.
+ // If any term of tpar has a tilde, we don't
+ // have a precise core type and we must return
+ // a tilde as well.
+ return &term{tilde, typ}, false
+ }
+ return nil, false
+}
+
type cycleFinder struct {
tparams []*TypeParam
types []Type
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index f54938b6e1..c2653a3834 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -204,7 +204,7 @@ func (check *Checker) implements(V, T Type) error {
// If T is comparable, V must be comparable.
// Remember as a pending error and report only if we don't have a more specific error.
var pending error
- if Ti.IsComparable() && ((Vi != nil && !Vi.IsComparable()) || (Vi == nil && !Comparable(V))) {
+ if Ti.IsComparable() && !comparable(V, false, nil, nil) {
pending = errorf("%s does not implement comparable", V)
}
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
index ca5140d092..75597abaf9 100644
--- a/src/cmd/compile/internal/types2/interface.go
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -37,7 +37,7 @@ func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
}
// set method receivers if necessary
- typ := new(Interface)
+ typ := (*Checker)(nil).newInterface()
for _, m := range methods {
if sig := m.typ.(*Signature); sig.recv == nil {
sig.recv = NewVar(m.pos, m.pkg, "", typ)
@@ -54,6 +54,15 @@ func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
return typ
}
+// check may be nil
+func (check *Checker) newInterface() *Interface {
+ typ := &Interface{check: check}
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
// MarkImplicit marks the interface t as implicit, meaning this interface
// corresponds to a constraint literal such as ~T or A|B without explicit
// interface embedding. MarkImplicit should be called before any concurrent use
@@ -100,6 +109,11 @@ func (t *Interface) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
// Implementation
+func (t *Interface) cleanup() {
+ t.check = nil
+ t.embedPos = nil
+}
+
func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
addEmbedded := func(pos syntax.Pos, typ Type) {
ityp.embeddeds = append(ityp.embeddeds, typ)
@@ -162,16 +176,10 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType
// (don't sort embeddeds: they must correspond to *embedPos entries)
sortMethods(ityp.methods)
- // Compute type set with a non-nil *Checker as soon as possible
- // to report any errors. Subsequent uses of type sets will use
- // this computed type set and won't need to pass in a *Checker.
- //
- // Pin the checker to the interface type in the interim, in case the type set
- // must be used before delayed funcs are processed (see issue #48234).
- // TODO(rfindley): clean up use of *Checker with computeInterfaceTypeSet
- ityp.check = check
+ // Compute type set as soon as possible to report any errors.
+ // Subsequent uses of type sets will use this computed type
+ // set and won't need to pass in a *Checker.
check.later(func() {
computeInterfaceTypeSet(check, iface.Pos(), ityp)
- ityp.check = nil
}).describef(iface, "compute type set for %s", ityp)
}
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index b8ddd94cd7..0a2d2a5790 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -66,12 +66,12 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
obj, index, indirect = lookupFieldOrMethod(T, addressable, pkg, name, false)
- // If we didn't find anything and if we have a type parameter with a structural constraint,
- // see if there is a matching field (but not a method, those need to be declared explicitly
- // in the constraint). If the structural constraint is a named pointer type (see above), we
- // are ok here because only fields are accepted as results.
+ // If we didn't find anything and if we have a type parameter with a core type,
+ // see if there is a matching field (but not a method, those need to be declared
+ // explicitly in the constraint). If the constraint is a named pointer type (see
+ // above), we are ok here because only fields are accepted as results.
if obj == nil && isTypeParam(T) {
- if t := structuralType(T); t != nil {
+ if t := coreType(T); t != nil {
obj, index, indirect = lookupFieldOrMethod(t, addressable, pkg, name, false)
if _, ok := obj.(*Var); !ok {
obj, index, indirect = nil, nil, false // accept fields (variables) only
@@ -425,18 +425,31 @@ func (check *Checker) funcString(f *Func) string {
// method required by V and whether it is missing or just has the wrong type.
// The receiver may be nil if assertableTo is invoked through an exported API call
// (such as AssertableTo), i.e., when all methods have been type-checked.
-// If the global constant forceStrict is set, assertions that are known to fail
-// are not permitted.
+// TODO(gri) replace calls to this function with calls to newAssertableTo.
func (check *Checker) assertableTo(V *Interface, T Type) (method, wrongType *Func) {
// no static check is required if T is an interface
// spec: "If T is an interface type, x.(T) asserts that the
// dynamic type of x implements the interface T."
- if IsInterface(T) && !forceStrict {
+ if IsInterface(T) {
return
}
+ // TODO(gri) fix this for generalized interfaces
return check.missingMethod(T, V, false)
}
+// newAssertableTo reports whether a value of type V can be asserted to have type T.
+// It also implements behavior for interfaces that currently are only permitted
+// in constraint position (we have not yet defined that behavior in the spec).
+func (check *Checker) newAssertableTo(V *Interface, T Type) error {
+ // no static check is required if T is an interface
+ // spec: "If T is an interface type, x.(T) asserts that the
+ // dynamic type of x implements the interface T."
+ if IsInterface(T) {
+ return nil
+ }
+ return check.implements(T, V)
+}
+
// deref dereferences typ if it is a *Pointer and returns its base and true.
// Otherwise it returns (typ, false).
func deref(typ Type) (Type, bool) {
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
index bb522e8fe3..5c6a1cf5d8 100644
--- a/src/cmd/compile/internal/types2/named.go
+++ b/src/cmd/compile/internal/types2/named.go
@@ -72,11 +72,31 @@ func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tpar
}
// Ensure that typ is always expanded and sanity-checked.
if check != nil {
- check.defTypes = append(check.defTypes, typ)
+ check.needsCleanup(typ)
}
return typ
}
+func (t *Named) cleanup() {
+ // Ensure that every defined type created in the course of type-checking has
+ // either non-*Named underlying, or is unresolved.
+ //
+ // This guarantees that we don't leak any types whose underlying is *Named,
+ // because any unresolved instances will lazily compute their underlying by
+ // substituting in the underlying of their origin. The origin must have
+ // either been imported or type-checked and expanded here, and in either case
+ // its underlying will be fully expanded.
+ switch t.underlying.(type) {
+ case nil:
+ if t.resolver == nil {
+ panic("nil underlying")
+ }
+ case *Named:
+ t.under() // t.under may add entries to check.cleaners
+ }
+ t.check = nil
+}
+
// Obj returns the type name for the declaration defining the named type t. For
// instantiated types, this is the type name of the base type.
func (t *Named) Obj() *TypeName { return t.orig.obj } // for non-instances this is the same as t.obj
@@ -360,11 +380,11 @@ func expandNamed(ctxt *Context, n *Named, instPos syntax.Pos) (tparams *TypePara
// that it wasn't substituted. In this case we need to create a new
// *Interface before modifying receivers.
if iface == n.orig.underlying {
- iface = &Interface{
- embeddeds: iface.embeddeds,
- complete: iface.complete,
- implicit: iface.implicit, // should be false but be conservative
- }
+ old := iface
+ iface = check.newInterface()
+ iface.embeddeds = old.embeddeds
+ iface.complete = old.complete
+ iface.implicit = old.implicit // should be false but be conservative
underlying = iface
}
iface.methods = methods
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index 279d0775bd..ba259341f6 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -31,7 +31,7 @@ func isBasic(t Type, info BasicInfo) bool {
// The allX predicates below report whether t is an X.
// If t is a type parameter the result is true if isX is true
// for all specified types of the type parameter's type set.
-// allX is an optimized version of isX(structuralType(t)) (which
+// allX is an optimized version of isX(coreType(t)) (which
// is the same as underIs(t, isX)).
func allBoolean(t Type) bool { return allBasic(t, IsBoolean) }
@@ -45,7 +45,7 @@ func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) }
// allBasic reports whether under(t) is a basic type with the specified info.
// If t is a type parameter, the result is true if isBasic(t, info) is true
// for all specific types of the type parameter's type set.
-// allBasic(t, info) is an optimized version of isBasic(structuralType(t), info).
+// allBasic(t, info) is an optimized version of isBasic(coreType(t), info).
func allBasic(t Type, info BasicInfo) bool {
if tpar, _ := t.(*TypeParam); tpar != nil {
return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
@@ -102,11 +102,12 @@ func isGeneric(t Type) bool {
// Comparable reports whether values of type T are comparable.
func Comparable(T Type) bool {
- return comparable(T, nil, nil)
+ return comparable(T, true, nil, nil)
}
+// If dynamic is set, non-type parameter interfaces are always comparable.
// If reportf != nil, it may be used to report why T is not comparable.
-func comparable(T Type, seen map[Type]bool, reportf func(string, ...interface{})) bool {
+func comparable(T Type, dynamic bool, seen map[Type]bool, reportf func(string, ...interface{})) bool {
if seen[T] {
return true
}
@@ -124,7 +125,7 @@ func comparable(T Type, seen map[Type]bool, reportf func(string, ...interface{})
return true
case *Struct:
for _, f := range t.fields {
- if !comparable(f.typ, seen, nil) {
+ if !comparable(f.typ, dynamic, seen, nil) {
if reportf != nil {
reportf("struct containing %s cannot be compared", f.typ)
}
@@ -133,7 +134,7 @@ func comparable(T Type, seen map[Type]bool, reportf func(string, ...interface{})
}
return true
case *Array:
- if !comparable(t.elem, seen, nil) {
+ if !comparable(t.elem, dynamic, seen, nil) {
if reportf != nil {
reportf("%s cannot be compared", t)
}
@@ -141,7 +142,7 @@ func comparable(T Type, seen map[Type]bool, reportf func(string, ...interface{})
}
return true
case *Interface:
- return !isTypeParam(T) || t.typeSet().IsComparable(seen)
+ return dynamic && !isTypeParam(T) || t.typeSet().IsComparable(seen)
}
return false
}
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
index c87fab749c..c98024f924 100644
--- a/src/cmd/compile/internal/types2/signature.go
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -116,11 +116,10 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
// lookup in the scope.
for i, p := range rparams {
if p.Value == "_" {
- tpar := sig.rparams.At(i)
if check.recvTParamMap == nil {
check.recvTParamMap = make(map[*syntax.Name]*TypeParam)
}
- check.recvTParamMap[p] = tpar
+ check.recvTParamMap[p] = tparams[i]
}
}
// determine receiver type to get its type parameters
@@ -136,22 +135,23 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
}
}
// provide type parameter bounds
- // - only do this if we have the right number (otherwise an error is reported elsewhere)
- if sig.RecvTypeParams().Len() == len(recvTParams) {
- // We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, sig.RecvTypeParams().Len())
- for i, t := range sig.RecvTypeParams().list() {
- list[i] = t
- check.mono.recordCanon(t, recvTParams[i])
- }
- smap := makeSubstMap(recvTParams, list)
- for i, tpar := range sig.RecvTypeParams().list() {
- bound := recvTParams[i].bound
- // bound is (possibly) parameterized in the context of the
- // receiver type declaration. Substitute parameters for the
- // current context.
- tpar.bound = check.subst(tpar.obj.pos, bound, smap, nil)
+ if len(tparams) == len(recvTParams) {
+ smap := makeRenameMap(recvTParams, tparams)
+ for i, tpar := range tparams {
+ recvTPar := recvTParams[i]
+ check.mono.recordCanon(tpar, recvTPar)
+ // recvTPar.bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the current
+ // context.
+ tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil)
}
+ } else if len(tparams) < len(recvTParams) {
+ // Reporting an error here is a stop-gap measure to avoid crashes in the
+ // compiler when a type parameter/argument cannot be inferred later. It
+ // may lead to follow-on errors (see issues #51339, #51343).
+ // TODO(gri) find a better solution
+ got := measure(len(tparams), "type parameter")
+ check.errorf(recvPar, "got %s, but receiver base type declares %d", got, len(recvTParams))
}
}
}
@@ -194,66 +194,69 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
case 1:
recv = recvList[0]
}
+ sig.recv = recv
- // TODO(gri) We should delay rtyp expansion to when we actually need the
- // receiver; thus all checks here should be delayed to later.
- rtyp, _ := deref(recv.typ)
+ // Delay validation of receiver type as it may cause premature expansion
+ // of types the receiver type is dependent on (see issues #51232, #51233).
+ check.later(func() {
+ rtyp, _ := deref(recv.typ)
- // spec: "The receiver type must be of the form T or *T where T is a type name."
- // (ignore invalid types - error was reported before)
- if rtyp != Typ[Invalid] {
- var err string
- switch T := rtyp.(type) {
- case *Named:
- T.resolve(check.bestContext(nil))
- // The receiver type may be an instantiated type referred to
- // by an alias (which cannot have receiver parameters for now).
- if T.TypeArgs() != nil && sig.RecvTypeParams() == nil {
- check.errorf(recv.pos, "cannot define methods on instantiated type %s", recv.typ)
- break
- }
- // spec: "The type denoted by T is called the receiver base type; it must not
- // be a pointer or interface type and it must be declared in the same package
- // as the method."
- if T.obj.pkg != check.pkg {
- err = "type not defined in this package"
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if rtyp != Typ[Invalid] {
+ var err string
+ switch T := rtyp.(type) {
+ case *Named:
+ T.resolve(check.bestContext(nil))
+ // The receiver type may be an instantiated type referred to
+ // by an alias (which cannot have receiver parameters for now).
+ if T.TypeArgs() != nil && sig.RecvTypeParams() == nil {
+ check.errorf(recv.pos, "cannot define methods on instantiated type %s", recv.typ)
+ break
+ }
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ } else {
+ // The underlying type of a receiver base type can be a type parameter;
+ // e.g. for methods with a generic receiver T[P] with type T[P any] P.
+ // TODO(gri) Such declarations are currently disallowed.
+ // Revisit the need for underIs.
+ underIs(T, func(u Type) bool {
+ switch u := u.(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ return false
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ return false
+ }
+ return true
+ })
+ }
+ case *Basic:
+ err = "basic or unnamed type"
if check.conf.CompilerErrorMessages {
check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
err = ""
}
- } else {
- // The underlying type of a receiver base type can be a type parameter;
- // e.g. for methods with a generic receiver T[P] with type T[P any] P.
- underIs(T, func(u Type) bool {
- switch u := u.(type) {
- case *Basic:
- // unsafe.Pointer is treated like a regular pointer
- if u.kind == UnsafePointer {
- err = "unsafe.Pointer"
- return false
- }
- case *Pointer, *Interface:
- err = "pointer or interface type"
- return false
- }
- return true
- })
+ default:
+ check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
}
- case *Basic:
- err = "basic or unnamed type"
- if check.conf.CompilerErrorMessages {
- check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
- err = ""
+ if err != "" {
+ check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
}
- default:
- check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
}
- if err != "" {
- check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
- // ok to continue
- }
- }
- sig.recv = recv
+ }).describef(recv, "validate receiver %s", recv)
}
sig.params = NewTuple(params...)
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
index 03da98af34..4c8eac725f 100644
--- a/src/cmd/compile/internal/types2/stmt.go
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -409,9 +409,9 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
if ch.mode == invalid || val.mode == invalid {
return
}
- u := structuralType(ch.typ)
+ u := coreType(ch.typ)
if u == nil {
- check.errorf(s, invalidOp+"cannot send to %s: no structural type", &ch)
+ check.errorf(s, invalidOp+"cannot send to %s: no core type", &ch)
return
}
uch, _ := u.(*Chan)
@@ -626,14 +626,15 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
case *syntax.ForStmt:
inner |= breakOk | continueOk
- check.openScope(s, "for")
- defer check.closeScope()
if rclause, _ := s.Init.(*syntax.RangeClause); rclause != nil {
check.rangeStmt(inner, s, rclause)
break
}
+ check.openScope(s, "for")
+ defer check.closeScope()
+
check.simpleStmt(s.Init)
if s.Cond != nil {
var x operand
@@ -809,8 +810,6 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu
}
func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *syntax.RangeClause) {
- // scope already opened
-
// determine lhs, if any
sKey := rclause.Lhs // possibly nil
var sValue, sExtra syntax.Expr
@@ -835,9 +834,9 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
// determine key/value types
var key, val Type
if x.mode != invalid {
- // Ranging over a type parameter is permitted if it has a structural type.
+ // Ranging over a type parameter is permitted if it has a core type.
var cause string
- u := structuralType(x.typ)
+ u := coreType(x.typ)
if t, _ := u.(*Chan); t != nil {
if sValue != nil {
check.softErrorf(sValue, "range over %s permits only one iteration variable", &x)
@@ -852,7 +851,7 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
// ok to continue
}
if u == nil {
- cause = check.sprintf("%s has no structural type", x.typ)
+ cause = check.sprintf("%s has no core type", x.typ)
}
}
key, val = rangeKeyVal(u)
@@ -866,6 +865,11 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
}
}
+ // Open the for-statement block scope now, after the range clause.
+ // Iteration variables declared with := need to go in this scope (was issue #51437).
+ check.openScope(s, "range")
+ defer check.closeScope()
+
// check assignment to/declaration of iteration variables
// (irregular assignment, cannot easily map to existing assignment checks)
@@ -874,9 +878,7 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
rhs := [2]Type{key, val} // key, val may be nil
if rclause.Def {
- // short variable declaration; variable scope starts after the range clause
- // (the for loop opens a new scope, so variables on the lhs never redeclare
- // previously declared variables)
+ // short variable declaration
var vars []*Var
for i, lhs := range lhs {
if lhs == nil {
@@ -913,12 +915,8 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
// declare variables
if len(vars) > 0 {
- scopePos := syntax.EndPos(rclause.X) // TODO(gri) should this just be s.Body.Pos (spec clarification)?
+ scopePos := s.Body.Pos()
for _, obj := range vars {
- // spec: "The scope of a constant or variable identifier declared inside
- // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl
- // for short variable declarations) and ends at the end of the innermost
- // containing block."
check.declare(check.scope, nil /* recordDef already called */, obj, scopePos)
}
} else {
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
index f2e8fecc05..037f04797b 100644
--- a/src/cmd/compile/internal/types2/subst.go
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -21,6 +21,17 @@ func makeSubstMap(tpars []*TypeParam, targs []Type) substMap {
return proj
}
+// makeRenameMap is like makeSubstMap, but creates a map used to rename type
+// parameters in from with the type parameters in to.
+func makeRenameMap(from, to []*TypeParam) substMap {
+ assert(len(from) == len(to))
+ proj := make(substMap, len(from))
+ for i, tpar := range from {
+ proj[tpar] = to[i]
+ }
+ return proj
+}
+
func (m substMap) empty() bool {
return len(m) == 0
}
@@ -149,7 +160,10 @@ func (subst *subster) typ(typ Type) Type {
methods, mcopied := subst.funcList(t.methods)
embeddeds, ecopied := subst.typeList(t.embeddeds)
if mcopied || ecopied {
- iface := &Interface{embeddeds: embeddeds, implicit: t.implicit, complete: t.complete}
+ iface := subst.check.newInterface()
+ iface.embeddeds = embeddeds
+ iface.implicit = t.implicit
+ iface.complete = t.complete
// If we've changed the interface type, we may need to replace its
// receiver if the receiver type is the original interface. Receivers of
// *Named type are replaced during named type expansion.
diff --git a/src/cmd/compile/internal/types2/termlist.go b/src/cmd/compile/internal/types2/termlist.go
index 844e39e3bf..a0108c4638 100644
--- a/src/cmd/compile/internal/types2/termlist.go
+++ b/src/cmd/compile/internal/types2/termlist.go
@@ -92,15 +92,6 @@ func (xl termlist) norm() termlist {
return rl
}
-// If the type set represented by xl is specified by a single (non-𝓤) term,
-// singleType returns that type. Otherwise it returns nil.
-func (xl termlist) singleType() Type {
- if nl := xl.norm(); len(nl) == 1 {
- return nl[0].typ // if nl.isAll() then typ is nil, which is ok
- }
- return nil
-}
-
// union returns the union xl ∪ yl.
func (xl termlist) union(yl termlist) termlist {
return append(xl, yl...).norm()
diff --git a/src/cmd/compile/internal/types2/termlist_test.go b/src/cmd/compile/internal/types2/termlist_test.go
index 1bdf9e1386..d1e3bdf88e 100644
--- a/src/cmd/compile/internal/types2/termlist_test.go
+++ b/src/cmd/compile/internal/types2/termlist_test.go
@@ -106,35 +106,6 @@ func TestTermlistNorm(t *testing.T) {
}
}
-func TestTermlistSingleType(t *testing.T) {
- // helper to deal with nil types
- tstring := func(typ Type) string {
- if typ == nil {
- return "nil"
- }
- return typ.String()
- }
-
- for test, want := range map[string]string{
- "∅": "nil",
- "𝓤": "nil",
- "int": "int",
- "myInt": "myInt",
- "~int": "int",
- "~int ∪ string": "nil",
- "~int ∪ myInt": "int",
- "∅ ∪ int": "int",
- "∅ ∪ ~int": "int",
- "∅ ∪ ~int ∪ string": "nil",
- } {
- xl := maketl(test)
- got := tstring(xl.singleType())
- if got != want {
- t.Errorf("(%v).singleType() == %v; want %v", test, got, want)
- }
- }
-}
-
func TestTermlistUnion(t *testing.T) {
for _, test := range []struct {
xl, yl, want string
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.go2 b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
index 48a39891bf..7c3f0c96ad 100644
--- a/src/cmd/compile/internal/types2/testdata/check/builtins.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
@@ -148,7 +148,7 @@ func _[
_ = make /* ERROR expects 2 or 3 arguments */ (S1)
_ = make(S1, 10, 20)
_ = make /* ERROR expects 2 or 3 arguments */ (S1, 10, 20, 30)
- _ = make(S2 /* ERROR cannot make S2: no structural type */ , 10)
+ _ = make(S2 /* ERROR cannot make S2: no core type */ , 10)
type M0 map[string]int
_ = make(map[string]int)
@@ -156,7 +156,7 @@ func _[
_ = make(M1)
_ = make(M1, 10)
_ = make/* ERROR expects 1 or 2 arguments */(M1, 10, 20)
- _ = make(M2 /* ERROR cannot make M2: no structural type */ )
+ _ = make(M2 /* ERROR cannot make M2: no core type */ )
type C0 chan int
_ = make(chan int)
@@ -164,7 +164,7 @@ func _[
_ = make(C1)
_ = make(C1, 10)
_ = make/* ERROR expects 1 or 2 arguments */(C1, 10, 20)
- _ = make(C2 /* ERROR cannot make C2: no structural type */ )
+ _ = make(C2 /* ERROR cannot make C2: no core type */ )
_ = make(C3)
}
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.src b/src/cmd/compile/internal/types2/testdata/check/builtins.src
index de27f5c632..358e9c5c0d 100644
--- a/src/cmd/compile/internal/types2/testdata/check/builtins.src
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.src
@@ -15,9 +15,9 @@ func append1() {
var x int
var s []byte
_ = append() // ERROR not enough arguments
- _ = append("foo" /* ERROR not a slice */ )
- _ = append(nil /* ERROR not a slice */ , s)
- _ = append(x /* ERROR not a slice */ , s)
+ _ = append("foo" /* ERROR must be a slice */ )
+ _ = append(nil /* ERROR must be a slice */ , s)
+ _ = append(x /* ERROR must be a slice */ , s)
_ = append(s)
_ = append(s, nil...)
append /* ERROR not used */ (s)
@@ -77,7 +77,7 @@ func append3() {
_ = append(f2())
_ = append(f3())
_ = append(f5())
- _ = append(ff /* ERROR not a slice */ ()) // TODO(gri) better error message
+ _ = append(ff /* ERROR must be a slice */ ()) // TODO(gri) better error message
}
func cap1() {
diff --git a/src/cmd/compile/internal/types2/testdata/check/funcinference.go2 b/src/cmd/compile/internal/types2/testdata/check/funcinference.go2
index 7160e18b19..45d0781cd7 100644
--- a/src/cmd/compile/internal/types2/testdata/check/funcinference.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/funcinference.go2
@@ -8,21 +8,21 @@ import "strconv"
type any interface{}
-func f0[A any, B interface{~*C}, C interface{~*D}, D interface{~*A}](A, B, C, D) {}
+func f0[A any, B interface{*C}, C interface{*D}, D interface{*A}](A, B, C, D) {}
func _() {
f := f0[string]
f("a", nil, nil, nil)
f0("a", nil, nil, nil)
}
-func f1[A any, B interface{~*A}](A, B) {}
+func f1[A any, B interface{*A}](A, B) {}
func _() {
f := f1[int]
f(int(0), new(int))
f1(int(0), new(int))
}
-func f2[A any, B interface{~[]A}](A, B) {}
+func f2[A any, B interface{[]A}](A, B) {}
func _() {
f := f2[byte]
f(byte(0), []byte{})
@@ -38,7 +38,7 @@ func _() {
// f3(x, &x, &x)
// }
-func f4[A any, B interface{~[]C}, C interface{~*A}](A, B, C) {}
+func f4[A any, B interface{[]C}, C interface{*A}](A, B, C) {}
func _() {
f := f4[int]
var x int
@@ -46,7 +46,7 @@ func _() {
f4(x, []*int{}, &x)
}
-func f5[A interface{~struct{b B; c C}}, B any, C interface{~*B}](x B) A { panic(0) }
+func f5[A interface{struct{b B; c C}}, B any, C interface{*B}](x B) A { panic(0) }
func _() {
x := f5(1.2)
var _ float64 = x.b
@@ -79,14 +79,14 @@ var _ = Double(MySlice{1})
type Setter[B any] interface {
Set(string)
- ~*B
+ *B
}
func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
result := make([]T, len(s))
for i, v := range s {
// The type of &result[i] is *T which is in the type list
- // of Setter2, so we can convert it to PT.
+ // of Setter, so we can convert it to PT.
p := PT(&result[i])
// PT has a Set method.
p.Set(v)
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeinference.go2 b/src/cmd/compile/internal/types2/testdata/check/typeinference.go2
index 8876ccaa4e..3d3380da9c 100644
--- a/src/cmd/compile/internal/types2/testdata/check/typeinference.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/typeinference.go2
@@ -14,7 +14,7 @@ func _() {
}
// recursive inference
-type Tr[A any, B ~*C, C ~*D, D ~*A] int
+type Tr[A any, B *C, C *D, D *A] int
func _() {
var x Tr[string]
var y Tr[string, ***string, **string, *string]
@@ -25,11 +25,11 @@ func _() {
}
// other patterns of inference
-type To0[A any, B ~[]A] int
-type To1[A any, B ~struct{a A}] int
-type To2[A any, B ~[][]A] int
-type To3[A any, B ~[3]*A] int
-type To4[A any, B any, C ~struct{a A; b B}] int
+type To0[A any, B []A] int
+type To1[A any, B struct{a A}] int
+type To2[A any, B [][]A] int
+type To3[A any, B [3]*A] int
+type To4[A any, B any, C struct{a A; b B}] int
func _() {
var _ To0[int]
var _ To1[int]
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeparams.go2 b/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
index ef58241519..68b1f0f5c5 100644
--- a/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
@@ -134,11 +134,11 @@ func _[T interface{ ~string }] (x T, i, j, k int) { var _ T = x[i:j:k /* ERROR 3
type myByte1 []byte
type myByte2 []byte
func _[T interface{ []byte | myByte1 | myByte2 }] (x T, i, j, k int) { var _ T = x[i:j:k] }
-func _[T interface{ []byte | myByte1 | []int }] (x T, i, j, k int) { var _ T = x[ /* ERROR no structural type */ i:j:k] }
+func _[T interface{ []byte | myByte1 | []int }] (x T, i, j, k int) { var _ T = x[ /* ERROR no core type */ i:j:k] }
func _[T interface{ []byte | myByte1 | myByte2 | string }] (x T, i, j, k int) { var _ T = x[i:j] }
func _[T interface{ []byte | myByte1 | myByte2 | string }] (x T, i, j, k int) { var _ T = x[i:j:k /* ERROR 3-index slice of string */ ] }
-func _[T interface{ []byte | myByte1 | []int | string }] (x T, i, j, k int) { var _ T = x[ /* ERROR no structural type */ i:j] }
+func _[T interface{ []byte | myByte1 | []int | string }] (x T, i, j, k int) { var _ T = x[ /* ERROR no core type */ i:j] }
// len/cap built-ins
@@ -230,7 +230,7 @@ func _[
for _, _ = range s1 {}
var s2 S2
- for range s2 /* ERROR cannot range over s2.*no structural type */ {}
+ for range s2 /* ERROR cannot range over s2.*no core type */ {}
var a0 []int
for range a0 {}
@@ -243,7 +243,7 @@ func _[
for _, _ = range a1 {}
var a2 A2
- for range a2 /* ERROR cannot range over a2.*no structural type */ {}
+ for range a2 /* ERROR cannot range over a2.*no core type */ {}
var p0 *[10]int
for range p0 {}
@@ -256,7 +256,7 @@ func _[
for _, _ = range p1 {}
var p2 P2
- for range p2 /* ERROR cannot range over p2.*no structural type */ {}
+ for range p2 /* ERROR cannot range over p2.*no core type */ {}
var m0 map[string]int
for range m0 {}
@@ -269,7 +269,7 @@ func _[
for _, _ = range m1 {}
var m2 M2
- for range m2 /* ERROR cannot range over m2.*no structural type */ {}
+ for range m2 /* ERROR cannot range over m2.*no core type */ {}
}
// type inference checks
diff --git a/src/cmd/compile/internal/types2/testdata/examples/inference.go2 b/src/cmd/compile/internal/types2/testdata/examples/inference.go2
index 0732f06a39..e3d6bfb212 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/inference.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/inference.go2
@@ -78,7 +78,7 @@ func _() {
related1(si, "foo" /* ERROR cannot use "foo" */ )
}
-func related2[Elem any, Slice interface{~[]Elem}](e Elem, s Slice) {}
+func related2[Elem any, Slice interface{[]Elem}](e Elem, s Slice) {}
func _() {
// related2 can be called with explicit instantiation.
@@ -109,16 +109,8 @@ func _() {
related3[int, []int]()
related3[byte, List[byte]]()
- // Alternatively, the 2nd type argument can be inferred
- // from the first one through constraint type inference.
- related3[int]()
-
- // The inferred type is the structural type of the Slice
- // type parameter.
- var _ []int = related3[int]()
-
- // It is not the defined parameterized type List.
- type anotherList []float32
- var _ anotherList = related3[float32]() // valid
- var _ anotherList = related3 /* ERROR cannot use .* \(value of type List\[float32\]\) as anotherList */ [float32, List[float32]]()
+ // The 2nd type argument cannot be inferred from the first
+ // one because there's two possible choices: []Elem and
+ // List[Elem].
+ related3[int]( /* ERROR cannot infer Slice */ )
}
diff --git a/src/cmd/compile/internal/types2/testdata/examples/methods.go2 b/src/cmd/compile/internal/types2/testdata/examples/methods.go2
index 1d76d553dc..a46f789d60 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/methods.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/methods.go2
@@ -35,7 +35,7 @@ func (t T1[[ /* ERROR must be an identifier */ ]int]) m2() {}
// style. In m3 below, int is the name of the local receiver type parameter
// and it shadows the predeclared identifier int which then cannot be used
// anymore as expected.
-// This is no different from locally redelaring a predeclared identifier
+// This is no different from locally re-declaring a predeclared identifier
// and usually should be avoided. There are some notable exceptions; e.g.,
// sometimes it makes sense to use the identifier "copy" which happens to
// also be the name of a predeclared built-in function.
diff --git a/src/cmd/compile/internal/types2/testdata/examples/types.go2 b/src/cmd/compile/internal/types2/testdata/examples/types.go2
index 077fcfdbb7..ae9c0151d1 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/types.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/types.go2
@@ -292,7 +292,7 @@ func _[T interface{~int|~float64}]() {
// It is possible to create composite literals of type parameter
// type as long as it's possible to create a composite literal
-// of the structural type of the type parameter's constraint.
+// of the core type of the type parameter's constraint.
func _[P interface{ ~[]int }]() P {
return P{}
return P{1, 2, 3}
@@ -307,7 +307,7 @@ func _[P interface{ ~[]E }, E interface{ map[string]P } ]() P {
}
// This is a degenerate case with a singleton type set, but we can create
-// composite literals even if the structural type is a defined type.
+// composite literals even if the core type is a defined type.
type MyInts []int
func _[P MyInts]() P {
diff --git a/src/cmd/compile/internal/types2/testdata/examples/typesets.go2 b/src/cmd/compile/internal/types2/testdata/examples/typesets.go2
index e19dcf8da3..55ef02284b 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/typesets.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/typesets.go2
@@ -35,7 +35,7 @@ func _() int {
return deref(p)
}
-func addrOfCopy[V any, P ~*V](v V) P {
+func addrOfCopy[V any, P *V](v V) P {
return &v
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2
index e4bcee51fe..2955c261f9 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2
@@ -9,7 +9,7 @@ const L = 10
type (
_ [L]struct{}
_ [A /* ERROR undeclared name A for array length */ ]struct{}
- _ [B /* ERROR not an expression */ ]struct{}
+ _ [B /* ERROR invalid array length B */ ]struct{}
_[A any] struct{}
B int
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2
index 46ac51ebdd..3c78f85aa4 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2
@@ -12,7 +12,7 @@ type C4 interface{ chan int | chan<- int }
type C5[T any] interface{ ~chan T | <-chan T }
func _[T any](ch T) {
- <-ch // ERROR cannot receive from ch .* no structural type
+ <-ch // ERROR cannot receive from ch .* no core type
}
func _[T C0](ch T) {
@@ -28,7 +28,7 @@ func _[T C2](ch T) {
}
func _[T C3](ch T) {
- <-ch // ERROR cannot receive from ch .* no structural type
+ <-ch // ERROR cannot receive from ch .* no core type
}
func _[T C4](ch T) {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
index b8ba0ad4a7..01c9672745 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
@@ -4,7 +4,7 @@
package p
-func f[F interface{~*Q}, G interface{~*R}, Q, R any](q Q, r R) {}
+func f[F interface{*Q}, G interface{*R}, Q, R any](q Q, r R) {}
func _() {
f[*float64, *int](1, 2)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2
index 83a8f3a5da..5c1fa80b29 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2
@@ -12,7 +12,7 @@ type C4 interface{ chan int | chan<- int }
type C5[T any] interface{ ~chan T | chan<- T }
func _[T any](ch T) {
- ch /* ERROR cannot send to ch .* no structural type */ <- 0
+ ch /* ERROR cannot send to ch .* no core type */ <- 0
}
func _[T C0](ch T) {
@@ -28,7 +28,7 @@ func _[T C2](ch T) {
}
func _[T C3](ch T) {
- ch /* ERROR cannot send to ch .* no structural type */ <- 0
+ ch /* ERROR cannot send to ch .* no core type */ <- 0
}
func _[T C4](ch T) {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go2
index 3d4f1b4707..72eea1ef59 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go2
@@ -2,24 +2,19 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This issue is still open:
-// - the error messages could be better or are incorrect
-// - unification fails due to stack overflow that is caught
-
package p
func f[P any](a, _ P) {
var x int
// TODO(gri) these error messages, while correct, could be better
- f(a, x /* ERROR type int of x does not match P */)
+ f(a, x /* ERROR type int of x does not match inferred type P for P */)
f(x, a /* ERROR type P of a does not match inferred type int for P */)
}
func g[P any](a, b P) {
g(a, b)
- // TODO(gri) these error messages are incorrect because the code is valid
- g(&a, & /* ERROR type \*P of &b does not match inferred type \*P for P */ b)
- g([]P{}, [ /* ERROR type \[\]P of \[\]P{} does not match inferred type \[\]P for P */ ]P{})
+ g(&a, &b)
+ g([]P{}, []P{})
// work-around: provide type argument explicitly
g[*P](&a, &b)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go2
index bea3dc14a0..0f60f47120 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go2
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This issue is still open:
-// - the error messages are unclear
-// - unification fails due to stack overflow that is caught
-
package p
func f[P *Q, Q any](P, Q) {
- // TODO(gri) these error messages are unclear
- _ = f[ /* ERROR P does not match \*Q */ P]
- _ = f[ /* ERROR cannot infer P */ *P]
+ _ = f[P]
+}
+
+func f2[P /* ERROR instantiation cycle */ *Q, Q any](P, Q) {
+ _ = f2[*P]
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49482.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49482.go2
new file mode 100644
index 0000000000..f289d2e52d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49482.go2
@@ -0,0 +1,25 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+// The following is OK, per the special handling for type literals discussed in issue #49482.
+type _[P *struct{}] struct{}
+type _[P *int,] int
+type _[P (*int),] int
+
+const P = 2 // declare P to avoid noisy 'undeclared name' errors below.
+
+// The following parse as invalid array types.
+type _[P *int /* ERROR "int \(type\) is not an expression" */ ] int
+type _[P /* ERROR non-function P */ (*int)] int
+
+// The following should be parsed as a generic type, but is instead parsed as an array type.
+type _[P *struct /* ERROR "not an expression" */ {}| int /* ERROR "not an expression" */ ] struct{}
+
+// The following fails to parse, due to the '~'
+type _[P *struct /* ERROR "not an expression" */ {}|~ /* ERROR "unexpected ~" */ int] struct{}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49735.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49735.go2
new file mode 100644
index 0000000000..50870226e4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49735.go2
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P1 any, P2 ~byte](s1 P1, s2 P2) {
+ _ = append(nil /* ERROR first argument to append must be a slice; have untyped nil */ , 0)
+ _ = append(s1 /* ERROR s1 .* has no core type */ , 0)
+ _ = append(s2 /* ERROR s2 .* has core type byte */ , 0)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go2
index b6454ab003..50487fa2ff 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go2
@@ -51,7 +51,7 @@ func f2[P interface{ Sfm; m() }](p P) {
var _ = f2[Sfm]
-// special case: structural type is a named pointer type
+// special case: core type is a named pointer type
type PSfm *Sfm
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50755.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50755.go2
new file mode 100644
index 0000000000..afc7b2414c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50755.go2
@@ -0,0 +1,47 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The core type of M2 unifies with the type of m1
+// during function argument type inference.
+// M2's constraint is unnamed.
+func f1[K1 comparable, E1 any](m1 map[K1]E1) {}
+
+func f2[M2 map[string]int](m2 M2) {
+ f1(m2)
+}
+
+// The core type of M3 unifies with the type of m1
+// during function argument type inference.
+// M3's constraint is named.
+type Map3 map[string]int
+
+func f3[M3 Map3](m3 M3) {
+ f1(m3)
+}
+
+// The core type of M5 unifies with the core type of M4
+// during constraint type inference.
+func f4[M4 map[K4]int, K4 comparable](m4 M4) {}
+
+func f5[M5 map[K5]int, K5 comparable](m5 M5) {
+ f4(m5)
+}
+
+// test case from issue
+
+func Copy[MC ~map[KC]VC, KC comparable, VC any](dst, src MC) {
+ for k, v := range src {
+ dst[k] = v
+ }
+}
+
+func Merge[MM ~map[KM]VM, KM comparable, VM any](ms ...MM) MM {
+ result := MM{}
+ for _, m := range ms {
+ Copy(result, m)
+ }
+ return result
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51145.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51145.go
new file mode 100644
index 0000000000..b84391df19
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51145.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "fmt"
+
+type (
+ _ [fmt /* ERROR invalid array length fmt */ ]int
+ _ [float64 /* ERROR invalid array length float64 */ ]int
+ _ [f /* ERROR invalid array length f */ ]int
+ _ [nil /* ERROR invalid array length nil */ ]int
+)
+
+func f()
+
+var _ fmt.Stringer // use fmt
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51158.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51158.go2
new file mode 100644
index 0000000000..3edc505382
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51158.go2
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Type checking the following code should not cause an infinite recursion.
+func f[M map[K]int, K comparable](m M) {
+ f(m)
+}
+
+// Equivalent code using mutual recursion.
+func f1[M map[K]int, K comparable](m M) {
+ f2(m)
+}
+func f2[M map[K]int, K comparable](m M) {
+ f1(m)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51229.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51229.go2
new file mode 100644
index 0000000000..ef873e6ea8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51229.go2
@@ -0,0 +1,164 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Constraint type inference should be independent of the
+// ordering of the type parameter declarations. Try all
+// permutations in the test case below.
+// Permutations produced by https://go.dev/play/p/PHcZNGJTEBZ.
+
+func f00[S1 ~[]E1, S2 ~[]E2, E1 ~byte, E2 ~byte](S1, S2) {}
+func f01[S2 ~[]E2, S1 ~[]E1, E1 ~byte, E2 ~byte](S1, S2) {}
+func f02[E1 ~byte, S1 ~[]E1, S2 ~[]E2, E2 ~byte](S1, S2) {}
+func f03[S1 ~[]E1, E1 ~byte, S2 ~[]E2, E2 ~byte](S1, S2) {}
+func f04[S2 ~[]E2, E1 ~byte, S1 ~[]E1, E2 ~byte](S1, S2) {}
+func f05[E1 ~byte, S2 ~[]E2, S1 ~[]E1, E2 ~byte](S1, S2) {}
+func f06[E2 ~byte, S2 ~[]E2, S1 ~[]E1, E1 ~byte](S1, S2) {}
+func f07[S2 ~[]E2, E2 ~byte, S1 ~[]E1, E1 ~byte](S1, S2) {}
+func f08[S1 ~[]E1, E2 ~byte, S2 ~[]E2, E1 ~byte](S1, S2) {}
+func f09[E2 ~byte, S1 ~[]E1, S2 ~[]E2, E1 ~byte](S1, S2) {}
+func f10[S2 ~[]E2, S1 ~[]E1, E2 ~byte, E1 ~byte](S1, S2) {}
+func f11[S1 ~[]E1, S2 ~[]E2, E2 ~byte, E1 ~byte](S1, S2) {}
+func f12[S1 ~[]E1, E1 ~byte, E2 ~byte, S2 ~[]E2](S1, S2) {}
+func f13[E1 ~byte, S1 ~[]E1, E2 ~byte, S2 ~[]E2](S1, S2) {}
+func f14[E2 ~byte, S1 ~[]E1, E1 ~byte, S2 ~[]E2](S1, S2) {}
+func f15[S1 ~[]E1, E2 ~byte, E1 ~byte, S2 ~[]E2](S1, S2) {}
+func f16[E1 ~byte, E2 ~byte, S1 ~[]E1, S2 ~[]E2](S1, S2) {}
+func f17[E2 ~byte, E1 ~byte, S1 ~[]E1, S2 ~[]E2](S1, S2) {}
+func f18[E2 ~byte, E1 ~byte, S2 ~[]E2, S1 ~[]E1](S1, S2) {}
+func f19[E1 ~byte, E2 ~byte, S2 ~[]E2, S1 ~[]E1](S1, S2) {}
+func f20[S2 ~[]E2, E2 ~byte, E1 ~byte, S1 ~[]E1](S1, S2) {}
+func f21[E2 ~byte, S2 ~[]E2, E1 ~byte, S1 ~[]E1](S1, S2) {}
+func f22[E1 ~byte, S2 ~[]E2, E2 ~byte, S1 ~[]E1](S1, S2) {}
+func f23[S2 ~[]E2, E1 ~byte, E2 ~byte, S1 ~[]E1](S1, S2) {}
+
+type myByte byte
+
+func _(a []byte, b []myByte) {
+ f00(a, b)
+ f01(a, b)
+ f02(a, b)
+ f03(a, b)
+ f04(a, b)
+ f05(a, b)
+ f06(a, b)
+ f07(a, b)
+ f08(a, b)
+ f09(a, b)
+ f10(a, b)
+ f11(a, b)
+ f12(a, b)
+ f13(a, b)
+ f14(a, b)
+ f15(a, b)
+ f16(a, b)
+ f17(a, b)
+ f18(a, b)
+ f19(a, b)
+ f20(a, b)
+ f21(a, b)
+ f22(a, b)
+ f23(a, b)
+}
+
+// Constraint type inference may have to iterate.
+// Again, the order of the type parameters shouldn't matter.
+
+func g0[S ~[]E, M ~map[string]S, E any](m M) {}
+func g1[M ~map[string]S, S ~[]E, E any](m M) {}
+func g2[E any, S ~[]E, M ~map[string]S](m M) {}
+func g3[S ~[]E, E any, M ~map[string]S](m M) {}
+func g4[M ~map[string]S, E any, S ~[]E](m M) {}
+func g5[E any, M ~map[string]S, S ~[]E](m M) {}
+
+func _(m map[string][]byte) {
+ g0(m)
+ g1(m)
+ g2(m)
+ g3(m)
+ g4(m)
+ g5(m)
+}
+
+// Worst-case scenario.
+// There are 10 unknown type parameters. In each iteration of
+// constraint type inference we infer one more, from right to left.
+// Each iteration looks repeatedly at all 11 type parameters,
+// requiring a total of 10*11 = 110 iterations with the current
+// implementation. Pathological case.
+
+func h[K any, J ~*K, I ~*J, H ~*I, G ~*H, F ~*G, E ~*F, D ~*E, C ~*D, B ~*C, A ~*B](x A) {}
+
+func _(x **********int) {
+ h(x)
+}
+
+// Examples with channel constraints and tilde.
+
+func ch1[P chan<- int]() (_ P) { return } // core(P) == chan<- int (single type, no tilde)
+func ch2[P ~chan int]() { return } // core(P) == ~chan<- int (tilde)
+func ch3[P chan E, E any](E) { return } // core(P) == chan<- E (single type, no tilde)
+func ch4[P chan E | ~chan<- E, E any](E) { return } // core(P) == ~chan<- E (tilde)
+func ch5[P chan int | chan<- int]() { return } // core(P) == chan<- int (not a single type)
+
+func _() {
+ // P can be inferred as there's a single specific type and no tilde.
+ var _ chan int = ch1 /* ERROR cannot use ch1.*value of type chan<- int */ ()
+ var _ chan<- int = ch1()
+
+ // P cannot be inferred as there's a tilde.
+ ch2( /* ERROR cannot infer P */ )
+ type myChan chan int
+ ch2[myChan]()
+
+ // P can be inferred as there's a single specific type and no tilde.
+ var e int
+ ch3(e)
+
+ // P cannot be inferred as there's more than one specific type and a tilde.
+ ch4( /* ERROR cannot infer P */ e)
+ _ = ch4[chan int]
+
+ // P cannot be inferred as there's more than one specific type.
+ ch5( /* ERROR cannot infer P */ )
+ ch5[chan<- int]()
+}
+
+// test case from issue
+
+func equal[M1 ~map[K1]V1, M2 ~map[K2]V2, K1, K2 ~uint32, V1, V2 ~string](m1 M1, m2 M2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[K2(k)]; !ok || V2(v1) != v2 {
+ return false
+ }
+ }
+ return true
+}
+
+func equalFixed[K1, K2 ~uint32, V1, V2 ~string](m1 map[K1]V1, m2 map[K2]V2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[K2(k)]; !ok || v1 != V1(v2) {
+ return false
+ }
+ }
+ return true
+}
+
+type (
+ someNumericID uint32
+ someStringID string
+)
+
+func _() {
+ foo := map[uint32]string{10: "bar"}
+ bar := map[someNumericID]someStringID{10: "bar"}
+ equal(foo, bar)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51232.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51232.go2
new file mode 100644
index 0000000000..6e575a376d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51232.go2
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type RC[RG any] interface {
+ ~[]RG
+}
+
+type Fn[RCT RC[RG], RG any] func(RCT)
+
+type F[RCT RC[RG], RG any] interface {
+ Fn() Fn[RCT]
+}
+
+type concreteF[RCT RC[RG], RG any] struct {
+ makeFn func() Fn[RCT]
+}
+
+func (c *concreteF[RCT, RG]) Fn() Fn[RCT] {
+ return c.makeFn()
+}
+
+func NewConcrete[RCT RC[RG], RG any](Rc RCT) F[RCT] {
+ return &concreteF[RCT]{
+ makeFn: nil,
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51233.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51233.go2
new file mode 100644
index 0000000000..5c8393d039
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51233.go2
@@ -0,0 +1,25 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type RC[RG any] interface {
+ ~[]RG
+}
+
+type Fn[RCT RC[RG], RG any] func(RCT)
+
+type FFn[RCT RC[RG], RG any] func() Fn[RCT]
+
+type F[RCT RC[RG], RG any] interface {
+ Fn() Fn[RCT]
+}
+
+type concreteF[RCT RC[RG], RG any] struct {
+ makeFn FFn[RCT]
+}
+
+func (c *concreteF[RCT, RG]) Fn() Fn[RCT] {
+ return c.makeFn()
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51257.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51257.go2
new file mode 100644
index 0000000000..bc4208e6ee
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51257.go2
@@ -0,0 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[_ comparable]() {}
+
+type S1 struct{ x int }
+type S2 struct{ x any }
+type S3 struct{ x [10]interface{ m() } }
+
+func _[P1 comparable, P2 S2]() {
+ _ = f[S1]
+ _ = f[S2 /* ERROR S2 does not implement comparable */ ]
+ _ = f[S3 /* ERROR S3 does not implement comparable */ ]
+
+ type L1 struct { x P1 }
+ type L2 struct { x P2 }
+ _ = f[L1]
+ _ = f[L2 /* ERROR L2 does not implement comparable */ ]
+}
+
+
+// example from issue
+
+type Set[T comparable] map[T]struct{}
+
+func NewSetFromSlice[T comparable](items []T) *Set[T] {
+ s := Set[T]{}
+
+ for _, item := range items {
+ s[item] = struct{}{}
+ }
+
+ return &s
+}
+
+type T struct{ x any }
+
+func main() {
+ NewSetFromSlice( /* ERROR T does not implement comparable */ []T{
+ {"foo"},
+ {5},
+ })
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51335.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51335.go2
new file mode 100644
index 0000000000..0b5a1af082
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51335.go2
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S1 struct{}
+type S2 struct{}
+
+func _[P *S1|*S2]() {
+ _= []P{{ /* ERROR invalid composite literal element type P: no core type */ }}
+}
+
+func _[P *S1|S1]() {
+ _= []P{{ /* ERROR invalid composite literal element type P: no core type */ }}
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51339.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51339.go2
new file mode 100644
index 0000000000..40706ec493
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51339.go2
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+type T[P any, B *P] struct{}
+
+func (T /* ERROR cannot use generic type */ ) m0() {}
+func (T /* ERROR got 1 type parameter, but receiver base type declares 2 */ [_]) m1() {}
+func (T[_, _]) m2() {}
+// TODO(gri) this error is unfortunate (issue #51343)
+func (T /* ERROR got 3 arguments but 2 type parameters */ [_, _, _]) m3() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51360.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51360.go
new file mode 100644
index 0000000000..447ce036ae
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51360.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ len. /* ERROR cannot select on len */ Println
+ len. /* ERROR cannot select on len */ Println()
+ _ = len. /* ERROR cannot select on len */ Println
+ _ = len[ /* ERROR cannot index len */ 0]
+ _ = *len /* ERROR cannot indirect len */
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51376.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51376.go2
new file mode 100644
index 0000000000..4eba071801
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51376.go2
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Map map[string]int
+
+func f[M ~map[K]V, K comparable, V any](M) {}
+func g[M map[K]V, K comparable, V any](M) {}
+
+func _[M1 ~map[K]V, M2 map[K]V, K comparable, V any]() {
+ var m1 M1
+ f(m1)
+ g( /* ERROR M1 does not implement map\[K\]V */ m1) // M1 has tilde
+
+ var m2 M2
+ f(m2)
+ g(m2) // M2 does not have tilde
+
+ var m3 Map
+ f(m3)
+ g( /* ERROR Map does not implement map\[string\]int */ m3) // M in g does not have tilde
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51386.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51386.go2
new file mode 100644
index 0000000000..ef6223927a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51386.go2
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type myString string
+
+func _[P ~string | ~[]byte | ~[]rune]() {
+ _ = P("")
+ const s myString = ""
+ _ = P(s)
+}
+
+func _[P myString]() {
+ _ = P("")
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51437.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51437.go
new file mode 100644
index 0000000000..376261516e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51437.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T struct{}
+
+func (T) m() []int { return nil }
+
+func f(x T) {
+ for _, x := range func() []int {
+ return x.m() // x declared in parameter list of f
+ }() {
+ _ = x // x declared by range clause
+ }
+}
diff --git a/src/cmd/compile/internal/types2/type.go b/src/cmd/compile/internal/types2/type.go
index 9487ac5a84..0fe39dbca4 100644
--- a/src/cmd/compile/internal/types2/type.go
+++ b/src/cmd/compile/internal/types2/type.go
@@ -7,9 +7,7 @@ package types2
// A Type represents a type of Go.
// All types implement the Type interface.
type Type interface {
- // Underlying returns the underlying type of a type
- // w/o following forwarding chains. Only used by
- // client packages.
+ // Underlying returns the underlying type of a type.
Underlying() Type
// String returns a string representation of a type.
@@ -27,13 +25,13 @@ func under(t Type) Type {
return t.Underlying()
}
-// If t is not a type parameter, structuralType returns the underlying type.
-// If t is a type parameter, structuralType returns the single underlying
+// If t is not a type parameter, coreType returns the underlying type.
+// If t is a type parameter, coreType returns the single underlying
// type of all types in its type set if it exists, or nil otherwise. If the
// type set contains only unrestricted and restricted channel types (with
// identical element types), the single underlying type is the restricted
// channel type if the restrictions are always the same, or nil otherwise.
-func structuralType(t Type) Type {
+func coreType(t Type) Type {
tpar, _ := t.(*TypeParam)
if tpar == nil {
return under(t)
@@ -59,10 +57,10 @@ func structuralType(t Type) Type {
return nil
}
-// structuralString is like structuralType but also considers []byte
+// coreString is like coreType but also considers []byte
// and strings as identical. In this case, if successful and we saw
// a string, the result is of type (possibly untyped) string.
-func structuralString(t Type) Type {
+func coreString(t Type) Type {
tpar, _ := t.(*TypeParam)
if tpar == nil {
return under(t) // string or untyped string
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
index 971fdaec73..9ed3369ff4 100644
--- a/src/cmd/compile/internal/types2/typeparam.go
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -36,6 +36,7 @@ func NewTypeParam(obj *TypeName, constraint Type) *TypeParam {
return (*Checker)(nil).newTypeParam(obj, constraint)
}
+// check may be nil
func (check *Checker) newTypeParam(obj *TypeName, constraint Type) *TypeParam {
// Always increment lastID, even if it is not used.
id := nextID()
@@ -50,9 +51,7 @@ func (check *Checker) newTypeParam(obj *TypeName, constraint Type) *TypeParam {
// iface may mutate typ.bound, so we must ensure that iface() is called
// at least once before the resulting TypeParam escapes.
if check != nil {
- check.later(func() {
- typ.iface()
- })
+ check.needsCleanup(typ)
} else if constraint != nil {
typ.iface()
}
@@ -93,9 +92,12 @@ func (t *TypeParam) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
// Implementation
+func (t *TypeParam) cleanup() {
+ t.iface()
+ t.check = nil
+}
+
// iface returns the constraint interface of t.
-// TODO(gri) If we make tparamIsIface the default, this should be renamed to under
-// (similar to Named.under).
func (t *TypeParam) iface() *Interface {
bound := t.bound
@@ -136,16 +138,6 @@ func (t *TypeParam) iface() *Interface {
return ityp
}
-// singleType returns the single type of the type parameter constraint; or nil.
-func (t *TypeParam) singleType() Type {
- return t.iface().typeSet().singleType()
-}
-
-// hasTerms reports whether the type parameter constraint has specific type terms.
-func (t *TypeParam) hasTerms() bool {
- return t.iface().typeSet().hasTerms()
-}
-
// is calls f with the specific type terms of t's constraint and reports whether
// all calls to f returned true. If there are no specific terms, is
// returns the result of f(nil).
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
index fff348bcf4..65ae04819e 100644
--- a/src/cmd/compile/internal/types2/typeset.go
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -39,7 +39,7 @@ func (s *_TypeSet) IsComparable(seen map[Type]bool) bool {
return s.comparable
}
return s.is(func(t *term) bool {
- return t != nil && comparable(t.typ, seen, nil)
+ return t != nil && comparable(t.typ, false, seen, nil)
})
}
@@ -103,9 +103,6 @@ func (s *_TypeSet) String() string {
// hasTerms reports whether the type set has specific type terms.
func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll() }
-// singleType returns the single type in s if there is exactly one; otherwise the result is nil.
-func (s *_TypeSet) singleType() Type { return s.terms.singleType() }
-
// subsetOf reports whether s1 ⊆ s2.
func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) }
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index de778fb010..2847aa76c0 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -342,7 +342,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
return typ
case *syntax.InterfaceType:
- typ := new(Interface)
+ typ := check.newInterface()
def.setUnderlying(typ)
if def != nil {
typ.obj = def.obj
@@ -502,12 +502,20 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *
// and returns the constant length >= 0, or a value < 0
// to indicate an error (and thus an unknown length).
func (check *Checker) arrayLength(e syntax.Expr) int64 {
- // If e is an undeclared identifier, the array declaration might be an
- // attempt at a parameterized type declaration with missing constraint.
- // Provide a better error message than just "undeclared name: X".
- if name, _ := e.(*syntax.Name); name != nil && check.lookup(name.Value) == nil {
- check.errorf(name, "undeclared name %s for array length", name.Value)
- return -1
+ // If e is an identifier, the array declaration might be an
+ // attempt at a parameterized type declaration with missing
+ // constraint. Provide an error message that mentions array
+ // length.
+ if name, _ := e.(*syntax.Name); name != nil {
+ obj := check.lookup(name.Value)
+ if obj == nil {
+ check.errorf(name, "undeclared name %s for array length", name.Value)
+ return -1
+ }
+ if _, ok := obj.(*Const); !ok {
+ check.errorf(name, "invalid array length %s", name.Value)
+ return -1
+ }
}
var x operand
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index 079db3276c..97d327cf8b 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -9,6 +9,7 @@ package types2
import (
"bytes"
"fmt"
+ "strings"
)
// The unifier maintains two separate sets of type parameters x and y
@@ -26,7 +27,7 @@ import (
// parameter P ("x" side), but the argument type P must be left alone so
// that unification resolves the type parameter P to P.
//
-// For bidirection unification, both sets are provided. This enables
+// For bidirectional unification, both sets are provided. This enables
// unification to go from argument to parameter type and vice versa.
// For constraint type inference, we use bidirectional unification
// where both the x and y type parameters are identical. This is done
@@ -41,6 +42,19 @@ const (
// Whether to panic when unificationDepthLimit is reached. Turn on when
// investigating infinite recursion.
panicAtUnificationDepthLimit = false
+
+ // If enableCoreTypeUnification is set, unification will consider
+ // the core types, if any, of non-local (unbound) type parameters.
+ enableCoreTypeUnification = true
+
+ // If traceInference is set, unification will print a trace of its operation.
+ // Interpretation of trace:
+ // x ≡ y attempt to unify types x and y
+ // p ➞ y type parameter p is set to type y (p is inferred to be y)
+ // p ⇄ q type parameters p and q match (p is inferred to be q and vice versa)
+ // x ≢ y types x and y cannot be unified
+ // [p, q, ...] ➞ [x, y, ...] mapping from type parameters to types
+ traceInference = false
)
// A unifier maintains the current type parameters for x and y
@@ -58,6 +72,7 @@ type unifier struct {
// exactly. If exact is not set, a named type's underlying type
// is considered if unification would fail otherwise, and the
// direction of channels is ignored.
+// TODO(gri) exact is not set anymore by a caller. Consider removing it.
func newUnifier(exact bool) *unifier {
u := &unifier{exact: exact}
u.x.unifier = u
@@ -70,6 +85,10 @@ func (u *unifier) unify(x, y Type) bool {
return u.nify(x, y, nil)
}
+func (u *unifier) tracef(format string, args ...interface{}) {
+ fmt.Println(strings.Repeat(". ", u.depth) + sprintf(nil, true, format, args...))
+}
+
// A tparamsList describes a list of type parameters and the types inferred for them.
type tparamsList struct {
unifier *unifier
@@ -121,6 +140,9 @@ func (d *tparamsList) init(tparams []*TypeParam) {
// If both type parameters already have a type associated with them and they are
// not joined, join fails and returns false.
func (u *unifier) join(i, j int) bool {
+ if traceInference {
+ u.tracef("%s ⇄ %s", u.x.tparams[i], u.y.tparams[j])
+ }
ti := u.x.indices[i]
tj := u.y.indices[j]
switch {
@@ -210,6 +232,9 @@ func (d *tparamsList) at(i int) Type {
func (d *tparamsList) set(i int, typ Type) {
assert(typ != nil)
u := d.unifier
+ if traceInference {
+ u.tracef("%s ➞ %s", d.tparams[i], typ)
+ }
switch ti := d.indices[i]; {
case ti < 0:
u.types[-ti-1] = typ
@@ -222,6 +247,17 @@ func (d *tparamsList) set(i int, typ Type) {
}
}
+// unknowns returns the number of type parameters for which no type has been set yet.
+func (d *tparamsList) unknowns() int {
+ n := 0
+ for _, ti := range d.indices {
+ if ti <= 0 {
+ n++
+ }
+ }
+ return n
+}
+
// types returns the list of inferred types (via unification) for the type parameters
// described by d, and an index. If all types were inferred, the returned index is < 0.
// Otherwise, it is the index of the first type parameter which couldn't be inferred;
@@ -247,9 +283,16 @@ func (u *unifier) nifyEq(x, y Type, p *ifacePair) bool {
// adapted version of Checker.identical. For changes to that
// code the corresponding changes should be made here.
// Must not be called directly from outside the unifier.
-func (u *unifier) nify(x, y Type, p *ifacePair) bool {
+func (u *unifier) nify(x, y Type, p *ifacePair) (result bool) {
+ if traceInference {
+ u.tracef("%s ≡ %s", x, y)
+ }
+
// Stop gap for cases where unification fails.
if u.depth >= unificationDepthLimit {
+ if traceInference {
+ u.tracef("depth %d >= %d", u.depth, unificationDepthLimit)
+ }
if panicAtUnificationDepthLimit {
panic("unification reached recursion depth limit")
}
@@ -258,6 +301,9 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
u.depth++
defer func() {
u.depth--
+ if traceInference && !result {
+ u.tracef("%s ≢ %s", x, y)
+ }
}()
if !u.exact {
@@ -267,8 +313,14 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
// (We use !hasName to exclude any type with a name, including
// basic types and type parameters; the rest are unamed types.)
if nx, _ := x.(*Named); nx != nil && !hasName(y) {
+ if traceInference {
+ u.tracef("under %s ≡ %s", nx, y)
+ }
return u.nify(nx.under(), y, p)
} else if ny, _ := y.(*Named); ny != nil && !hasName(x) {
+ if traceInference {
+ u.tracef("%s ≡ under %s", x, ny)
+ }
return u.nify(x, ny.under(), p)
}
}
@@ -302,6 +354,39 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
return true
}
+ // If we get here and x or y is a type parameter, they are type parameters
+ // from outside our declaration list. Try to unify their core types, if any
+ // (see issue #50755 for a test case).
+ if enableCoreTypeUnification && !u.exact {
+ if isTypeParam(x) && !hasName(y) {
+ // When considering the type parameter for unification
+ // we look at the adjusted core term (adjusted core type
+ // with tilde information).
+ // If the adjusted core type is a named type N; the
+ // corresponding core type is under(N). Since !u.exact
+ // and y doesn't have a name, unification will end up
+ // comparing under(N) to y, so we can just use the core
+ // type instead. And we can ignore the tilde because we
+ // already look at the underlying types on both sides
+ // and we have known types on both sides.
+ // Optimization.
+ if cx := coreType(x); cx != nil {
+ if traceInference {
+ u.tracef("core %s ≡ %s", x, y)
+ }
+ return u.nify(cx, y, p)
+ }
+ } else if isTypeParam(y) && !hasName(x) {
+ // see comment above
+ if cy := coreType(y); cy != nil {
+ if traceInference {
+ u.tracef("%s ≡ core %s", x, y)
+ }
+ return u.nify(x, cy, p)
+ }
+ }
+ }
+
// For type unification, do not shortcut (x == y) for identical
// types. Instead keep comparing them element-wise to unify the
// matching (and equal type parameter types). A simple test case
@@ -490,7 +575,7 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
// avoid a crash in case of nil type
default:
- panic(fmt.Sprintf("### u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams))
+ panic(sprintf(nil, true, "u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams))
}
return false
diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go
index c508eadc7c..f365ad1e27 100644
--- a/src/cmd/compile/internal/types2/validtype.go
+++ b/src/cmd/compile/internal/types2/validtype.go
@@ -79,7 +79,7 @@ func (check *Checker) validType0(typ Type, env *tparamEnv, path []Object) typeIn
// would have reported a type cycle and couldn't have been
// imported in the first place.
assert(t.obj.pkg == check.pkg)
- t.underlying = Typ[Invalid] // t is in the current package (no race possibilty)
+ t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
// Find the starting point of the cycle and report it.
for i, tn := range path {
if tn == t.obj {
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
index 4d1c5621fe..f7bd2e0e07 100644
--- a/src/cmd/compile/internal/walk/closure.go
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -227,7 +227,7 @@ func methodValueWrapper(dot *ir.SelectorExpr) *ir.Name {
}
sym.SetUniq(true)
- if base.Debug.Unified != 0 && base.Debug.UnifiedQuirks == 0 {
+ if base.Debug.Unified != 0 {
base.FatalfAt(dot.Pos(), "missing wrapper for %v", meth)
}
@@ -235,15 +235,7 @@ func methodValueWrapper(dot *ir.SelectorExpr) *ir.Name {
saveLineNo := base.Pos
ir.CurFunc = nil
- // Set line number equal to the line number where the method is declared.
- if pos := dot.Selection.Pos; pos.IsKnown() {
- base.Pos = pos
- }
- // Note: !dot.Selection.Pos.IsKnown() happens for method expressions where
- // the method is implicitly declared. The Error method of the
- // built-in error type is one such method. We leave the line
- // number at the use of the method expression in this
- // case. See issue 29389.
+ base.Pos = base.AutogeneratedPos
tfn := ir.NewFuncType(base.Pos, nil,
typecheck.NewFuncParams(t0.Params(), true),
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
index b985b4caeb..e46f828d65 100644
--- a/src/cmd/compile/internal/walk/complit.go
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -419,7 +419,7 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
// make the map var
a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil)
a.SetEsc(n.Esc())
- a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(int64(len(n.List)))}
+ a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(n.Len + int64(len(n.List)))}
litas(m, a, init)
entries := n.List
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index 861c122456..cc37f95764 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -1433,6 +1433,15 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
o.stmt(as)
}
+
+ // Remember that we issued these assignments so we can include that count
+ // in the map alloc hint.
+ // We're assuming here that all the keys in the map literal are distinct.
+ // If any are equal, this will be an overcount. Probably not worth accounting
+ // for that, as equal keys in map literals are rare, and at worst we waste
+ // a bit of space.
+ n.Len += int64(len(dynamics))
+
return m
}
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
index fde8f50895..5cea66f5ff 100644
--- a/src/cmd/compile/internal/walk/select.go
+++ b/src/cmd/compile/internal/walk/select.go
@@ -239,21 +239,28 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
// dispatch cases
dispatch := func(cond ir.Node, cas *ir.CommClause) {
- cond = typecheck.Expr(cond)
- cond = typecheck.DefaultLit(cond, nil)
-
- r := ir.NewIfStmt(base.Pos, cond, nil, nil)
+ var list ir.Nodes
if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 {
n := n.(*ir.AssignListStmt)
if !ir.IsBlank(n.Lhs[1]) {
x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK)
- r.Body.Append(typecheck.Stmt(x))
+ list.Append(typecheck.Stmt(x))
}
}
- r.Body.Append(cas.Body.Take()...)
- r.Body.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+ list.Append(cas.Body.Take()...)
+ list.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+
+ var r ir.Node
+ if cond != nil {
+ cond = typecheck.Expr(cond)
+ cond = typecheck.DefaultLit(cond, nil)
+ r = ir.NewIfStmt(base.Pos, cond, list, nil)
+ } else {
+ r = ir.NewBlockStmt(base.Pos, list)
+ }
+
init = append(init, r)
}
@@ -263,6 +270,10 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
}
for i, cas := range casorder {
ir.SetPos(cas)
+ if i == len(casorder)-1 {
+ dispatch(nil, cas)
+ break
+ }
dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, ir.NewInt(int64(i))), cas)
}
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 036f8c52fa..79ccf2b167 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -63,6 +63,7 @@ var bootstrapDirs = []string{
"internal/buildcfg",
"internal/goexperiment",
"internal/goversion",
+ "internal/pkgbits",
"internal/race",
"internal/unsafeheader",
"internal/xcoff",
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index 4b67565430..ab30089881 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -333,15 +333,10 @@ var (
benchMatches []string
)
-func (t *tester) registerStdTest(pkg string, useG3 bool) {
+func (t *tester) registerStdTest(pkg string) {
heading := "Testing packages."
testPrefix := "go_test:"
gcflags := gogcflags
- if useG3 {
- heading = "Testing packages with -G=3."
- testPrefix = "go_test_g3:"
- gcflags += " -G=3"
- }
testName := testPrefix + pkg
if t.runRx == nil || t.runRx.MatchString(testName) == t.runRxWant {
@@ -442,10 +437,7 @@ func (t *tester) registerTests() {
if len(t.runNames) > 0 {
for _, name := range t.runNames {
if strings.HasPrefix(name, "go_test:") {
- t.registerStdTest(strings.TrimPrefix(name, "go_test:"), false)
- }
- if strings.HasPrefix(name, "go_test_g3:") {
- t.registerStdTest(strings.TrimPrefix(name, "go_test_g3:"), true)
+ t.registerStdTest(strings.TrimPrefix(name, "go_test:"))
}
if strings.HasPrefix(name, "go_test_bench:") {
t.registerRaceBenchTest(strings.TrimPrefix(name, "go_test_bench:"))
@@ -468,15 +460,8 @@ func (t *tester) registerTests() {
fatalf("Error running go list std cmd: %v:\n%s", err, cmd.Stderr)
}
pkgs := strings.Fields(string(all))
- if false {
- // Disable -G=3 option for standard tests for now, since
- // they are flaky on the builder.
- for _, pkg := range pkgs {
- t.registerStdTest(pkg, true /* -G=3 flag */)
- }
- }
for _, pkg := range pkgs {
- t.registerStdTest(pkg, false)
+ t.registerStdTest(pkg)
}
if t.race {
for _, pkg := range pkgs {
@@ -1119,7 +1104,7 @@ func (t *tester) cgoTest(dt *distTest) error {
// Skip internal linking cases on arm64 to support GCC-9.4 and above.
// See issue #39466.
- skipInternalLink := goarch == "arm64" && goos != "windows"
+ skipInternalLink := goarch == "arm64" && goos != "darwin"
if t.internalLink() && !skipInternalLink {
cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=internal")
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index 13a3f00d6f..825de1e64a 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -177,14 +177,6 @@
// directory, but it is not accessed. When -modfile is specified, an
// alternate go.sum file is also used: its path is derived from the
// -modfile flag by trimming the ".mod" extension and appending ".sum".
-// -workfile file
-// in module aware mode, use the given go.work file as a workspace file.
-// By default or when -workfile is "auto", the go command searches for a
-// file named go.work in the current directory and then containing directories
-// until one is found. If a valid go.work file is found, the modules
-// specified will collectively be used as the main modules. If -workfile
-// is "off", or a go.work file is not found in "auto" mode, workspace
-// mode is disabled.
// -overlay file
// read a JSON config file that provides an overlay for build operations.
// The file is a JSON struct with a single field, named 'Replace', that
@@ -877,7 +869,10 @@
// for the go/build package's Context type.
//
// The -json flag causes the package data to be printed in JSON format
-// instead of using the template format.
+// instead of using the template format. The JSON flag can optionally be
+// provided with a set of comma-separated required field names to be output.
+// If so, those required fields will always appear in JSON output, but
+// others may be omitted to save work in computing the JSON struct.
//
// The -compiled flag causes list to set CompiledGoFiles to the Go source
// files presented to the compiler. Typically this means that it repeats
@@ -1364,7 +1359,7 @@
//
// Workspace maintenance
//
-// Go workspace provides access to operations on workspaces.
+// Work provides access to operations on workspaces.
//
// Note that support for workspaces is built into many other commands, not
// just 'go work'.
@@ -1372,6 +1367,12 @@
// See 'go help modules' for information about Go's module system of which
// workspaces are a part.
//
+// See https://go.dev/ref/mod#workspaces for an in-depth reference on
+// workspaces.
+//
+// See https://go.dev/doc/tutorial/workspaces for an introductory
+// tutorial on workspaces.
+//
// A workspace is specified by a go.work file that specifies a set of
// module directories with the "use" directive. These modules are used as
// root modules by the go command for builds and related operations. A
@@ -1493,9 +1494,8 @@
// Version string
// }
//
-// See the workspaces design proposal at
-// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md for
-// more information.
+// See the workspaces reference at https://go.dev/ref/mod#workspaces
+// for more information.
//
//
// Initialize workspace file
@@ -1515,6 +1515,9 @@
// Each argument path is added to a use directive in the go.work file. The
// current go version will also be listed in the go.work file.
//
+// See the workspaces reference at https://go.dev/ref/mod#workspaces
+// for more information.
+//
//
// Sync workspace build list to modules
//
@@ -1538,12 +1541,15 @@
// build list's version of each module is always the same or higher than
// that in each workspace module.
//
+// See the workspaces reference at https://go.dev/ref/mod#workspaces
+// for more information.
+//
//
// Add modules to workspace file
//
// Usage:
//
-// go work use [-r] [moddirs]
+// go work use [-r] moddirs
//
// Use provides a command-line interface for adding
// directories, optionally recursively, to a go.work file.
@@ -1557,6 +1563,9 @@
// were specified as arguments: namely, use directives will be added for
// directories that exist, and removed for directories that do not exist.
//
+// See the workspaces reference at https://go.dev/ref/mod#workspaces
+// for more information.
+//
//
// Compile and run Go program
//
@@ -2075,6 +2084,14 @@
// GOVCS
// Lists version control commands that may be used with matching servers.
// See 'go help vcs'.
+// GOWORK
+// In module aware mode, use the given go.work file as a workspace file.
+// By default or when GOWORK is "auto", the go command searches for a
+// file named go.work in the current directory and then containing directories
+// until one is found. If a valid go.work file is found, the modules
+// specified will collectively be used as the main modules. If GOWORK
+// is "off", or a go.work file is not found in "auto" mode, workspace
+// mode is disabled.
//
// Environment variables for use with cgo:
//
diff --git a/src/cmd/go/internal/base/flag.go b/src/cmd/go/internal/base/flag.go
index 2c72c7e562..120420a126 100644
--- a/src/cmd/go/internal/base/flag.go
+++ b/src/cmd/go/internal/base/flag.go
@@ -62,13 +62,6 @@ func AddModFlag(flags *flag.FlagSet) {
flags.Var(explicitStringFlag{value: &cfg.BuildMod, explicit: &cfg.BuildModExplicit}, "mod", "")
}
-// AddWorkfileFlag adds the workfile flag to the flag set. It enables workspace
-// mode for commands that support it by resetting the cfg.WorkFile variable
-// to "" (equivalent to auto) rather than off.
-func AddWorkfileFlag(flags *flag.FlagSet) {
- flags.Var(explicitStringFlag{value: &cfg.WorkFile, explicit: &cfg.WorkFileExplicit}, "workfile", "")
-}
-
// AddModCommonFlags adds the module-related flags common to build commands
// and 'go mod' subcommands.
func AddModCommonFlags(flags *flag.FlagSet) {
diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go
index 7f68d7bb62..deab3dddd0 100644
--- a/src/cmd/go/internal/cfg/cfg.go
+++ b/src/cmd/go/internal/cfg/cfg.go
@@ -49,10 +49,8 @@ var (
BuildWork bool // -work flag
BuildX bool // -x flag
- ModCacheRW bool // -modcacherw flag
- ModFile string // -modfile flag
- WorkFile string // -workfile flag
- WorkFileExplicit bool // whether -workfile was set explicitly
+ ModCacheRW bool // -modcacherw flag
+ ModFile string // -modfile flag
CmdName string // "build", "install", "list", "mod tidy", etc.
diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go
index e56dd8223f..c1adf8cef4 100644
--- a/src/cmd/go/internal/envcmd/env.go
+++ b/src/cmd/go/internal/envcmd/env.go
@@ -154,6 +154,10 @@ func ExtraEnvVars() []cfg.EnvVar {
}
modload.InitWorkfile()
gowork := modload.WorkFilePath()
+ // As a special case, if a user set off explicitly, report that in GOWORK.
+ if cfg.Getenv("GOWORK") == "off" {
+ gowork = "off"
+ }
return []cfg.EnvVar{
{Name: "GOMOD", Value: gomod},
{Name: "GOWORK", Value: gowork},
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
index d1eaad1c12..28ddaac8f1 100644
--- a/src/cmd/go/internal/help/helpdoc.go
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -545,6 +545,14 @@ General-purpose environment variables:
GOVCS
Lists version control commands that may be used with matching servers.
See 'go help vcs'.
+ GOWORK
+ In module aware mode, use the given go.work file as a workspace file.
+ By default or when GOWORK is "auto", the go command searches for a
+ file named go.work in the current directory and then containing directories
+ until one is found. If a valid go.work file is found, the modules
+ specified will collectively be used as the main modules. If GOWORK
+ is "off", or a go.work file is not found in "auto" mode, workspace
+ mode is disabled.
Environment variables for use with cgo:
diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go
index d9a7078ccf..9cebb934bf 100644
--- a/src/cmd/go/internal/list/list.go
+++ b/src/cmd/go/internal/list/list.go
@@ -13,7 +13,9 @@ import (
"fmt"
"io"
"os"
+ "reflect"
"sort"
+ "strconv"
"strings"
"text/template"
@@ -157,7 +159,10 @@ For more information about the meaning of these fields see the documentation
for the go/build package's Context type.
The -json flag causes the package data to be printed in JSON format
-instead of using the template format.
+instead of using the template format. The JSON flag can optionally be
+provided with a set of comma-separated required field names to be output.
+If so, those required fields will always appear in JSON output, but
+others may be omitted to save work in computing the JSON struct.
The -compiled flag causes list to set CompiledGoFiles to the Go source
files presented to the compiler. Typically this means that it repeats
@@ -316,30 +321,79 @@ For more about modules, see https://golang.org/ref/mod.
func init() {
CmdList.Run = runList // break init cycle
work.AddBuildFlags(CmdList, work.DefaultBuildFlags)
- base.AddWorkfileFlag(&CmdList.Flag)
+ CmdList.Flag.Var(&listJsonFields, "json", "")
}
var (
- listCompiled = CmdList.Flag.Bool("compiled", false, "")
- listDeps = CmdList.Flag.Bool("deps", false, "")
- listE = CmdList.Flag.Bool("e", false, "")
- listExport = CmdList.Flag.Bool("export", false, "")
- listFmt = CmdList.Flag.String("f", "", "")
- listFind = CmdList.Flag.Bool("find", false, "")
- listJson = CmdList.Flag.Bool("json", false, "")
- listM = CmdList.Flag.Bool("m", false, "")
- listRetracted = CmdList.Flag.Bool("retracted", false, "")
- listTest = CmdList.Flag.Bool("test", false, "")
- listU = CmdList.Flag.Bool("u", false, "")
- listVersions = CmdList.Flag.Bool("versions", false, "")
+ listCompiled = CmdList.Flag.Bool("compiled", false, "")
+ listDeps = CmdList.Flag.Bool("deps", false, "")
+ listE = CmdList.Flag.Bool("e", false, "")
+ listExport = CmdList.Flag.Bool("export", false, "")
+ listFmt = CmdList.Flag.String("f", "", "")
+ listFind = CmdList.Flag.Bool("find", false, "")
+ listJson bool
+ listJsonFields jsonFlag // If not empty, only output these fields.
+ listM = CmdList.Flag.Bool("m", false, "")
+ listRetracted = CmdList.Flag.Bool("retracted", false, "")
+ listTest = CmdList.Flag.Bool("test", false, "")
+ listU = CmdList.Flag.Bool("u", false, "")
+ listVersions = CmdList.Flag.Bool("versions", false, "")
)
+// A StringsFlag is a command-line flag that interprets its argument
+// as a space-separated list of possibly-quoted strings.
+type jsonFlag map[string]bool
+
+func (v *jsonFlag) Set(s string) error {
+ if v, err := strconv.ParseBool(s); err == nil {
+ listJson = v
+ return nil
+ }
+ listJson = true
+ if *v == nil {
+ *v = make(map[string]bool)
+ }
+ for _, f := range strings.Split(s, ",") {
+ (*v)[f] = true
+ }
+ return nil
+}
+
+func (v *jsonFlag) String() string {
+ var fields []string
+ for f := range *v {
+ fields = append(fields, f)
+ }
+ sort.Strings(fields)
+ return strings.Join(fields, ",")
+}
+
+func (v *jsonFlag) IsBoolFlag() bool {
+ return true
+}
+
+func (v *jsonFlag) needAll() bool {
+ return len(*v) == 0
+}
+
+func (v *jsonFlag) needAny(fields ...string) bool {
+ if v.needAll() {
+ return true
+ }
+ for _, f := range fields {
+ if (*v)[f] {
+ return true
+ }
+ }
+ return false
+}
+
var nl = []byte{'\n'}
func runList(ctx context.Context, cmd *base.Command, args []string) {
modload.InitWorkfile()
- if *listFmt != "" && *listJson == true {
+ if *listFmt != "" && listJson == true {
base.Fatalf("go list -f cannot be used with -json")
}
@@ -358,9 +412,18 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
}
}
- var do func(any)
- if *listJson {
+ var do func(x any)
+ if listJson {
do = func(x any) {
+ if !listJsonFields.needAll() {
+ v := reflect.ValueOf(x).Elem() // do is always called with a non-nil pointer.
+ // Clear all non-requested fields.
+ for i := 0; i < v.NumField(); i++ {
+ if !listJsonFields.needAny(v.Type().Field(i).Name) {
+ v.Field(i).Set(reflect.Zero(v.Type().Field(i).Type))
+ }
+ }
+ }
b, err := json.MarshalIndent(x, "", "\t")
if err != nil {
out.Flush()
@@ -590,7 +653,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
}
// Do we need to run a build to gather information?
- needStale := *listJson || strings.Contains(*listFmt, ".Stale")
+ needStale := (listJson && listJsonFields.needAny("Stale", "StaleReason")) || strings.Contains(*listFmt, ".Stale")
if needStale || *listExport || *listCompiled {
var b work.Builder
b.Init()
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index aba1dfd1c1..1a510b86c7 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -825,11 +825,11 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
}
r := resolvedImportCache.Do(importKey, func() any {
var r resolvedImport
- if build.IsLocalImport(path) {
+ if cfg.ModulesEnabled {
+ r.dir, r.path, r.err = modload.Lookup(parentPath, parentIsStd, path)
+ } else if build.IsLocalImport(path) {
r.dir = filepath.Join(parentDir, path)
r.path = dirToImportPath(r.dir)
- } else if cfg.ModulesEnabled {
- r.dir, r.path, r.err = modload.Lookup(parentPath, parentIsStd, path)
} else if mode&ResolveImport != 0 {
// We do our own path resolution, because we want to
// find out the key to use in packageCache without the
@@ -1119,6 +1119,7 @@ func dirAndRoot(path string, dir, root string) (string, string) {
}
if !str.HasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator || path != "command-line-arguments" && !build.IsLocalImport(path) && filepath.Join(root, path) != dir {
+ debug.PrintStack()
base.Fatalf("unexpected directory layout:\n"+
" import path: %s\n"+
" root: %s\n"+
@@ -2235,13 +2236,17 @@ func (p *Package) setBuildInfo() {
var debugModFromModinfo func(*modinfo.ModulePublic) *debug.Module
debugModFromModinfo = func(mi *modinfo.ModulePublic) *debug.Module {
+ version := mi.Version
+ if version == "" {
+ version = "(devel)"
+ }
dm := &debug.Module{
Path: mi.Path,
- Version: mi.Version,
+ Version: version,
}
if mi.Replace != nil {
dm.Replace = debugModFromModinfo(mi.Replace)
- } else {
+ } else if mi.Version != "" {
dm.Sum = modfetch.Sum(module.Version{Path: mi.Path, Version: mi.Version})
}
return dm
@@ -2424,12 +2429,7 @@ func (p *Package) setBuildInfo() {
appendSetting("vcs.modified", strconv.FormatBool(st.Uncommitted))
}
- text, err := info.MarshalText()
- if err != nil {
- setPkgErrorf("error formatting build info: %v", err)
- return
- }
- p.Internal.BuildInfo = string(text)
+ p.Internal.BuildInfo = info.String()
}
// SafeArg reports whether arg is a "safe" command-line argument,
diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go
index 6b8a010fd9..5bc6cbc4bb 100644
--- a/src/cmd/go/internal/modcmd/download.go
+++ b/src/cmd/go/internal/modcmd/download.go
@@ -70,7 +70,6 @@ func init() {
// TODO(jayconrod): https://golang.org/issue/35849 Apply -x to other 'go mod' commands.
cmdDownload.Flag.BoolVar(&cfg.BuildX, "x", false, "")
base.AddModCommonFlags(&cmdDownload.Flag)
- base.AddWorkfileFlag(&cmdDownload.Flag)
}
type moduleJSON struct {
diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go
index 9b6aa1fb14..9568c65740 100644
--- a/src/cmd/go/internal/modcmd/graph.go
+++ b/src/cmd/go/internal/modcmd/graph.go
@@ -42,7 +42,6 @@ var (
func init() {
cmdGraph.Flag.Var(&graphGo, "go", "")
base.AddModCommonFlags(&cmdGraph.Flag)
- base.AddWorkfileFlag(&cmdGraph.Flag)
}
func runGraph(ctx context.Context, cmd *base.Command, args []string) {
diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go
index 3f0c005d5d..459bf5d070 100644
--- a/src/cmd/go/internal/modcmd/verify.go
+++ b/src/cmd/go/internal/modcmd/verify.go
@@ -39,7 +39,6 @@ See https://golang.org/ref/mod#go-mod-verify for more about 'go mod verify'.
func init() {
base.AddModCommonFlags(&cmdVerify.Flag)
- base.AddWorkfileFlag(&cmdVerify.Flag)
}
func runVerify(ctx context.Context, cmd *base.Command, args []string) {
diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go
index d8355cca95..2d3f1eb05b 100644
--- a/src/cmd/go/internal/modcmd/why.go
+++ b/src/cmd/go/internal/modcmd/why.go
@@ -59,7 +59,6 @@ var (
func init() {
cmdWhy.Run = runWhy // break init cycle
base.AddModCommonFlags(&cmdWhy.Flag)
- base.AddWorkfileFlag(&cmdWhy.Flag)
}
func runWhy(ctx context.Context, cmd *base.Command, args []string) {
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go
index 2206c7c840..dfaf16def6 100644
--- a/src/cmd/go/internal/modfetch/coderepo.go
+++ b/src/cmd/go/internal/modfetch/coderepo.go
@@ -305,17 +305,46 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
//
// (If the version is +incompatible, then the go.mod file must not exist:
// +incompatible is not an ongoing opt-out from semantic import versioning.)
- var canUseIncompatible func() bool
- canUseIncompatible = func() bool {
- var ok bool
- if r.codeDir == "" && r.pathMajor == "" {
+ incompatibleOk := map[string]bool{}
+ canUseIncompatible := func(v string) bool {
+ if r.codeDir != "" || r.pathMajor != "" {
+ // A non-empty codeDir indicates a module within a subdirectory,
+ // which necessarily has a go.mod file indicating the module boundary.
+ // A non-empty pathMajor indicates a module path with a major-version
+ // suffix, which must match.
+ return false
+ }
+
+ ok, seen := incompatibleOk[""]
+ if !seen {
_, errGoMod := r.code.ReadFile(info.Name, "go.mod", codehost.MaxGoMod)
- if errGoMod != nil {
- ok = true
+ ok = (errGoMod != nil)
+ incompatibleOk[""] = ok
+ }
+ if !ok {
+ // A go.mod file exists at the repo root.
+ return false
+ }
+
+ // Per https://go.dev/issue/51324, previous versions of the 'go' command
+ // didn't always check for go.mod files in subdirectories, so if the user
+ // requests a +incompatible version explicitly, we should continue to allow
+ // it. Otherwise, if vN/go.mod exists, expect that release tags for that
+ // major version are intended for the vN module.
+ if v != "" && !strings.HasSuffix(statVers, "+incompatible") {
+ major := semver.Major(v)
+ ok, seen = incompatibleOk[major]
+ if !seen {
+ _, errGoModSub := r.code.ReadFile(info.Name, path.Join(major, "go.mod"), codehost.MaxGoMod)
+ ok = (errGoModSub != nil)
+ incompatibleOk[major] = ok
+ }
+ if !ok {
+ return false
}
}
- canUseIncompatible = func() bool { return ok }
- return ok
+
+ return true
}
// checkCanonical verifies that the canonical version v is compatible with the
@@ -367,7 +396,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
base := strings.TrimSuffix(v, "+incompatible")
var errIncompatible error
if !module.MatchPathMajor(base, r.pathMajor) {
- if canUseIncompatible() {
+ if canUseIncompatible(base) {
v = base + "+incompatible"
} else {
if r.pathMajor != "" {
@@ -495,7 +524,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
// Save the highest non-retracted canonical tag for the revision.
// If we don't find a better match, we'll use it as the canonical version.
if tagIsCanonical && semver.Compare(highestCanonical, v) < 0 && !isRetracted(v) {
- if module.MatchPathMajor(v, r.pathMajor) || canUseIncompatible() {
+ if module.MatchPathMajor(v, r.pathMajor) || canUseIncompatible(v) {
highestCanonical = v
}
}
@@ -513,12 +542,12 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
// retracted versions.
allowedMajor := func(major string) func(v string) bool {
return func(v string) bool {
- return (major == "" || semver.Major(v) == major) && !isRetracted(v)
+ return ((major == "" && canUseIncompatible(v)) || semver.Major(v) == major) && !isRetracted(v)
}
}
if pseudoBase == "" {
var tag string
- if r.pseudoMajor != "" || canUseIncompatible() {
+ if r.pseudoMajor != "" || canUseIncompatible("") {
tag, _ = r.code.RecentTag(info.Name, tagPrefix, allowedMajor(r.pseudoMajor))
} else {
// Allow either v1 or v0, but not incompatible higher versions.
diff --git a/src/cmd/go/internal/modfetch/coderepo_test.go b/src/cmd/go/internal/modfetch/coderepo_test.go
index d98ea87da2..bb9268adb8 100644
--- a/src/cmd/go/internal/modfetch/coderepo_test.go
+++ b/src/cmd/go/internal/modfetch/coderepo_test.go
@@ -458,6 +458,54 @@ var codeRepoTests = []codeRepoTest{
rev: "v3.0.0-devel",
err: `resolves to version v0.1.1-0.20220203155313-d59622f6e4d7 (v3.0.0-devel is not a tag)`,
},
+
+ // If v2/go.mod exists, then we should prefer to match the "v2"
+ // pseudo-versions to the nested module, and resolve the module in the parent
+ // directory to only compatible versions.
+ //
+ // However (https://go.dev/issue/51324), previous versions of the 'go' command
+ // didn't always do so, so if the user explicitly requests a +incompatible
+ // version (as would be present in an existing go.mod file), we should
+ // continue to allow it.
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "80beb17a1603",
+ version: "v0.0.0-20220222205507-80beb17a1603",
+ name: "80beb17a16036f17a5aedd1bb5bd6d407b3c6dc5",
+ short: "80beb17a1603",
+ time: time.Date(2022, 2, 22, 20, 55, 7, 0, time.UTC),
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "v2.0.0",
+ err: `module contains a go.mod file, so module path must match major version ("vcs-test.golang.org/git/v2sub.git/v2")`,
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "v2.0.1-0.20220222205507-80beb17a1603",
+ err: `module contains a go.mod file, so module path must match major version ("vcs-test.golang.org/git/v2sub.git/v2")`,
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "v2.0.0+incompatible",
+ version: "v2.0.0+incompatible",
+ name: "5fcd3eaeeb391d399f562fd45a50dac9fc34ae8b",
+ short: "5fcd3eaeeb39",
+ time: time.Date(2022, 2, 22, 20, 53, 33, 0, time.UTC),
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "v2.0.1-0.20220222205507-80beb17a1603+incompatible",
+ version: "v2.0.1-0.20220222205507-80beb17a1603+incompatible",
+ name: "80beb17a16036f17a5aedd1bb5bd6d407b3c6dc5",
+ short: "80beb17a1603",
+ time: time.Date(2022, 2, 22, 20, 55, 7, 0, time.UTC),
+ },
}
func TestCodeRepo(t *testing.T) {
diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go
index f5423b48ad..21d5f54688 100644
--- a/src/cmd/go/internal/modfetch/fetch.go
+++ b/src/cmd/go/internal/modfetch/fetch.go
@@ -319,7 +319,7 @@ func downloadZip(ctx context.Context, mod module.Version, zipfile string) (err e
//
// If the hash does not match go.sum (or the sumdb if enabled), hashZip returns
// an error and does not write ziphashfile.
-func hashZip(mod module.Version, zipfile, ziphashfile string) error {
+func hashZip(mod module.Version, zipfile, ziphashfile string) (err error) {
hash, err := dirhash.HashZip(zipfile, dirhash.DefaultHash)
if err != nil {
return err
@@ -331,16 +331,17 @@ func hashZip(mod module.Version, zipfile, ziphashfile string) error {
if err != nil {
return err
}
+ defer func() {
+ if closeErr := hf.Close(); err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }()
if err := hf.Truncate(int64(len(hash))); err != nil {
return err
}
if _, err := hf.WriteAt([]byte(hash), 0); err != nil {
return err
}
- if err := hf.Close(); err != nil {
- return err
- }
-
return nil
}
diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go
index 812e48a156..4862f625b4 100644
--- a/src/cmd/go/internal/modload/import.go
+++ b/src/cmd/go/internal/modload/import.go
@@ -248,12 +248,26 @@ func (e *invalidImportError) Unwrap() error {
// return the module, its root directory, and a list of other modules that
// lexically could have provided the package but did not.
func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph) (m module.Version, dir string, altMods []module.Version, err error) {
+ invalidf := func(format string, args ...interface{}) (module.Version, string, []module.Version, error) {
+ return module.Version{}, "", nil, &invalidImportError{
+ importPath: path,
+ err: fmt.Errorf(format, args...),
+ }
+ }
+
if strings.Contains(path, "@") {
- return module.Version{}, "", nil, fmt.Errorf("import path should not have @version")
+ return invalidf("import path %q should not have @version", path)
}
if build.IsLocalImport(path) {
- return module.Version{}, "", nil, fmt.Errorf("relative import not supported")
+ return invalidf("%q is relative, but relative import paths are not supported in module mode", path)
}
+ if filepath.IsAbs(path) {
+ return invalidf("%q is not a package path; see 'go help packages'", path)
+ }
+ if search.IsMetaPackage(path) {
+ return invalidf("%q is not an importable package; see 'go help packages'", path)
+ }
+
if path == "C" {
// There's no directory for import "C".
return module.Version{}, "", nil, nil
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
index 23f4efd02a..f960edd251 100644
--- a/src/cmd/go/internal/modload/init.go
+++ b/src/cmd/go/internal/modload/init.go
@@ -288,20 +288,25 @@ func BinDir() string {
// operate in workspace mode. It should not be called by other commands,
// for example 'go mod tidy', that don't operate in workspace mode.
func InitWorkfile() {
- switch cfg.WorkFile {
+ if RootMode == NoRoot {
+ workFilePath = ""
+ return
+ }
+
+ switch gowork := cfg.Getenv("GOWORK"); gowork {
case "off":
workFilePath = ""
case "", "auto":
workFilePath = findWorkspaceFile(base.Cwd())
default:
- if !filepath.IsAbs(cfg.WorkFile) {
- base.Fatalf("the path provided to -workfile must be an absolute path")
+ if !filepath.IsAbs(gowork) {
+ base.Fatalf("the path provided to GOWORK must be an absolute path")
}
- workFilePath = cfg.WorkFile
+ workFilePath = gowork
}
}
-// WorkFilePath returns the path of the go.work file, or "" if not in
+// WorkFilePath returns the absolute path of the go.work file, or "" if not in
// workspace mode. WorkFilePath must be called after InitWorkfile.
func WorkFilePath() string {
return workFilePath
@@ -1033,11 +1038,25 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile
for _, r := range modFiles[i].Replace {
if replacedByWorkFile[r.Old.Path] {
continue
- } else if prev, ok := replacements[r.Old]; ok && !curModuleReplaces[r.Old] && prev != r.New {
- base.Fatalf("go: conflicting replacements for %v:\n\t%v\n\t%v\nuse \"go work edit -replace %v=[override]\" to resolve", r.Old, prev, r.New, r.Old)
+ }
+ var newV module.Version = r.New
+ if WorkFilePath() != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) {
+ // Since we are in a workspace, we may be loading replacements from
+ // multiple go.mod files. Relative paths in those replacement are
+ // relative to the go.mod file, not the workspace, so the same string
+ // may refer to two different paths and different strings may refer to
+ // the same path. Convert them all to be absolute instead.
+ //
+ // (We could do this outside of a workspace too, but it would mean that
+ // replacement paths in error strings needlessly differ from what's in
+ // the go.mod file.)
+ newV.Path = filepath.Join(rootDirs[i], newV.Path)
+ }
+ if prev, ok := replacements[r.Old]; ok && !curModuleReplaces[r.Old] && prev != newV {
+ base.Fatalf("go: conflicting replacements for %v:\n\t%v\n\t%v\nuse \"go work edit -replace %v=[override]\" to resolve", r.Old, prev, newV, r.Old)
}
curModuleReplaces[r.Old] = true
- replacements[r.Old] = r.New
+ replacements[r.Old] = newV
v, ok := mainModules.highestReplaced[r.Old.Path]
if !ok || semver.Compare(r.Old.Version, v) > 0 {
@@ -1095,7 +1114,7 @@ func setDefaultBuildMod() {
if inWorkspaceMode() && cfg.BuildMod != "readonly" {
base.Fatalf("go: -mod may only be set to readonly when in workspace mode, but it is set to %q"+
"\n\tRemove the -mod flag to use the default readonly value,"+
- "\n\tor set -workfile=off to disable workspace mode.", cfg.BuildMod)
+ "\n\tor set GOWORK=off to disable workspace mode.", cfg.BuildMod)
}
// Don't override an explicit '-mod=' argument.
return
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
index 617b634d26..d4847efb98 100644
--- a/src/cmd/go/internal/modload/load.go
+++ b/src/cmd/go/internal/modload/load.go
@@ -479,7 +479,11 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs
}
if !found && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(ctx, absDir, rs) == "" {
m.Dirs = []string{}
- m.AddError(fmt.Errorf("directory prefix %s outside available modules", base.ShortPath(absDir)))
+ scope := "main module or its selected dependencies"
+ if inWorkspaceMode() {
+ scope = "modules listed in go.work or their selected dependencies"
+ }
+ m.AddError(fmt.Errorf("directory prefix %s does not contain %s", base.ShortPath(absDir), scope))
return
}
}
@@ -601,7 +605,11 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str
pkg := pathInModuleCache(ctx, absDir, rs)
if pkg == "" {
- return "", fmt.Errorf("directory %s outside available modules", base.ShortPath(absDir))
+ scope := "main module or its selected dependencies"
+ if inWorkspaceMode() {
+ scope = "modules listed in go.work or their selected dependencies"
+ }
+ return "", fmt.Errorf("directory %s outside %s", base.ShortPath(absDir), scope)
}
return pkg, nil
}
@@ -1667,24 +1675,6 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch
// load loads an individual package.
func (ld *loader) load(ctx context.Context, pkg *loadPkg) {
- if strings.Contains(pkg.path, "@") {
- // Leave for error during load.
- return
- }
- if build.IsLocalImport(pkg.path) || filepath.IsAbs(pkg.path) {
- // Leave for error during load.
- // (Module mode does not allow local imports.)
- return
- }
-
- if search.IsMetaPackage(pkg.path) {
- pkg.err = &invalidImportError{
- importPath: pkg.path,
- err: fmt.Errorf("%q is not an importable package; see 'go help packages'", pkg.path),
- }
- return
- }
-
var mg *ModuleGraph
if ld.requirements.pruning == unpruned {
var err error
diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go
index 627cf1dbc0..75c278a7df 100644
--- a/src/cmd/go/internal/modload/modfile.go
+++ b/src/cmd/go/internal/modload/modfile.go
@@ -802,7 +802,7 @@ var latestVersionIgnoringRetractionsCache par.Cache // path → queryLatestVersi
// an absolute path or a relative path starting with a '.' or '..'
// path component.
func ToDirectoryPath(path string) string {
- if modfile.IsDirectoryPath(path) {
+ if path == "." || modfile.IsDirectoryPath(path) {
return path
}
// The path is not a relative path or an absolute path, so make it relative
diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go
index c4b70b64fe..312b49ef5d 100644
--- a/src/cmd/go/internal/run/run.go
+++ b/src/cmd/go/internal/run/run.go
@@ -65,7 +65,6 @@ func init() {
CmdRun.Run = runRun // break init loop
work.AddBuildFlags(CmdRun, work.DefaultBuildFlags)
- base.AddWorkfileFlag(&CmdRun.Flag)
CmdRun.Flag.Var((*base.StringsFlag)(&work.ExecCmd), "exec", "")
}
@@ -74,8 +73,6 @@ func printStderr(args ...any) (int, error) {
}
func runRun(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
-
if shouldUseOutsideModuleMode(args) {
// Set global module flags for 'go run cmd@version'.
// This must be done before modload.Init, but we need to call work.BuildInit
@@ -85,7 +82,10 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) {
modload.RootMode = modload.NoRoot
modload.AllowMissingModuleImports()
modload.Init()
+ } else {
+ modload.InitWorkfile()
}
+
work.BuildInit()
var b work.Builder
b.Init()
diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go
index b9d1ec91ff..c046caca25 100644
--- a/src/cmd/go/internal/test/testflag.go
+++ b/src/cmd/go/internal/test/testflag.go
@@ -28,7 +28,6 @@ import (
func init() {
work.AddBuildFlags(CmdTest, work.OmitVFlag)
- base.AddWorkfileFlag(&CmdTest.Flag)
cf := CmdTest.Flag
cf.BoolVar(&testC, "c", false, "")
diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go
index 52502e95c6..1c0eb5407d 100644
--- a/src/cmd/go/internal/version/version.go
+++ b/src/cmd/go/internal/version/version.go
@@ -6,7 +6,6 @@
package version
import (
- "bytes"
"context"
"debug/buildinfo"
"errors"
@@ -156,12 +155,8 @@ func scanFile(file string, info fs.FileInfo, mustPrint bool) {
fmt.Printf("%s: %s\n", file, bi.GoVersion)
bi.GoVersion = "" // suppress printing go version again
- mod, err := bi.MarshalText()
- if err != nil {
- fmt.Fprintf(os.Stderr, "%s: formatting build info: %v\n", file, err)
- return
- }
+ mod := bi.String()
if *versionM && len(mod) > 0 {
- fmt.Printf("\t%s\n", bytes.ReplaceAll(mod[:len(mod)-1], []byte("\n"), []byte("\n\t")))
+ fmt.Printf("\t%s\n", strings.ReplaceAll(mod[:len(mod)-1], "\n", "\n\t"))
}
}
diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go
index 88b3c570a0..d3e0dd8116 100644
--- a/src/cmd/go/internal/vet/vet.go
+++ b/src/cmd/go/internal/vet/vet.go
@@ -13,6 +13,7 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/load"
+ "cmd/go/internal/modload"
"cmd/go/internal/trace"
"cmd/go/internal/work"
)
@@ -54,6 +55,7 @@ See also: go fmt, go fix.
func runVet(ctx context.Context, cmd *base.Command, args []string) {
vetFlags, pkgArgs := vetFlags(args)
+ modload.InitWorkfile() // The vet command does custom flag processing; initialize workspaces after that.
if cfg.DebugTrace != "" {
var close func() error
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index 1c278d3d99..0b5848a77d 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -130,14 +130,6 @@ and test commands:
directory, but it is not accessed. When -modfile is specified, an
alternate go.sum file is also used: its path is derived from the
-modfile flag by trimming the ".mod" extension and appending ".sum".
- -workfile file
- in module aware mode, use the given go.work file as a workspace file.
- By default or when -workfile is "auto", the go command searches for a
- file named go.work in the current directory and then containing directories
- until one is found. If a valid go.work file is found, the modules
- specified will collectively be used as the main modules. If -workfile
- is "off", or a go.work file is not found in "auto" mode, workspace
- mode is disabled.
-overlay file
read a JSON config file that provides an overlay for build operations.
The file is a JSON struct with a single field, named 'Replace', that
@@ -217,7 +209,6 @@ func init() {
AddBuildFlags(CmdBuild, DefaultBuildFlags)
AddBuildFlags(CmdInstall, DefaultBuildFlags)
- base.AddWorkfileFlag(&CmdBuild.Flag)
}
// Note that flags consulted by other parts of the code
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
index 48a74458bd..ac80f503cd 100644
--- a/src/cmd/go/internal/work/exec.go
+++ b/src/cmd/go/internal/work/exec.go
@@ -2013,6 +2013,7 @@ func (b *Builder) showOutput(a *Action, dir, desc, out string) {
if reldir := base.ShortPath(dir); reldir != dir {
suffix = strings.ReplaceAll(suffix, " "+dir, " "+reldir)
suffix = strings.ReplaceAll(suffix, "\n"+dir, "\n"+reldir)
+ suffix = strings.ReplaceAll(suffix, "\n\t"+dir, "\n\t"+reldir)
}
suffix = strings.ReplaceAll(suffix, " "+b.WorkDir, " $WORK")
diff --git a/src/cmd/go/internal/workcmd/edit.go b/src/cmd/go/internal/workcmd/edit.go
index e7b1b13271..1478c19389 100644
--- a/src/cmd/go/internal/workcmd/edit.go
+++ b/src/cmd/go/internal/workcmd/edit.go
@@ -84,9 +84,8 @@ writing it back to go.mod. The JSON output corresponds to these Go types:
Version string
}
-See the workspaces design proposal at
-https://go.googlesource.com/proposal/+/master/design/45713-workspace.md for
-more information.
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
`,
}
@@ -110,8 +109,6 @@ func init() {
cmdEdit.Flag.Var(flagFunc(flagEditworkDropUse), "dropuse", "")
cmdEdit.Flag.Var(flagFunc(flagEditworkReplace), "replace", "")
cmdEdit.Flag.Var(flagFunc(flagEditworkDropReplace), "dropreplace", "")
-
- base.AddWorkfileFlag(&cmdEdit.Flag)
}
func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
@@ -137,7 +134,7 @@ func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
}
if gowork == "" {
- base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using -workfile flag)")
+ base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
}
anyFlags :=
diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go
index cefecee832..c2513bac35 100644
--- a/src/cmd/go/internal/workcmd/init.go
+++ b/src/cmd/go/internal/workcmd/init.go
@@ -27,13 +27,14 @@ modules will be created.
Each argument path is added to a use directive in the go.work file. The
current go version will also be listed in the go.work file.
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
`,
Run: runInit,
}
func init() {
base.AddModCommonFlags(&cmdInit.Flag)
- base.AddWorkfileFlag(&cmdInit.Flag)
}
func runInit(ctx context.Context, cmd *base.Command, args []string) {
@@ -41,12 +42,10 @@ func runInit(ctx context.Context, cmd *base.Command, args []string) {
modload.ForceUseModules = true
- // TODO(matloob): support using the -workfile path
- // To do that properly, we'll have to make the module directories
- // make dirs relative to workFile path before adding the paths to
- // the directory entries
-
- workFile := filepath.Join(base.Cwd(), "go.work")
+ workFile := modload.WorkFilePath()
+ if workFile == "" {
+ workFile = filepath.Join(base.Cwd(), "go.work")
+ }
modload.CreateWorkFile(ctx, workFile, args)
}
diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go
index 948fc5d370..7712eb6b6b 100644
--- a/src/cmd/go/internal/workcmd/sync.go
+++ b/src/cmd/go/internal/workcmd/sync.go
@@ -33,20 +33,22 @@ if the dependency module's version is not already the same as the build
list's version. Note that Minimal Version Selection guarantees that the
build list's version of each module is always the same or higher than
that in each workspace module.
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
`,
Run: runSync,
}
func init() {
base.AddModCommonFlags(&cmdSync.Flag)
- base.AddWorkfileFlag(&cmdSync.Flag)
}
func runSync(ctx context.Context, cmd *base.Command, args []string) {
modload.ForceUseModules = true
modload.InitWorkfile()
if modload.WorkFilePath() == "" {
- base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using -workfile flag)")
+ base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
}
workGraph := modload.LoadModGraph(ctx, "")
diff --git a/src/cmd/go/internal/workcmd/use.go b/src/cmd/go/internal/workcmd/use.go
index d3bc1b7d55..e20041f79f 100644
--- a/src/cmd/go/internal/workcmd/use.go
+++ b/src/cmd/go/internal/workcmd/use.go
@@ -10,14 +10,17 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/fsys"
"cmd/go/internal/modload"
+ "cmd/go/internal/str"
"context"
+ "errors"
+ "fmt"
"io/fs"
"os"
"path/filepath"
)
var cmdUse = &base.Command{
- UsageLine: "go work use [-r] [moddirs]",
+ UsageLine: "go work use [-r] moddirs",
Short: "add modules to workspace file",
Long: `Use provides a command-line interface for adding
directories, optionally recursively, to a go.work file.
@@ -30,6 +33,9 @@ The -r flag searches recursively for modules in the argument
directories, and the use command operates as if each of the directories
were specified as arguments: namely, use directives will be added for
directories that exist, and removed for directories that do not exist.
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
`,
}
@@ -39,7 +45,6 @@ func init() {
cmdUse.Run = runUse // break init cycle
base.AddModCommonFlags(&cmdUse.Flag)
- base.AddWorkfileFlag(&cmdUse.Flag)
}
func runUse(ctx context.Context, cmd *base.Command, args []string) {
@@ -50,50 +55,40 @@ func runUse(ctx context.Context, cmd *base.Command, args []string) {
gowork = modload.WorkFilePath()
if gowork == "" {
- base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using -workfile flag)")
+ base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
}
workFile, err := modload.ReadWorkFile(gowork)
if err != nil {
base.Fatalf("go: %v", err)
}
+ workDir := filepath.Dir(gowork) // Absolute, since gowork itself is absolute.
haveDirs := make(map[string][]string) // absolute → original(s)
for _, use := range workFile.Use {
- var absDir string
+ var abs string
if filepath.IsAbs(use.Path) {
- absDir = filepath.Clean(use.Path)
+ abs = filepath.Clean(use.Path)
} else {
- absDir = filepath.Join(filepath.Dir(gowork), use.Path)
+ abs = filepath.Join(workDir, use.Path)
}
- haveDirs[absDir] = append(haveDirs[absDir], use.Path)
+ haveDirs[abs] = append(haveDirs[abs], use.Path)
}
- addDirs := make(map[string]bool)
- removeDirs := make(map[string]bool)
+ // keepDirs maps each absolute path to keep to the literal string to use for
+ // that path (either an absolute or a relative path), or the empty string if
+ // all entries for the absolute path should be removed.
+ keepDirs := make(map[string]string)
+
+ // lookDir updates the entry in keepDirs for the directory dir,
+ // which is either absolute or relative to the current working directory
+ // (not necessarily the directory containing the workfile).
lookDir := func(dir string) {
- // If the path is absolute, try to keep it absolute. If it's relative,
- // make it relative to the go.work file rather than the working directory.
- absDir := dir
- if !filepath.IsAbs(dir) {
- absDir = filepath.Join(base.Cwd(), dir)
- rel, err := filepath.Rel(filepath.Dir(gowork), absDir)
- if err == nil {
- // Normalize relative paths to use slashes, so that checked-in go.work
- // files with relative paths within the repo are platform-independent.
- dir = filepath.ToSlash(rel)
- } else {
- // The path can't be made relative to the go.work file,
- // so it must be kept absolute instead.
- dir = absDir
- }
- }
+ absDir, dir := pathRel(workDir, dir)
fi, err := os.Stat(filepath.Join(absDir, "go.mod"))
if err != nil {
if os.IsNotExist(err) {
- for _, origDir := range haveDirs[absDir] {
- removeDirs[origDir] = true
- }
+ keepDirs[absDir] = ""
return
}
base.Errorf("go: %v", err)
@@ -103,31 +98,99 @@ func runUse(ctx context.Context, cmd *base.Command, args []string) {
base.Errorf("go: %v is not regular", filepath.Join(dir, "go.mod"))
}
- if len(haveDirs[absDir]) == 0 {
- addDirs[dir] = true
+ if dup := keepDirs[absDir]; dup != "" && dup != dir {
+ base.Errorf(`go: already added "%s" as "%s"`, dir, dup)
}
+ keepDirs[absDir] = dir
}
+ if len(args) == 0 {
+ base.Fatalf("go: 'go work use' requires one or more directory arguments")
+ }
for _, useDir := range args {
- if *useR {
- fsys.Walk(useDir, func(path string, info fs.FileInfo, err error) error {
- if !info.IsDir() {
- return nil
+ if !*useR {
+ lookDir(useDir)
+ continue
+ }
+
+ // Add or remove entries for any subdirectories that still exist.
+ err := fsys.Walk(useDir, func(path string, info fs.FileInfo, err error) error {
+ if !info.IsDir() {
+ if info.Mode()&fs.ModeSymlink != 0 {
+ if target, err := fsys.Stat(path); err == nil && target.IsDir() {
+ fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path)
+ }
}
- lookDir(path)
return nil
- })
- continue
+ }
+ lookDir(path)
+ return nil
+ })
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ base.Errorf("go: %v", err)
}
- lookDir(useDir)
- }
- for dir := range removeDirs {
- workFile.DropUse(dir)
+ // Remove entries for subdirectories that no longer exist.
+ // Because they don't exist, they will be skipped by Walk.
+ absArg, _ := pathRel(workDir, useDir)
+ for absDir, _ := range haveDirs {
+ if str.HasFilePathPrefix(absDir, absArg) {
+ if _, ok := keepDirs[absDir]; !ok {
+ keepDirs[absDir] = "" // Mark for deletion.
+ }
+ }
+ }
}
- for dir := range addDirs {
- workFile.AddUse(dir, "")
+
+ base.ExitIfErrors()
+
+ for absDir, keepDir := range keepDirs {
+ nKept := 0
+ for _, dir := range haveDirs[absDir] {
+ if dir == keepDir { // (note that dir is always non-empty)
+ nKept++
+ } else {
+ workFile.DropUse(dir)
+ }
+ }
+ if keepDir != "" && nKept != 1 {
+ // If we kept more than one copy, delete them all.
+ // We'll recreate a unique copy with AddUse.
+ if nKept > 1 {
+ workFile.DropUse(keepDir)
+ }
+ workFile.AddUse(keepDir, "")
+ }
}
modload.UpdateWorkFile(workFile)
modload.WriteWorkFile(gowork, workFile)
}
+
+// pathRel returns the absolute and canonical forms of dir for use in a
+// go.work file located in directory workDir.
+//
+// If dir is relative, it is intepreted relative to base.Cwd()
+// and its canonical form is relative to workDir if possible.
+// If dir is absolute or cannot be made relative to workDir,
+// its canonical form is absolute.
+//
+// Canonical absolute paths are clean.
+// Canonical relative paths are clean and slash-separated.
+func pathRel(workDir, dir string) (abs, canonical string) {
+ if filepath.IsAbs(dir) {
+ abs = filepath.Clean(dir)
+ return abs, abs
+ }
+
+ abs = filepath.Join(base.Cwd(), dir)
+ rel, err := filepath.Rel(workDir, abs)
+ if err != nil {
+ // The path can't be made relative to the go.work file,
+ // so it must be kept absolute instead.
+ return abs, abs
+ }
+
+ // Normalize relative paths to use slashes, so that checked-in go.work
+ // files with relative paths within the repo are platform-independent.
+ return abs, modload.ToDirectoryPath(rel)
+}
diff --git a/src/cmd/go/internal/workcmd/work.go b/src/cmd/go/internal/workcmd/work.go
index d3cc250231..39c81e8f5d 100644
--- a/src/cmd/go/internal/workcmd/work.go
+++ b/src/cmd/go/internal/workcmd/work.go
@@ -12,7 +12,7 @@ import (
var CmdWork = &base.Command{
UsageLine: "go work",
Short: "workspace maintenance",
- Long: `Go workspace provides access to operations on workspaces.
+ Long: `Work provides access to operations on workspaces.
Note that support for workspaces is built into many other commands, not
just 'go work'.
@@ -20,6 +20,12 @@ just 'go work'.
See 'go help modules' for information about Go's module system of which
workspaces are a part.
+See https://go.dev/ref/mod#workspaces for an in-depth reference on
+workspaces.
+
+See https://go.dev/doc/tutorial/workspaces for an introductory
+tutorial on workspaces.
+
A workspace is specified by a go.work file that specifies a set of
module directories with the "use" directive. These modules are used as
root modules by the go command for builds and related operations. A
diff --git a/src/cmd/go/testdata/script/build_internal.txt b/src/cmd/go/testdata/script/build_internal.txt
index 25aa18cfcb..5b786f2fbc 100644
--- a/src/cmd/go/testdata/script/build_internal.txt
+++ b/src/cmd/go/testdata/script/build_internal.txt
@@ -10,8 +10,10 @@ stderr 'internal'
# Test internal packages outside GOROOT are respected
cd ../testinternal2
+env GO111MODULE=off
! go build -v .
stderr 'p\.go:3:8: use of internal package .*internal/w not allowed'
+env GO111MODULE=''
[gccgo] skip # gccgo does not have GOROOT
cd ../testinternal
diff --git a/src/cmd/go/testdata/script/list_json_fields.txt b/src/cmd/go/testdata/script/list_json_fields.txt
new file mode 100644
index 0000000000..58c9efa162
--- /dev/null
+++ b/src/cmd/go/testdata/script/list_json_fields.txt
@@ -0,0 +1,52 @@
+# Test using -json flag to specify specific fields.
+
+# Test -json produces "full" output by looking for multiple fields present.
+go list -json .
+stdout '"Name": "a"'
+stdout '"Stale": true'
+# Same thing for -json=true
+go list -json=true .
+stdout '"Name": "a"'
+stdout '"Stale": true'
+
+# Test -json=false produces non-json output.
+go list -json=false
+cmp stdout want-non-json.txt
+
+# Test -json=<field> keeps only that field.
+go list -json=Name
+cmp stdout want-json-name.txt
+
+# Test -json=<field> with multiple fields.
+go list -json=ImportPath,Name,GoFiles,Imports
+cmp stdout want-json-multiple.txt
+
+-- go.mod --
+module example.com/a
+
+go 1.18
+-- a.go --
+package a
+
+import "fmt"
+
+func F() {
+ fmt.Println("hey there")
+}
+-- want-non-json.txt --
+example.com/a
+-- want-json-name.txt --
+{
+ "Name": "a"
+}
+-- want-json-multiple.txt --
+{
+ "ImportPath": "example.com/a",
+ "Name": "a",
+ "GoFiles": [
+ "a.go"
+ ],
+ "Imports": [
+ "fmt"
+ ]
+}
diff --git a/src/cmd/go/testdata/script/mod_download_partial.txt b/src/cmd/go/testdata/script/mod_download_partial.txt
index 3a02fcd747..617b1fd8e3 100644
--- a/src/cmd/go/testdata/script/mod_download_partial.txt
+++ b/src/cmd/go/testdata/script/mod_download_partial.txt
@@ -15,12 +15,13 @@ cp empty $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.partial
go mod verify
# 'go list' should not load packages from the directory.
-# NOTE: the message "directory $dir outside available modules" is reported
-# for directories not in the main module, active modules in the module cache,
-# or local replacements. In this case, the directory is in the right place,
-# but it's incomplete, so 'go list' acts as if it's not an active module.
+# NOTE: the message "directory $dir outside main module or its selected dependencies"
+# is reported for directories not in the main module, active modules in the
+# module cache, or local replacements. In this case, the directory is in the
+# right place, but it's incomplete, so 'go list' acts as if it's not an
+# active module.
! go list $GOPATH/pkg/mod/rsc.io/quote@v1.5.2
-stderr 'outside available modules'
+stderr 'outside main module or its selected dependencies'
# 'go list -m' should not print the directory.
go list -m -f '{{.Dir}}' rsc.io/quote
diff --git a/src/cmd/go/testdata/script/mod_fs_patterns.txt b/src/cmd/go/testdata/script/mod_fs_patterns.txt
index a20fefd6d3..276d04e538 100644
--- a/src/cmd/go/testdata/script/mod_fs_patterns.txt
+++ b/src/cmd/go/testdata/script/mod_fs_patterns.txt
@@ -51,11 +51,11 @@ stdout '^at$'
# a package path.
cd ../badat/bad@
! go list .
-stderr 'directory . outside available modules'
+stderr 'directory . outside main module or its selected dependencies'
! go list $PWD
-stderr 'directory . outside available modules'
+stderr 'directory . outside main module or its selected dependencies'
! go list $PWD/...
-stderr 'directory . outside available modules'
+stderr 'directory . outside main module or its selected dependencies'
-- x/go.mod --
module m
diff --git a/src/cmd/go/testdata/script/mod_list_dir.txt b/src/cmd/go/testdata/script/mod_list_dir.txt
index 7ad65ffbc7..157d3b6a8a 100644
--- a/src/cmd/go/testdata/script/mod_list_dir.txt
+++ b/src/cmd/go/testdata/script/mod_list_dir.txt
@@ -24,7 +24,7 @@ go get rsc.io/sampler@v1.3.1
go list -f '{{.ImportPath}}' $GOPATH/pkg/mod/rsc.io/sampler@v1.3.1
stdout '^rsc.io/sampler$'
! go list -f '{{.ImportPath}}' $GOPATH/pkg/mod/rsc.io/sampler@v1.3.0
-stderr 'outside available modules'
+stderr 'outside main module or its selected dependencies'
-- go.mod --
module x
diff --git a/src/cmd/go/testdata/script/mod_list_replace_dir.txt b/src/cmd/go/testdata/script/mod_list_replace_dir.txt
index eac5ca7dd3..b446543916 100644
--- a/src/cmd/go/testdata/script/mod_list_replace_dir.txt
+++ b/src/cmd/go/testdata/script/mod_list_replace_dir.txt
@@ -9,7 +9,7 @@ go get
go mod download rsc.io/quote@v1.5.2
! go list $GOPATH/pkg/mod/rsc.io/quote@v1.5.2
-stderr '^directory ..[/\\]pkg[/\\]mod[/\\]rsc.io[/\\]quote@v1.5.2 outside available modules$'
+stderr '^directory ..[/\\]pkg[/\\]mod[/\\]rsc.io[/\\]quote@v1.5.2 outside main module or its selected dependencies$'
go list $GOPATH/pkg/mod/rsc.io/quote@v1.5.1
stdout 'rsc.io/quote'
diff --git a/src/cmd/go/testdata/script/run_issue51125.txt b/src/cmd/go/testdata/script/run_issue51125.txt
new file mode 100644
index 0000000000..8fa4486ca4
--- /dev/null
+++ b/src/cmd/go/testdata/script/run_issue51125.txt
@@ -0,0 +1,54 @@
+# Regression test for https://go.dev/issue/51125:
+# Relative import paths (a holdover from GOPATH) were accidentally allowed in module mode.
+
+cd $WORK
+
+# Relative imports should not be allowed with a go.mod file.
+
+! go run driver.go
+stderr '^driver.go:3:8: "./mypkg" is relative, but relative import paths are not supported in module mode$'
+
+go list -e -f '{{with .Error}}{{.}}{{end}}' -deps driver.go
+stdout '^driver.go:3:8: "./mypkg" is relative, but relative import paths are not supported in module mode$'
+! stderr .
+
+
+# Relative imports should not be allowed in module mode even without a go.mod file.
+rm go.mod
+
+! go run driver.go
+stderr '^driver.go:3:8: "./mypkg" is relative, but relative import paths are not supported in module mode$'
+
+go list -e -f '{{with .Error}}{{.}}{{end}}' -deps driver.go
+stdout '^driver.go:3:8: "./mypkg" is relative, but relative import paths are not supported in module mode$'
+! stderr .
+
+
+# In GOPATH mode, they're still allowed (but only outside of GOPATH/src).
+env GO111MODULE=off
+
+[!short] go run driver.go
+
+go list -deps driver.go
+
+
+-- $WORK/go.mod --
+module example
+
+go 1.17
+-- $WORK/driver.go --
+package main
+
+import "./mypkg"
+
+func main() {
+ mypkg.MyFunc()
+}
+-- $WORK/mypkg/code.go --
+package mypkg
+
+import "fmt"
+
+func MyFunc() {
+ fmt.Println("Hello, world!")
+}
diff --git a/src/cmd/go/testdata/script/run_work_versioned.txt b/src/cmd/go/testdata/script/run_work_versioned.txt
new file mode 100644
index 0000000000..eb0f22d1c0
--- /dev/null
+++ b/src/cmd/go/testdata/script/run_work_versioned.txt
@@ -0,0 +1,16 @@
+[short] skip
+go run example.com/printversion@v0.1.0
+stdout '^main is example.com/printversion v0.1.0$'
+
+-- go.work --
+go 1.18
+
+use (
+ .
+)
+-- go.mod --
+module example
+
+go 1.18
+
+require example.com/printversion v1.0.0
diff --git a/src/cmd/go/testdata/script/test_fuzz_return.txt b/src/cmd/go/testdata/script/test_fuzz_return.txt
new file mode 100644
index 0000000000..63275aad01
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_fuzz_return.txt
@@ -0,0 +1,19 @@
+[short] skip
+
+! go test .
+stdout '^panic: testing: fuzz target must not return a value \[recovered\]$'
+
+-- go.mod --
+module test
+go 1.18
+-- x_test.go --
+package test
+
+import "testing"
+
+func FuzzReturnErr(f *testing.F) {
+ f.Add("hello, validation!")
+ f.Fuzz(func(t *testing.T, in string) string {
+ return in
+ })
+}
diff --git a/src/cmd/go/testdata/script/test_relative_cmdline.txt b/src/cmd/go/testdata/script/test_relative_cmdline.txt
index 2f9c80fe4d..96f7b87265 100644
--- a/src/cmd/go/testdata/script/test_relative_cmdline.txt
+++ b/src/cmd/go/testdata/script/test_relative_cmdline.txt
@@ -1,5 +1,7 @@
# Relative imports in command line package
+env GO111MODULE=off
+
# Run tests outside GOPATH.
env GOPATH=$WORK/tmp
@@ -47,4 +49,4 @@ func TestF1(t *testing.T) {
if F() != p2.F() {
t.Fatal(F())
}
-} \ No newline at end of file
+}
diff --git a/src/cmd/go/testdata/script/work.txt b/src/cmd/go/testdata/script/work.txt
index cbb3746a69..fa1558f9e6 100644
--- a/src/cmd/go/testdata/script/work.txt
+++ b/src/cmd/go/testdata/script/work.txt
@@ -4,7 +4,7 @@ go env GOWORK
! stdout .
go work init ./a ./b
-cmp go.work go.work.want
+cmpenv go.work go.work.want
go env GOWORK
stdout '^'$WORK'(\\|/)gopath(\\|/)src(\\|/)go.work$'
@@ -32,7 +32,9 @@ stdout 'example.com/b'
go list -mod=readonly all
! go list -mod=mod all
stderr '^go: -mod may only be set to readonly when in workspace mode'
-go list -mod=mod -workfile=off all
+env GOWORK=off
+go list -mod=mod all
+env GOWORK=
# Test that duplicates in the use list return an error
cp go.work go.work.backup
@@ -53,7 +55,9 @@ go run example.com/d
# This exercises the code that determines which module command-line-arguments
# belongs to.
go list ./b/main.go
-go build -n -workfile=off -o foo foo.go
+env GOWORK=off
+go build -n -o foo foo.go
+env GOWORK=
go build -n -o foo foo.go
-- go.work.dup --
@@ -65,7 +69,7 @@ use (
../src/a
)
-- go.work.want --
-go 1.18
+go $goversion
use (
./a
diff --git a/src/cmd/go/testdata/script/work_edit.txt b/src/cmd/go/testdata/script/work_edit.txt
index fd04bbda6e..ad5de6286d 100644
--- a/src/cmd/go/testdata/script/work_edit.txt
+++ b/src/cmd/go/testdata/script/work_edit.txt
@@ -1,10 +1,10 @@
# Test editing go.work files.
go work init m
-cmp go.work go.work.want_initial
+cmpenv go.work go.work.want_initial
go work edit -use n
-cmp go.work go.work.want_use_n
+cmpenv go.work go.work.want_use_n
go work edit -go 1.18
cmp go.work go.work.want_go_118
@@ -30,7 +30,8 @@ cmp stdout go.work.want_print
go work edit -json -go 1.19 -use b -dropuse c -replace 'x.1@v1.4.0 = ../z' -dropreplace x.1 -dropreplace x.1@v1.3.0
cmp stdout go.work.want_json
-go work edit -print -fmt -workfile $GOPATH/src/unformatted
+env GOWORK=$GOPATH/src/unformatted
+go work edit -print -fmt
cmp stdout formatted
-- m/go.mod --
@@ -38,11 +39,11 @@ module m
go 1.18
-- go.work.want_initial --
-go 1.18
+go $goversion
use ./m
-- go.work.want_use_n --
-go 1.18
+go $goversion
use (
./m
@@ -158,4 +159,4 @@ use (
replace (
x.1 v1.3.0 => y.1 v1.4.0
x.1 v1.4.0 => ../z
-) \ No newline at end of file
+)
diff --git a/src/cmd/go/testdata/script/work_env.txt b/src/cmd/go/testdata/script/work_env.txt
index ec3d3be3ed..511bb4e2cb 100644
--- a/src/cmd/go/testdata/script/work_env.txt
+++ b/src/cmd/go/testdata/script/work_env.txt
@@ -13,6 +13,10 @@ cd src
go env GOWORK
stdout 'go.work'
+env GOWORK='off'
+go env GOWORK
+stdout 'off'
+
! go env -w GOWORK=off
stderr '^go: GOWORK cannot be modified$'
diff --git a/src/cmd/go/testdata/script/work_gowork.txt b/src/cmd/go/testdata/script/work_gowork.txt
new file mode 100644
index 0000000000..1cfbf0ca18
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_gowork.txt
@@ -0,0 +1,24 @@
+env GOWORK=stop.work
+! go list a # require absolute path
+! stderr panic
+env GOWORK=doesnotexist
+! go list a
+! stderr panic
+
+env GOWORK=$GOPATH/src/stop.work
+go list -n a
+go build -n a
+go test -n a
+
+-- stop.work --
+go 1.18
+
+use ./a
+-- a/a.go --
+package a
+-- a/a_test.go --
+package a
+-- a/go.mod --
+module a
+
+go 1.18 \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_init_gowork.txt b/src/cmd/go/testdata/script/work_init_gowork.txt
new file mode 100644
index 0000000000..55ac99b8c0
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_init_gowork.txt
@@ -0,0 +1,19 @@
+# Test that the GOWORK environment variable flag is used by go work init.
+
+! exists go.work
+go work init
+exists go.work
+
+env GOWORK=$GOPATH/src/foo/foo.work
+! exists foo/foo.work
+go work init
+exists foo/foo.work
+
+env GOWORK=
+cd foo/bar
+! go work init
+stderr 'already exists'
+
+# Create directories to make go.work files in.
+-- foo/dummy.txt --
+-- foo/bar/dummy.txt --
diff --git a/src/cmd/go/testdata/script/work_init_path.txt b/src/cmd/go/testdata/script/work_init_path.txt
new file mode 100644
index 0000000000..e3977882a0
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_init_path.txt
@@ -0,0 +1,17 @@
+# Regression test for https://go.dev/issue/51448.
+# 'go work init . foo/bar' should produce a go.work file
+# with the same paths as 'go work init; go work use -r .'.
+
+go work init . foo/bar
+mv go.work go.work.init
+
+go work init
+go work use -r .
+cmp go.work go.work.init
+
+-- go.mod --
+module example
+go 1.18
+-- foo/bar/go.mod --
+module example
+go 1.18
diff --git a/src/cmd/go/testdata/script/work_issue51204.txt b/src/cmd/go/testdata/script/work_issue51204.txt
new file mode 100644
index 0000000000..d483002060
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_issue51204.txt
@@ -0,0 +1,57 @@
+go work sync
+
+go list -f '{{.Dir}}' example.com/test
+stdout '^'$PWD${/}test'$'
+
+-- go.work --
+go 1.18
+
+use (
+ ./test2
+ ./test2/sub
+)
+-- test/go.mod --
+module example.com/test
+
+go 1.18
+-- test/file.go --
+package test
+
+func DoSomething() {
+}
+-- test2/go.mod --
+module example.com/test2
+
+go 1.18
+
+replace example.com/test => ../test
+
+require example.com/test v0.0.0-00010101000000-000000000000
+-- test2/file.go --
+package test2
+
+import (
+ "example.com/test"
+)
+
+func DoSomething() {
+ test.DoSomething()
+}
+-- test2/sub/go.mod --
+module example.com/test2/sub
+
+go 1.18
+
+replace example.com/test => ../../test
+
+require example.com/test v0.0.0
+-- test2/sub/file.go --
+package test2
+
+import (
+ "example.com/test"
+)
+
+func DoSomething() {
+ test.DoSomething()
+}
diff --git a/src/cmd/go/testdata/script/work_module_not_in_go_work.txt b/src/cmd/go/testdata/script/work_module_not_in_go_work.txt
new file mode 100644
index 0000000000..23d908c302
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_module_not_in_go_work.txt
@@ -0,0 +1,25 @@
+# This is a regression test for issue #49632.
+# The Go command should mention go.work if the user
+# tries to load a local package that's in a module
+# that's not in go.work and can't be resolved.
+
+! go list ./...
+stderr 'pattern ./...: directory prefix . does not contain modules listed in go.work or their selected dependencies'
+
+! go list ./a
+stderr 'directory a outside modules listed in go.work'
+
+-- go.work --
+go 1.18
+
+use ./b
+-- a/go.mod --
+module example.com/a
+
+go 1.18
+-- a/a.go --
+package a
+-- b/go.mod --
+module example.com/b
+
+go 1.18
diff --git a/src/cmd/go/testdata/script/work_nowork.txt b/src/cmd/go/testdata/script/work_nowork.txt
index b0320cbccb..b4c9b1d9cf 100644
--- a/src/cmd/go/testdata/script/work_nowork.txt
+++ b/src/cmd/go/testdata/script/work_nowork.txt
@@ -1,17 +1,17 @@
! go work use
-stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using -workfile flag\)$'
+stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using GOWORK environment variable\)$'
! go work use .
-stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using -workfile flag\)$'
+stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using GOWORK environment variable\)$'
! go work edit
-stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using -workfile flag\)$'
+stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using GOWORK environment variable\)$'
! go work edit -go=1.18
-stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using -workfile flag\)$'
+stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using GOWORK environment variable\)$'
! go work sync
-stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using -workfile flag\)$'
+stderr '^go: no go\.work file found\n\t\(run ''go work init'' first or specify path using GOWORK environment variable\)$'
-- go.mod --
module example
diff --git a/src/cmd/go/testdata/script/work_replace_conflict.txt b/src/cmd/go/testdata/script/work_replace_conflict.txt
index 81d1fcb043..7b71b0fbd7 100644
--- a/src/cmd/go/testdata/script/work_replace_conflict.txt
+++ b/src/cmd/go/testdata/script/work_replace_conflict.txt
@@ -2,7 +2,7 @@
# overriding it in the go.work file.
! go list -m example.com/dep
-stderr 'go: conflicting replacements for example.com/dep@v1.0.0:\n\t./dep1\n\t./dep2\nuse "go work edit -replace example.com/dep@v1.0.0=\[override\]" to resolve'
+stderr 'go: conflicting replacements for example.com/dep@v1.0.0:\n\t'$PWD${/}'dep1\n\t'$PWD${/}'dep2\nuse "go work edit -replace example.com/dep@v1.0.0=\[override\]" to resolve'
go work edit -replace example.com/dep@v1.0.0=./dep1
go list -m example.com/dep
stdout 'example.com/dep v1.0.0 => ./dep1'
@@ -15,7 +15,7 @@ use n
module example.com/m
require example.com/dep v1.0.0
-replace example.com/dep v1.0.0 => ./dep1
+replace example.com/dep v1.0.0 => ../dep1
-- m/m.go --
package m
@@ -28,7 +28,7 @@ func F() {
module example.com/n
require example.com/dep v1.0.0
-replace example.com/dep v1.0.0 => ./dep2
+replace example.com/dep v1.0.0 => ../dep2
-- n/n.go --
package n
diff --git a/src/cmd/go/testdata/script/work_use.txt b/src/cmd/go/testdata/script/work_use.txt
index f5ea89c900..12c8cecab7 100644
--- a/src/cmd/go/testdata/script/work_use.txt
+++ b/src/cmd/go/testdata/script/work_use.txt
@@ -14,16 +14,16 @@ use (
go 1.18
use (
- foo
- foo/bar/baz
+ ./foo
+ ./foo/bar/baz
)
-- go.want_work_other --
go 1.18
use (
- foo
- foo/bar/baz
- other
+ ./foo
+ ./foo/bar/baz
+ ./other
)
-- foo/go.mod --
module foo
diff --git a/src/cmd/go/testdata/script/work_use_deleted.txt b/src/cmd/go/testdata/script/work_use_deleted.txt
new file mode 100644
index 0000000000..b379cbc09d
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_use_deleted.txt
@@ -0,0 +1,22 @@
+go work use -r .
+cmp go.work go.work.want
+
+-- go.work --
+go 1.18
+
+use (
+ .
+ ./sub
+ ./sub/dir/deleted
+)
+-- go.work.want --
+go 1.18
+
+use ./sub/dir
+-- sub/README.txt --
+A go.mod file has been deleted from this directory.
+In addition, the entire subdirectory sub/dir/deleted
+has been deleted, along with sub/dir/deleted/go.mod.
+-- sub/dir/go.mod --
+module example/sub/dir
+go 1.18
diff --git a/src/cmd/go/testdata/script/work_use_dot.txt b/src/cmd/go/testdata/script/work_use_dot.txt
index c24aae33e8..8f210423ec 100644
--- a/src/cmd/go/testdata/script/work_use_dot.txt
+++ b/src/cmd/go/testdata/script/work_use_dot.txt
@@ -1,6 +1,7 @@
cp go.work go.work.orig
-# 'go work use .' should add an entry for the current directory.
+# If the current directory contains a go.mod file,
+# 'go work use .' should add an entry for it.
cd bar/baz
go work use .
cmp ../../go.work ../../go.work.rel
@@ -11,9 +12,28 @@ mv go.mod go.mod.bak
go work use .
cmp ../../go.work ../../go.work.orig
+# If the path is absolute, it should remain absolute.
mv go.mod.bak go.mod
go work use $PWD
-cmpenv ../../go.work ../../go.work.abs
+grep -count=1 '^use ' ../../go.work
+grep '^use ["]?'$PWD'["]?$' ../../go.work
+
+# An absolute path should replace an entry for the corresponding relative path
+# and vice-versa.
+go work use .
+cmp ../../go.work ../../go.work.rel
+go work use $PWD
+grep -count=1 '^use ' ../../go.work
+grep '^use ["]?'$PWD'["]?$' ../../go.work
+
+# If both the absolute and relative paths are named, 'go work use' should error
+# out: we don't know which one to use, and shouldn't add both because the
+# resulting workspace would contain a duplicate module.
+cp ../../go.work.orig ../../go.work
+! go work use $PWD .
+stderr '^go: already added "\./bar/baz" as "'$PWD'"$'
+cmp ../../go.work ../../go.work.orig
+
-- go.mod --
module example
@@ -23,11 +43,7 @@ go 1.18
-- go.work.rel --
go 1.18
-use bar/baz
--- go.work.abs --
-go 1.18
-
-use $PWD
+use ./bar/baz
-- bar/baz/go.mod --
module example/bar/baz
go 1.18
diff --git a/src/cmd/go/testdata/script/work_use_noargs.txt b/src/cmd/go/testdata/script/work_use_noargs.txt
new file mode 100644
index 0000000000..ca054344c6
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_use_noargs.txt
@@ -0,0 +1,11 @@
+# For now, 'go work use' requires arguments.
+# (Eventually, we may may it implicitly behave like 'go work use .'.
+
+! go work use
+stderr '^go: ''go work use'' requires one or more directory arguments'
+
+! go work use -r
+stderr '^go: ''go work use'' requires one or more directory arguments'
+
+-- go.work --
+go 1.18
diff --git a/src/cmd/go/testdata/script/work_vet.txt b/src/cmd/go/testdata/script/work_vet.txt
new file mode 100644
index 0000000000..e258fc0394
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_vet.txt
@@ -0,0 +1,19 @@
+! go vet ./a
+stderr 'fmt.Println call has possible formatting directive'
+
+-- go.work --
+go 1.18
+
+use ./a
+-- a/go.mod --
+module example.com/a
+
+go 1.18
+-- a/a.go --
+package a
+
+import "fmt"
+
+func A() {
+ fmt.Println("%s")
+} \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_workfile.txt b/src/cmd/go/testdata/script/work_workfile.txt
deleted file mode 100644
index b62918147e..0000000000
--- a/src/cmd/go/testdata/script/work_workfile.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-! go list -workfile=stop.work a # require absolute path
-! stderr panic
-! go list -workfile=doesnotexist a
-! stderr panic
-
-go list -n -workfile=$GOPATH/src/stop.work a
-go build -n -workfile=$GOPATH/src/stop.work a
-go test -n -workfile=$GOPATH/src/stop.work a
-
--- stop.work --
-go 1.18
-
-use ./a
--- a/a.go --
-package a
--- a/a_test.go --
-package a
--- a/go.mod --
-module a
-
-go 1.18 \ No newline at end of file
diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go
index 51f6e652d9..4280ed4459 100644
--- a/src/cmd/gofmt/gofmt.go
+++ b/src/cmd/gofmt/gofmt.go
@@ -52,6 +52,16 @@ const (
printerNormalizeNumbers = 1 << 30
)
+// fdSem guards the number of concurrently-open file descriptors.
+//
+// For now, this is arbitrarily set to 200, based on the observation that many
+// platforms default to a kernel limit of 256. Ideally, perhaps we should derive
+// it from rlimit on platforms that support that system call.
+//
+// File descriptors opened from outside of this package are not tracked,
+// so this limit may be approximate.
+var fdSem = make(chan bool, 200)
+
var (
rewrite func(*token.FileSet, *ast.File) *ast.File
parserMode parser.Mode
@@ -213,51 +223,9 @@ func (r *reporter) ExitCode() int {
// If info == nil, we are formatting stdin instead of a file.
// If in == nil, the source is the contents of the file with the given filename.
func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) error {
- if in == nil {
- var err error
- in, err = os.Open(filename)
- if err != nil {
- return err
- }
- }
-
- // Compute the file's size and read its contents with minimal allocations.
- //
- // If the size is unknown (or bogus, or overflows an int), fall back to
- // a size-independent ReadAll.
- var src []byte
- size := -1
- if info != nil && info.Mode().IsRegular() && int64(int(info.Size())) == info.Size() {
- size = int(info.Size())
- }
- if size+1 > 0 {
- // If we have the FileInfo from filepath.WalkDir, use it to make
- // a buffer of the right size and avoid ReadAll's reallocations.
- //
- // We try to read size+1 bytes so that we can detect modifications: if we
- // read more than size bytes, then the file was modified concurrently.
- // (If that happens, we could, say, append to src to finish the read, or
- // proceed with a truncated buffer — but the fact that it changed at all
- // indicates a possible race with someone editing the file, so we prefer to
- // stop to avoid corrupting it.)
- src = make([]byte, size+1)
- n, err := io.ReadFull(in, src)
- if err != nil && err != io.ErrUnexpectedEOF {
- return err
- }
- if n < size {
- return fmt.Errorf("error: size of %s changed during reading (from %d to %d bytes)", filename, size, n)
- } else if n > size {
- return fmt.Errorf("error: size of %s changed during reading (from %d to >=%d bytes)", filename, size, len(src))
- }
- src = src[:n]
- } else {
- // The file is not known to be regular, so we don't have a reliable size for it.
- var err error
- src, err = io.ReadAll(in)
- if err != nil {
- return err
- }
+ src, err := readFile(filename, info, in)
+ if err != nil {
+ return err
}
fileSet := token.NewFileSet()
@@ -306,7 +274,9 @@ func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) e
if err != nil {
return err
}
+ fdSem <- true
err = os.WriteFile(filename, res, perm)
+ <-fdSem
if err != nil {
os.Rename(bakname, filename)
return err
@@ -333,6 +303,65 @@ func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) e
return err
}
+// readFile reads the contents of filename, described by info.
+// If in is non-nil, readFile reads directly from it.
+// Otherwise, readFile opens and reads the file itself,
+// with the number of concurrently-open files limited by fdSem.
+func readFile(filename string, info fs.FileInfo, in io.Reader) ([]byte, error) {
+ if in == nil {
+ fdSem <- true
+ var err error
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ in = f
+ defer func() {
+ f.Close()
+ <-fdSem
+ }()
+ }
+
+ // Compute the file's size and read its contents with minimal allocations.
+ //
+ // If we have the FileInfo from filepath.WalkDir, use it to make
+ // a buffer of the right size and avoid ReadAll's reallocations.
+ //
+ // If the size is unknown (or bogus, or overflows an int), fall back to
+ // a size-independent ReadAll.
+ size := -1
+ if info != nil && info.Mode().IsRegular() && int64(int(info.Size())) == info.Size() {
+ size = int(info.Size())
+ }
+ if size+1 <= 0 {
+ // The file is not known to be regular, so we don't have a reliable size for it.
+ var err error
+ src, err := io.ReadAll(in)
+ if err != nil {
+ return nil, err
+ }
+ return src, nil
+ }
+
+ // We try to read size+1 bytes so that we can detect modifications: if we
+ // read more than size bytes, then the file was modified concurrently.
+ // (If that happens, we could, say, append to src to finish the read, or
+ // proceed with a truncated buffer — but the fact that it changed at all
+ // indicates a possible race with someone editing the file, so we prefer to
+ // stop to avoid corrupting it.)
+ src := make([]byte, size+1)
+ n, err := io.ReadFull(in, src)
+ if err != nil && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+ if n < size {
+ return nil, fmt.Errorf("error: size of %s changed during reading (from %d to %d bytes)", filename, size, n)
+ } else if n > size {
+ return nil, fmt.Errorf("error: size of %s changed during reading (from %d to >=%d bytes)", filename, size, len(src))
+ }
+ return src[:n], nil
+}
+
func main() {
// Arbitrarily limit in-flight work to 2MiB times the number of threads.
//
@@ -354,12 +383,16 @@ func gofmtMain(s *sequencer) {
flag.Parse()
if *cpuprofile != "" {
+ fdSem <- true
f, err := os.Create(*cpuprofile)
if err != nil {
s.AddReport(fmt.Errorf("creating cpu profile: %s", err))
return
}
- defer f.Close()
+ defer func() {
+ f.Close()
+ <-fdSem
+ }()
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
@@ -474,6 +507,9 @@ const chmodSupported = runtime.GOOS != "windows"
// with <number randomly chosen such that the file name is unique. backupFile returns
// the chosen file name.
func backupFile(filename string, data []byte, perm fs.FileMode) (string, error) {
+ fdSem <- true
+ defer func() { <-fdSem }()
+
// create backup file
f, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
if err != nil {
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 11af143f22..e0a3138c38 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -487,6 +487,7 @@ type FuncInfo struct {
OpenCodedDeferInfo *LSym
ArgInfo *LSym // argument info for traceback
ArgLiveInfo *LSym // argument liveness info for traceback
+ WrapInfo *LSym // for wrapper, info of wrapped function
FuncInfoSym *LSym
}
diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go
index fa616691eb..560e8e24c4 100644
--- a/src/cmd/internal/obj/objfile.go
+++ b/src/cmd/internal/obj/objfile.go
@@ -417,6 +417,7 @@ func contentHashSection(s *LSym) byte {
strings.HasSuffix(name, ".arginfo0") ||
strings.HasSuffix(name, ".arginfo1") ||
strings.HasSuffix(name, ".argliveinfo") ||
+ strings.HasSuffix(name, ".wrapinfo") ||
strings.HasSuffix(name, ".args_stackmap") ||
strings.HasSuffix(name, ".stkobj") {
return 'F' // go.func.* or go.funcrel.*
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 31fbb7f7bf..70ce9050b6 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -2552,7 +2552,13 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
case AROTLW:
o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
default:
- o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
+ // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
+ // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
+ o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
+ } else {
+ o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ }
}
case 7: /* mov r, soreg ==> stw o(r) */
diff --git a/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go b/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go
index 68d9589bf2..843398d3b0 100644
--- a/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go
+++ b/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build riscv64
// +build riscv64
package testbranch
diff --git a/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s b/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s
index cce296feb5..d7141e38c1 100644
--- a/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s
+++ b/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build riscv64
// +build riscv64
#include "textflag.h"
diff --git a/src/cmd/internal/objabi/funcdata.go b/src/cmd/internal/objabi/funcdata.go
index 4d49a8d548..05a1d49dec 100644
--- a/src/cmd/internal/objabi/funcdata.go
+++ b/src/cmd/internal/objabi/funcdata.go
@@ -23,6 +23,7 @@ const (
FUNCDATA_OpenCodedDeferInfo = 4
FUNCDATA_ArgInfo = 5
FUNCDATA_ArgLiveInfo = 6
+ FUNCDATA_WrapInfo = 7
// ArgsSizeUnknown is set in Func.argsize to mark all functions
// whose argument size is unknown (C vararg functions, and
diff --git a/src/cmd/internal/src/pos.go b/src/cmd/internal/src/pos.go
index b6816a56e0..373a22a7f2 100644
--- a/src/cmd/internal/src/pos.go
+++ b/src/cmd/internal/src/pos.go
@@ -389,9 +389,12 @@ func makeBogusLico() lico {
}
func makeLico(line, col uint) lico {
- if line > lineMax {
+ if line >= lineMax {
// cannot represent line, use max. line so we have some information
line = lineMax
+ // Drop column information if line number saturates.
+ // Ensures line+col is monotonic. See issue 51193.
+ col = 0
}
if col > colMax {
// cannot represent column, use max. column so we have some information
diff --git a/src/cmd/internal/src/pos_test.go b/src/cmd/internal/src/pos_test.go
index d4cd0e7ff1..cdf4ab4081 100644
--- a/src/cmd/internal/src/pos_test.go
+++ b/src/cmd/internal/src/pos_test.go
@@ -140,8 +140,8 @@ func TestLico(t *testing.T) {
{makeLico(1, 0), ":1", 1, 0},
{makeLico(1, 1), ":1:1", 1, 1},
{makeLico(2, 3), ":2:3", 2, 3},
- {makeLico(lineMax, 1), fmt.Sprintf(":%d:1", lineMax), lineMax, 1},
- {makeLico(lineMax+1, 1), fmt.Sprintf(":%d:1", lineMax), lineMax, 1}, // line too large, stick with max. line
+ {makeLico(lineMax, 1), fmt.Sprintf(":%d", lineMax), lineMax, 1},
+ {makeLico(lineMax+1, 1), fmt.Sprintf(":%d", lineMax), lineMax, 1}, // line too large, stick with max. line
{makeLico(1, colMax), ":1", 1, colMax},
{makeLico(1, colMax+1), ":1", 1, 0}, // column too large
{makeLico(lineMax+1, colMax+1), fmt.Sprintf(":%d", lineMax), lineMax, 0},
@@ -170,8 +170,8 @@ func TestIsStmt(t *testing.T) {
{makeLico(1, 1), ":1:1" + def, 1, 1},
{makeLico(1, 1).withIsStmt(), ":1:1" + is, 1, 1},
{makeLico(1, 1).withNotStmt(), ":1:1" + not, 1, 1},
- {makeLico(lineMax, 1), fmt.Sprintf(":%d:1", lineMax) + def, lineMax, 1},
- {makeLico(lineMax+1, 1), fmt.Sprintf(":%d:1", lineMax) + def, lineMax, 1}, // line too large, stick with max. line
+ {makeLico(lineMax, 1), fmt.Sprintf(":%d", lineMax) + def, lineMax, 1},
+ {makeLico(lineMax+1, 1), fmt.Sprintf(":%d", lineMax) + def, lineMax, 1}, // line too large, stick with max. line
{makeLico(1, colMax), ":1" + def, 1, colMax},
{makeLico(1, colMax+1), ":1" + def, 1, 0}, // column too large
{makeLico(lineMax+1, colMax+1), fmt.Sprintf(":%d", lineMax) + def, lineMax, 0},
@@ -214,9 +214,9 @@ func TestLogue(t *testing.T) {
{makeLico(1, 1).withXlogue(PosPrologueEnd), ":1:1" + defs + pro, 1, 1},
{makeLico(1, 1).withXlogue(PosEpilogueBegin), ":1:1" + defs + epi, 1, 1},
- {makeLico(lineMax, 1).withXlogue(PosDefaultLogue), fmt.Sprintf(":%d:1", lineMax) + defs + defp, lineMax, 1},
- {makeLico(lineMax, 1).withXlogue(PosPrologueEnd), fmt.Sprintf(":%d:1", lineMax) + defs + pro, lineMax, 1},
- {makeLico(lineMax, 1).withXlogue(PosEpilogueBegin), fmt.Sprintf(":%d:1", lineMax) + defs + epi, lineMax, 1},
+ {makeLico(lineMax, 1).withXlogue(PosDefaultLogue), fmt.Sprintf(":%d", lineMax) + defs + defp, lineMax, 1},
+ {makeLico(lineMax, 1).withXlogue(PosPrologueEnd), fmt.Sprintf(":%d", lineMax) + defs + pro, lineMax, 1},
+ {makeLico(lineMax, 1).withXlogue(PosEpilogueBegin), fmt.Sprintf(":%d", lineMax) + defs + epi, lineMax, 1},
} {
x := test.x
if got := formatstr("", x.Line(), x.Col(), true) + fmt.Sprintf(":%d:%d", x.IsStmt(), x.Xlogue()); got != test.string {
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index 47b4921cd8..4aaed7baf0 100644
--- a/src/cmd/link/internal/ld/dwarf.go
+++ b/src/cmd/link/internal/ld/dwarf.go
@@ -471,6 +471,11 @@ func (d *dwctxt) dotypedef(parent *dwarf.DWDie, name string, def *dwarf.DWDie) *
if strings.HasPrefix(name, "struct {") {
return nil
}
+ // cmd/compile uses "noalg.struct {...}" as type name when hash and eq algorithm generation of
+ // this struct type is suppressed.
+ if strings.HasPrefix(name, "noalg.struct {") {
+ return nil
+ }
if strings.HasPrefix(name, "chan ") {
return nil
}
diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index 720c03afd2..39066da286 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -567,6 +567,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind {
strings.HasSuffix(name, ".arginfo0"),
strings.HasSuffix(name, ".arginfo1"),
strings.HasSuffix(name, ".argliveinfo"),
+ strings.HasSuffix(name, ".wrapinfo"),
strings.HasSuffix(name, ".args_stackmap"),
strings.HasSuffix(name, ".stkobj"):
ldr.SetAttrNotInSymbolTable(s, true)
diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go
index 924bce10b7..aa6780f847 100644
--- a/src/compress/gzip/gunzip.go
+++ b/src/compress/gzip/gunzip.go
@@ -211,14 +211,14 @@ func (z *Reader) readHeader() (hdr Header, err error) {
var s string
if flg&flagName != 0 {
if s, err = z.readString(); err != nil {
- return hdr, err
+ return hdr, noEOF(err)
}
hdr.Name = s
}
if flg&flagComment != 0 {
if s, err = z.readString(); err != nil {
- return hdr, err
+ return hdr, noEOF(err)
}
hdr.Comment = s
}
diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go
index 17c23e8a9b..be69185463 100644
--- a/src/compress/gzip/gunzip_test.go
+++ b/src/compress/gzip/gunzip_test.go
@@ -359,6 +359,38 @@ var gunzipTests = []gunzipTest{
},
io.ErrUnexpectedEOF,
},
+ {
+ "hello.txt",
+ "gzip header with truncated name",
+ "hello world\n",
+ []byte{
+ 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a,
+ 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
+ 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
+ 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
+ 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00,
+ 0x00, 0x00,
+ 0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xff, 0x01,
+ },
+ io.ErrUnexpectedEOF,
+ },
+ {
+ "",
+ "gzip header with truncated comment",
+ "hello world\n",
+ []byte{
+ 0x1f, 0x8b, 0x08, 0x10, 0xc8, 0x58, 0x13, 0x4a,
+ 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e,
+ 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9,
+ 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1,
+ 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00,
+ 0x00, 0x00,
+ 0x1f, 0x8b, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xff, 0x01,
+ },
+ io.ErrUnexpectedEOF,
+ },
}
func TestDecompressor(t *testing.T) {
@@ -495,23 +527,45 @@ func TestNilStream(t *testing.T) {
}
func TestTruncatedStreams(t *testing.T) {
- const data = "\x1f\x8b\b\x04\x00\tn\x88\x00\xff\a\x00foo bar\xcbH\xcd\xc9\xc9\xd7Q(\xcf/\xcaI\x01\x04:r\xab\xff\f\x00\x00\x00"
+ cases := []struct {
+ name string
+ data []byte
+ }{
+ {
+ name: "original",
+ data: []byte("\x1f\x8b\b\x04\x00\tn\x88\x00\xff\a\x00foo bar\xcbH\xcd\xc9\xc9\xd7Q(\xcf/\xcaI\x01\x04:r\xab\xff\f\x00\x00\x00"),
+ },
+ {
+ name: "truncated name",
+ data: []byte{
+ 0x1f, 0x8b, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01,
+ },
+ },
+ {
+ name: "truncated comment",
+ data: []byte{
+ 0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01,
+ },
+ },
+ }
// Intentionally iterate starting with at least one byte in the stream.
- for i := 1; i < len(data)-1; i++ {
- r, err := NewReader(strings.NewReader(data[:i]))
- if err != nil {
+ for _, tc := range cases {
+ for i := 1; i < len(tc.data); i++ {
+ r, err := NewReader(strings.NewReader(string(tc.data[:i])))
+ if err != nil {
+ if err != io.ErrUnexpectedEOF {
+ t.Errorf("NewReader(%s-%d) on truncated stream: got %v, want %v", tc.name, i, err, io.ErrUnexpectedEOF)
+ }
+ continue
+ }
+ _, err = io.Copy(io.Discard, r)
+ if ferr, ok := err.(*flate.ReadError); ok {
+ err = ferr.Err
+ }
if err != io.ErrUnexpectedEOF {
- t.Errorf("NewReader(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF)
+ t.Errorf("io.Copy(%s-%d) on truncated stream: got %v, want %v", tc.name, i, err, io.ErrUnexpectedEOF)
}
- continue
- }
- _, err = io.Copy(io.Discard, r)
- if ferr, ok := err.(*flate.ReadError); ok {
- err = ferr.Err
- }
- if err != io.ErrUnexpectedEOF {
- t.Errorf("io.Copy(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF)
}
}
}
diff --git a/src/crypto/aes/asm_amd64.s b/src/crypto/aes/asm_amd64.s
index ad871ec5de..ed831bf47f 100644
--- a/src/crypto/aes/asm_amd64.s
+++ b/src/crypto/aes/asm_amd64.s
@@ -15,7 +15,7 @@ TEXT ·encryptBlockAsm(SB),NOSPLIT,$0
ADDQ $16, AX
PXOR X1, X0
SUBQ $12, CX
- JE Lenc196
+ JE Lenc192
JB Lenc128
Lenc256:
MOVUPS 0(AX), X1
@@ -23,7 +23,7 @@ Lenc256:
MOVUPS 16(AX), X1
AESENC X1, X0
ADDQ $32, AX
-Lenc196:
+Lenc192:
MOVUPS 0(AX), X1
AESENC X1, X0
MOVUPS 16(AX), X1
@@ -64,7 +64,7 @@ TEXT ·decryptBlockAsm(SB),NOSPLIT,$0
ADDQ $16, AX
PXOR X1, X0
SUBQ $12, CX
- JE Ldec196
+ JE Ldec192
JB Ldec128
Ldec256:
MOVUPS 0(AX), X1
@@ -72,7 +72,7 @@ Ldec256:
MOVUPS 16(AX), X1
AESDEC X1, X0
ADDQ $32, AX
-Ldec196:
+Ldec192:
MOVUPS 0(AX), X1
AESDEC X1, X0
MOVUPS 16(AX), X1
@@ -115,7 +115,7 @@ TEXT ·expandKeyAsm(SB),NOSPLIT,$0
ADDQ $16, BX
PXOR X4, X4 // _expand_key_* expect X4 to be zero
CMPL CX, $12
- JE Lexp_enc196
+ JE Lexp_enc192
JB Lexp_enc128
Lexp_enc256:
MOVUPS 16(AX), X2
@@ -148,7 +148,7 @@ Lexp_enc256:
AESKEYGENASSIST $0x40, X2, X1
CALL _expand_key_256a<>(SB)
JMP Lexp_dec
-Lexp_enc196:
+Lexp_enc192:
MOVQ 16(AX), X2
AESKEYGENASSIST $0x01, X2, X1
CALL _expand_key_192a<>(SB)
diff --git a/src/crypto/aes/asm_ppc64le.s b/src/crypto/aes/asm_ppc64le.s
index f3a96a3a17..5eae675322 100644
--- a/src/crypto/aes/asm_ppc64le.s
+++ b/src/crypto/aes/asm_ppc64le.s
@@ -498,3 +498,228 @@ loop_dec:
RET // blr
+// Remove defines from above so they can be defined here
+#undef INP
+#undef OUT
+#undef ROUNDS
+#undef KEY
+#undef TMP
+#undef OUTPERM
+#undef OUTMASK
+#undef OUTHEAD
+#undef OUTTAIL
+
+// CBC encrypt or decrypt
+// R3 src
+// R4 dst
+// R5 len
+// R6 key
+// R7 iv
+// R8 enc=1 dec=0
+// Ported from: aes_p8_cbc_encrypt
+// Register usage:
+// R9: ROUNDS
+// R10: Index
+// V0: initialized to 0
+// V3: initialized to mask
+// V4: IV
+// V5: SRC
+// V6: IV perm mask
+// V7: DST
+// V10: KEY perm mask
+
+#define INP R3
+#define OUT R4
+#define LEN R5
+#define KEY R6
+#define IVP R7
+#define ENC R8
+#define ROUNDS R9
+#define IDX R10
+
+#define RNDKEY0 V0
+#define RNDKEY1 V1
+#define INOUT V2
+#define TMP V3
+
+#define IVEC V4
+#define INPTAIL V5
+#define INPPERM V6
+#define OUTHEAD V7
+#define OUTPERM V8
+#define OUTMASK V9
+#define KEYPERM V10
+
+// Vector loads are done using LVX followed by
+// a VPERM using mask generated from previous
+// LVSL or LVSR instruction, to obtain the correct
+// bytes if address is unaligned.
+
+// Encryption is done with VCIPHER and VCIPHERLAST
+// Decryption is done with VNCIPHER and VNCIPHERLAST
+
+// Encrypt and decrypt are done as follows:
+// - INOUT value is initialized in outer loop.
+// - ROUNDS value is adjusted for loop unrolling.
+// - Encryption/decryption is done in loop based on
+// adjusted ROUNDS value.
+// - Final INOUT value is encrypted/decrypted and stored.
+
+// Note: original implementation had an 8X version
+// for decryption which was omitted to avoid the
+// complexity.
+
+TEXT ·cryptBlocksChain(SB), NOSPLIT|NOFRAME, $0
+ MOVD src+0(FP), INP
+ MOVD dst+8(FP), OUT
+ MOVD length+16(FP), LEN
+ MOVD key+24(FP), KEY
+ MOVD iv+32(FP), IVP
+ MOVD enc+40(FP), ENC
+
+ CMPU LEN, $16 // cmpldi r5,16
+ BC 14, 0, LR // bltlr-
+ CMPW ENC, $0 // cmpwi r8,0
+ MOVD $15, IDX // li r10,15
+ VXOR RNDKEY0, RNDKEY0, RNDKEY0 // vxor v0,v0,v0
+ VSPLTISB $0xf, TMP // vspltisb $0xf,v3
+
+ LVX (IVP)(R0), IVEC // lvx v4,r0,r7
+ LVSL (IVP)(R0), INPPERM // lvsl v6,r0,r7
+ LVX (IVP)(IDX), INPTAIL // lvx v5,r10,r7
+ VXOR INPPERM, TMP, INPPERM // vxor v3, v6, v6
+ VPERM IVEC, INPTAIL, INPPERM, IVEC // vperm v4,v4,v5,v6
+ NEG INP, R11 // neg r11,r3
+ LVSR (KEY)(R0), KEYPERM // lvsr v10,r0,r6
+ MOVWZ 240(KEY), ROUNDS // lwz r9,240(r6)
+ LVSR (R11)(R0), V6 // lvsr v6,r0,r11
+ LVX (INP)(R0), INPTAIL // lvx v5,r0,r3
+ ADD $15, INP // addi r3,r3,15
+ VXOR INPPERM, TMP, INPPERM // vxor v6, v3, v6
+ LVSL (OUT)(R0), OUTPERM // lvsl v8,r0,r4
+ VSPLTISB $-1, OUTMASK // vspltisb v9,-1
+ LVX (OUT)(R0), OUTHEAD // lvx v7,r0,r4
+ VPERM OUTMASK, RNDKEY0, OUTPERM, OUTMASK // vperm v9,v9,v0,v8
+ VXOR OUTPERM, TMP, OUTPERM // vxor v8, v3, v8
+ SRW $1, ROUNDS // rlwinm r9,r9,31,1,31
+
+ MOVD $16, IDX // li r10,16
+ ADD $-1, ROUNDS // addi r9,r9,-1
+ BEQ Lcbc_dec // beq
+ PCALIGN $16
+
+ // Outer loop: initialize encrypted value (INOUT)
+ // Load input (INPTAIL) ivec (IVEC)
+Lcbc_enc:
+ VOR INPTAIL, INPTAIL, INOUT // vor v2,v5,v5
+ LVX (INP)(R0), INPTAIL // lvx v5,r0,r3
+ ADD $16, INP // addi r3,r3,16
+ MOVD ROUNDS, CTR // mtctr r9
+ ADD $-16, LEN // addi r5,r5,-16
+ LVX (KEY)(R0), RNDKEY0 // lvx v0,r0,r6
+ VPERM INOUT, INPTAIL, INPPERM, INOUT // vperm v2,v2,v5,v6
+ LVX (KEY)(IDX), RNDKEY1 // lvx v1,r10,r6
+ ADD $16, IDX // addi r10,r10,16
+ VPERM RNDKEY1, RNDKEY0, KEYPERM, RNDKEY0 // vperm v0,v1,v0,v10
+ VXOR INOUT, RNDKEY0, INOUT // vxor v2,v2,v0
+ LVX (KEY)(IDX), RNDKEY0 // lvx v0,r10,r6
+ ADD $16, IDX // addi r10,r10,16
+ VXOR INOUT, IVEC, INOUT // vxor v2,v2,v4
+
+ // Encryption loop of INOUT using RNDKEY0 and RNDKEY1
+Loop_cbc_enc:
+ VPERM RNDKEY0, RNDKEY1, KEYPERM, RNDKEY1 // vperm v1,v1,v0,v10
+ VCIPHER INOUT, RNDKEY1, INOUT // vcipher v2,v2,v1
+ LVX (KEY)(IDX), RNDKEY1 // lvx v1,r10,r6
+ ADD $16, IDX // addi r10,r10,16
+ VPERM RNDKEY1, RNDKEY0, KEYPERM, RNDKEY0 // vperm v0,v0,v1,v10
+ VCIPHER INOUT, RNDKEY0, INOUT // vcipher v2,v2,v0
+ LVX (KEY)(IDX), RNDKEY0 // lvx v0,r10,r6
+ ADD $16, IDX // addi r10,r10,16
+ BC 16, 0, Loop_cbc_enc // bdnz Loop_cbc_enc
+
+ // Encrypt tail values and store INOUT
+ VPERM RNDKEY0, RNDKEY1, KEYPERM, RNDKEY1 // vperm v1,v1,v0,v10
+ VCIPHER INOUT, RNDKEY1, INOUT // vcipher v2,v2,v1
+ LVX (KEY)(IDX), RNDKEY1 // lvx v1,r10,r6
+ MOVD $16, IDX // li r10,16
+ VPERM RNDKEY1, RNDKEY0, KEYPERM, RNDKEY0 // vperm v0,v0,v1,v10
+ VCIPHERLAST INOUT, RNDKEY0, IVEC // vcipherlast v4,v2,v0
+ CMPU LEN, $16 // cmpldi r5,16
+ VPERM IVEC, IVEC, OUTPERM, TMP // vperm v3,v4,v4,v8
+ VSEL OUTHEAD, TMP, OUTMASK, INOUT // vsel v2,v7,v3,v9
+ VOR TMP, TMP, OUTHEAD // vor v7,v3,v3
+ STVX INOUT, (OUT)(R0) // stvx v2,r0,r4
+ ADD $16, OUT // addi r4,r4,16
+ BGE Lcbc_enc // bge Lcbc_enc
+ BR Lcbc_done // b Lcbc_done
+
+ // Outer loop: initialize decrypted value (INOUT)
+ // Load input (INPTAIL) ivec (IVEC)
+Lcbc_dec:
+ VOR INPTAIL, INPTAIL, TMP // vor v3,v5,v5
+ LVX (INP)(R0), INPTAIL // lvx v5,r0,r3
+ ADD $16, INP // addi r3,r3,16
+ MOVD ROUNDS, CTR // mtctr r9
+ ADD $-16, LEN // addi r5,r5,-16
+ LVX (KEY)(R0), RNDKEY0 // lvx v0,r0,r6
+ VPERM TMP, INPTAIL, INPPERM, TMP // vperm v3,v3,v5,v6
+ LVX (KEY)(IDX), RNDKEY1 // lvx v1,r10,r6
+ ADD $16, IDX // addi r10,r10,16
+ VPERM RNDKEY1, RNDKEY0, KEYPERM, RNDKEY0 // vperm v0,v1,v0,v10
+ VXOR TMP, RNDKEY0, INOUT // vxor v2,v3,v0
+ LVX (KEY)(IDX), RNDKEY0 // lvx v0,r10,r6
+ ADD $16, IDX // addi r10,r10,16
+ PCALIGN $16
+
+ // Decryption loop of INOUT using RNDKEY0 and RNDKEY1
+Loop_cbc_dec:
+ VPERM RNDKEY0, RNDKEY1, KEYPERM, RNDKEY1 // vperm v1,v0,v1,v10
+ VNCIPHER INOUT, RNDKEY1, INOUT // vncipher v2,v2,v1
+ LVX (KEY)(IDX), RNDKEY1 // lvx v1,r10,r6
+ ADD $16, IDX // addi r10,r10,16
+ VPERM RNDKEY1, RNDKEY0, KEYPERM, RNDKEY0 // vperm v0,v1,v0,v10
+ VNCIPHER INOUT, RNDKEY0, INOUT // vncipher v2,v2,v0
+ LVX (KEY)(IDX), RNDKEY0 // lvx v0,r10,r6
+ ADD $16, IDX // addi r10,r10,16
+ BC 16, 0, Loop_cbc_dec // bdnz
+
+ // Decrypt tail values and store INOUT
+ VPERM RNDKEY0, RNDKEY1, KEYPERM, RNDKEY1 // vperm v1,v0,v1,v10
+ VNCIPHER INOUT, RNDKEY1, INOUT // vncipher v2,v2,v1
+ LVX (KEY)(IDX), RNDKEY1 // lvx v1,r10,r6
+ MOVD $16, IDX // li r10,16
+ VPERM RNDKEY1, RNDKEY0, KEYPERM, RNDKEY0 // vperm v0,v1,v0,v10
+ VNCIPHERLAST INOUT, RNDKEY0, INOUT // vncipherlast v2,v2,v0
+ CMPU LEN, $16 // cmpldi r5,16
+ VXOR INOUT, IVEC, INOUT // vxor v2,v2,v4
+ VOR TMP, TMP, IVEC // vor v4,v3,v3
+ VPERM INOUT, INOUT, OUTPERM, TMP // vperm v3,v2,v2,v8
+ VSEL OUTHEAD, TMP, OUTMASK, INOUT // vsel v2,v7,v3,v9
+ VOR TMP, TMP, OUTHEAD // vor v7,v3,v3
+ STVX INOUT, (OUT)(R0) // stvx v2,r0,r4
+ ADD $16, OUT // addi r4,r4,16
+ BGE Lcbc_dec // bge
+
+Lcbc_done:
+ ADD $-1, OUT // addi r4,r4,-1
+ LVX (OUT)(R0), INOUT // lvx v2,r0,r4
+ VSEL OUTHEAD, INOUT, OUTMASK, INOUT // vsel v2,v7,v2,v9
+ STVX INOUT, (OUT)(R0) // stvx v2,r0,r4
+ NEG IVP, ENC // neg r8,r7
+ MOVD $15, IDX // li r10,15
+ VXOR RNDKEY0, RNDKEY0, RNDKEY0 // vxor v0,v0,v0
+ VSPLTISB $-1, OUTMASK // vspltisb v9,-1
+ VSPLTISB $0xf, TMP // vspltisb v3, 0xf
+ LVSR (ENC)(R0), OUTPERM // lvsl v8,r0,r8
+ VPERM OUTMASK, RNDKEY0, OUTPERM, OUTMASK // vperm v9,v9,v0,v8
+ VXOR OUTPERM, TMP, OUTPERM // vxor v9, v3, v9
+ LVX (IVP)(R0), OUTHEAD // lvx v7,r0,r7
+ VPERM IVEC, IVEC, OUTPERM, IVEC // vperm v4,v4,v4,v8
+ VSEL OUTHEAD, IVEC, OUTMASK, INOUT // vsel v2,v7,v4,v9
+ LVX (IVP)(IDX), INPTAIL // lvx v5,r10,r7
+ STVX INOUT, (IVP)(R0) // stvx v2,r0,r7
+ VSEL IVEC, INPTAIL, OUTMASK, INOUT // vsel v2,v4,v5,v9
+ STVX INOUT, (IVP)(IDX) // stvx v2,r10,r7
+ RET // bclr 20,lt,0
+
diff --git a/src/crypto/aes/cbc_ppc64le.go b/src/crypto/aes/cbc_ppc64le.go
new file mode 100644
index 0000000000..fa8a430ed4
--- /dev/null
+++ b/src/crypto/aes/cbc_ppc64le.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/subtle"
+)
+
+// Assert that aesCipherAsm implements the cbcEncAble and cbcDecAble interfaces.
+var _ cbcEncAble = (*aesCipherAsm)(nil)
+var _ cbcDecAble = (*aesCipherAsm)(nil)
+
+const cbcEncrypt = 1
+const cbcDecrypt = 0
+
+type cbc struct {
+ b *aesCipherAsm
+ enc int
+ iv [BlockSize]byte
+}
+
+func (b *aesCipherAsm) NewCBCEncrypter(iv []byte) cipher.BlockMode {
+ var c cbc
+ c.b = b
+ c.enc = cbcEncrypt
+ copy(c.iv[:], iv)
+ return &c
+}
+
+func (b *aesCipherAsm) NewCBCDecrypter(iv []byte) cipher.BlockMode {
+ var c cbc
+ c.b = b
+ c.enc = cbcDecrypt
+ copy(c.iv[:], iv)
+ return &c
+}
+
+func (x *cbc) BlockSize() int { return BlockSize }
+
+// cryptBlocksChain invokes the cipher message identifying encrypt or decrypt.
+//go:noescape
+func cryptBlocksChain(src, dst *byte, length int, key *uint32, iv *byte, enc int)
+
+func (x *cbc) CryptBlocks(dst, src []byte) {
+ if len(src)%BlockSize != 0 {
+ panic("crypto/cipher: input not full blocks")
+ }
+ if len(dst) < len(src) {
+ panic("crypto/cipher: output smaller than input")
+ }
+ if subtle.InexactOverlap(dst[:len(src)], src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(src) > 0 {
+ if x.enc == cbcEncrypt {
+ cryptBlocksChain(&src[0], &dst[0], len(src), &x.b.enc[0], &x.iv[0], x.enc)
+ } else {
+ cryptBlocksChain(&src[0], &dst[0], len(src), &x.b.dec[0], &x.iv[0], x.enc)
+ }
+ }
+}
+
+func (x *cbc) SetIV(iv []byte) {
+ if len(iv) != BlockSize {
+ panic("cipher: incorrect length IV")
+ }
+ copy(x.iv[:], iv)
+}
diff --git a/src/crypto/cipher/cbc.go b/src/crypto/cipher/cbc.go
index 0d07192e29..a719b61e24 100644
--- a/src/crypto/cipher/cbc.go
+++ b/src/crypto/cipher/cbc.go
@@ -52,6 +52,17 @@ func NewCBCEncrypter(b Block, iv []byte) BlockMode {
return (*cbcEncrypter)(newCBC(b, iv))
}
+// newCBCGenericEncrypter returns a BlockMode which encrypts in cipher block chaining
+// mode, using the given Block. The length of iv must be the same as the
+// Block's block size. This always returns the generic non-asm encrypter for use
+// in fuzz testing.
+func newCBCGenericEncrypter(b Block, iv []byte) BlockMode {
+ if len(iv) != b.BlockSize() {
+ panic("cipher.NewCBCEncrypter: IV length must equal block size")
+ }
+ return (*cbcEncrypter)(newCBC(b, iv))
+}
+
func (x *cbcEncrypter) BlockSize() int { return x.blockSize }
func (x *cbcEncrypter) CryptBlocks(dst, src []byte) {
@@ -112,6 +123,17 @@ func NewCBCDecrypter(b Block, iv []byte) BlockMode {
return (*cbcDecrypter)(newCBC(b, iv))
}
+// newCBCGenericDecrypter returns a BlockMode which encrypts in cipher block chaining
+// mode, using the given Block. The length of iv must be the same as the
+// Block's block size. This always returns the generic non-asm decrypter for use in
+// fuzz testing.
+func newCBCGenericDecrypter(b Block, iv []byte) BlockMode {
+ if len(iv) != b.BlockSize() {
+ panic("cipher.NewCBCDecrypter: IV length must equal block size")
+ }
+ return (*cbcDecrypter)(newCBC(b, iv))
+}
+
func (x *cbcDecrypter) BlockSize() int { return x.blockSize }
func (x *cbcDecrypter) CryptBlocks(dst, src []byte) {
diff --git a/src/crypto/cipher/export_test.go b/src/crypto/cipher/export_test.go
index cf8007ab49..beb9bf5d23 100644
--- a/src/crypto/cipher/export_test.go
+++ b/src/crypto/cipher/export_test.go
@@ -6,3 +6,5 @@ package cipher
// Export internal functions for testing.
var XorBytes = xorBytes
+var NewCBCGenericEncrypter = newCBCGenericEncrypter
+var NewCBCGenericDecrypter = newCBCGenericDecrypter
diff --git a/src/crypto/cipher/fuzz_test.go b/src/crypto/cipher/fuzz_test.go
new file mode 100644
index 0000000000..ffceeef5f5
--- /dev/null
+++ b/src/crypto/cipher/fuzz_test.go
@@ -0,0 +1,103 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64le
+
+package cipher_test
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "testing"
+ "time"
+)
+
+var cbcAESFuzzTests = []struct {
+ name string
+ key []byte
+}{
+ {
+ "CBC-AES128",
+ commonKey128,
+ },
+ {
+ "CBC-AES192",
+ commonKey192,
+ },
+ {
+ "CBC-AES256",
+ commonKey256,
+ },
+}
+
+var timeout *time.Timer
+
+const datalen = 1024
+
+func TestFuzz(t *testing.T) {
+
+ for _, ft := range cbcAESFuzzTests {
+ c, _ := aes.NewCipher(ft.key)
+
+ cbcAsm := cipher.NewCBCEncrypter(c, commonIV)
+ cbcGeneric := cipher.NewCBCGenericEncrypter(c, commonIV)
+
+ if testing.Short() {
+ timeout = time.NewTimer(10 * time.Millisecond)
+ } else {
+ timeout = time.NewTimer(2 * time.Second)
+ }
+
+ indata := make([]byte, datalen)
+ outgeneric := make([]byte, datalen)
+ outdata := make([]byte, datalen)
+
+ fuzzencrypt:
+ for {
+ select {
+ case <-timeout.C:
+ break fuzzencrypt
+ default:
+ }
+
+ rand.Read(indata[:])
+
+ cbcGeneric.CryptBlocks(indata, outgeneric)
+ cbcAsm.CryptBlocks(indata, outdata)
+
+ if !bytes.Equal(outdata, outgeneric) {
+ t.Fatalf("AES-CBC encryption does not match reference result: %x and %x, please report this error to security@golang.org", outdata, outgeneric)
+ }
+ }
+
+ cbcAsm = cipher.NewCBCDecrypter(c, commonIV)
+ cbcGeneric = cipher.NewCBCGenericDecrypter(c, commonIV)
+
+ if testing.Short() {
+ timeout = time.NewTimer(10 * time.Millisecond)
+ } else {
+ timeout = time.NewTimer(2 * time.Second)
+ }
+
+ fuzzdecrypt:
+ for {
+ select {
+ case <-timeout.C:
+ break fuzzdecrypt
+ default:
+ }
+
+ rand.Read(indata[:])
+
+ cbcGeneric.CryptBlocks(indata, outgeneric)
+ cbcAsm.CryptBlocks(indata, outdata)
+
+ if !bytes.Equal(outdata, outgeneric) {
+ t.Fatalf("AES-CBC decryption does not match reference result: %x and %x, please report this error to security@golang.org", outdata, outgeneric)
+ }
+ }
+ }
+}
diff --git a/src/crypto/ed25519/ed25519.go b/src/crypto/ed25519/ed25519.go
index 09c5269d0c..4669e02db2 100644
--- a/src/crypto/ed25519/ed25519.go
+++ b/src/crypto/ed25519/ed25519.go
@@ -126,7 +126,10 @@ func newKeyFromSeed(privateKey, seed []byte) {
}
h := sha512.Sum512(seed)
- s := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
A := (&edwards25519.Point{}).ScalarBaseMult(s)
publicKey := A.Bytes()
@@ -152,7 +155,10 @@ func sign(signature, privateKey, message []byte) {
seed, publicKey := privateKey[:SeedSize], privateKey[SeedSize:]
h := sha512.Sum512(seed)
- s := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
prefix := h[32:]
mh := sha512.New()
@@ -160,7 +166,10 @@ func sign(signature, privateKey, message []byte) {
mh.Write(message)
messageDigest := make([]byte, 0, sha512.Size)
messageDigest = mh.Sum(messageDigest)
- r := edwards25519.NewScalar().SetUniformBytes(messageDigest)
+ r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest)
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
R := (&edwards25519.Point{}).ScalarBaseMult(r)
@@ -170,7 +179,10 @@ func sign(signature, privateKey, message []byte) {
kh.Write(message)
hramDigest := make([]byte, 0, sha512.Size)
hramDigest = kh.Sum(hramDigest)
- k := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
S := edwards25519.NewScalar().MultiplyAdd(k, s, r)
@@ -200,7 +212,10 @@ func Verify(publicKey PublicKey, message, sig []byte) bool {
kh.Write(message)
hramDigest := make([]byte, 0, sha512.Size)
hramDigest = kh.Sum(hramDigest)
- k := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
S, err := edwards25519.NewScalar().SetCanonicalBytes(sig[32:])
if err != nil {
diff --git a/src/crypto/ed25519/internal/edwards25519/edwards25519.go b/src/crypto/ed25519/internal/edwards25519/edwards25519.go
index 313e6c281c..4e0ad7a357 100644
--- a/src/crypto/ed25519/internal/edwards25519/edwards25519.go
+++ b/src/crypto/ed25519/internal/edwards25519/edwards25519.go
@@ -151,10 +151,10 @@ func (v *Point) SetBytes(x []byte) (*Point, error) {
// at https://hdevalence.ca/blog/2020-10-04-its-25519am, specifically the
// "Canonical A, R" section.
- if len(x) != 32 {
+ y, err := new(field.Element).SetBytes(x)
+ if err != nil {
return nil, errors.New("edwards25519: invalid point encoding length")
}
- y := new(field.Element).SetBytes(x)
// -x² + y² = 1 + dx²y²
// x² + dx²y² = x²(dy² + 1) = y² - 1
@@ -224,7 +224,7 @@ func (v *Point) fromP2(p *projP2) *Point {
}
// d is a constant in the curve equation.
-var d = new(field.Element).SetBytes([]byte{
+var d, _ = new(field.Element).SetBytes([]byte{
0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
diff --git a/src/crypto/ed25519/internal/edwards25519/field/fe.go b/src/crypto/ed25519/internal/edwards25519/field/fe.go
index dbe86599b3..5518ef2b90 100644
--- a/src/crypto/ed25519/internal/edwards25519/field/fe.go
+++ b/src/crypto/ed25519/internal/edwards25519/field/fe.go
@@ -8,6 +8,7 @@ package field
import (
"crypto/subtle"
"encoding/binary"
+ "errors"
"math/bits"
)
@@ -186,14 +187,17 @@ func (v *Element) Set(a *Element) *Element {
return v
}
-// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
//
// Consistent with RFC 7748, the most significant bit (the high bit of the
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032.
-func (v *Element) SetBytes(x []byte) *Element {
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
if len(x) != 32 {
- panic("edwards25519: invalid field element input size")
+ return nil, errors.New("edwards25519: invalid field element input size")
}
// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
@@ -208,12 +212,12 @@ func (v *Element) SetBytes(x []byte) *Element {
// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
v.l3 &= maskLow51Bits
- // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
// Note: not bytes 25:33, shift 4, to avoid overread.
v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
v.l4 &= maskLow51Bits
- return v
+ return v, nil
}
// Bytes returns the canonical 32-byte little-endian encoding of v.
@@ -391,26 +395,26 @@ var sqrtM1 = &Element{1718705420411056, 234908883556509,
// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
// and returns r and 0.
-func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
- var a, b Element
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+ t0 := new(Element)
// r = (u * v3) * (u * v7)^((p-5)/8)
- v2 := a.Square(v)
- uv3 := b.Multiply(u, b.Multiply(v2, v))
- uv7 := a.Multiply(uv3, a.Square(v2))
- r.Multiply(uv3, r.Pow22523(uv7))
+ v2 := new(Element).Square(v)
+ uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+ uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+ rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
- check := a.Multiply(v, a.Square(r)) // check = v * r^2
+ check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
- uNeg := b.Negate(u)
+ uNeg := new(Element).Negate(u)
correctSignSqrt := check.Equal(u)
flippedSignSqrt := check.Equal(uNeg)
- flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
+ flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
- rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
+ rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
- r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
+ rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
- r.Absolute(r) // Choose the nonnegative square root.
+ r.Absolute(rr) // Choose the nonnegative square root.
return r, correctSignSqrt | flippedSignSqrt
}
diff --git a/src/crypto/ed25519/internal/edwards25519/field/fe_alias_test.go b/src/crypto/ed25519/internal/edwards25519/field/fe_alias_test.go
index 5ad81df013..abe9986b88 100644
--- a/src/crypto/ed25519/internal/edwards25519/field/fe_alias_test.go
+++ b/src/crypto/ed25519/internal/edwards25519/field/fe_alias_test.go
@@ -96,19 +96,33 @@ func TestAliasing(t *testing.T) {
{name: "Negate", oneArgF: (*Element).Negate},
{name: "Set", oneArgF: (*Element).Set},
{name: "Square", oneArgF: (*Element).Square},
+ {name: "Pow22523", oneArgF: (*Element).Pow22523},
+ {
+ name: "Mult32",
+ oneArgF: func(v, x *Element) *Element {
+ return v.Mult32(x, 0xffffffff)
+ },
+ },
{name: "Multiply", twoArgsF: (*Element).Multiply},
{name: "Add", twoArgsF: (*Element).Add},
{name: "Subtract", twoArgsF: (*Element).Subtract},
{
+ name: "SqrtRatio",
+ twoArgsF: func(v, x, y *Element) *Element {
+ r, _ := v.SqrtRatio(x, y)
+ return r
+ },
+ },
+ {
name: "Select0",
twoArgsF: func(v, x, y *Element) *Element {
- return (*Element).Select(v, x, y, 0)
+ return v.Select(x, y, 0)
},
},
{
name: "Select1",
twoArgsF: func(v, x, y *Element) *Element {
- return (*Element).Select(v, x, y, 1)
+ return v.Select(x, y, 1)
},
},
} {
diff --git a/src/crypto/ed25519/internal/edwards25519/field/fe_generic.go b/src/crypto/ed25519/internal/edwards25519/field/fe_generic.go
index bccf8511ac..d6667b27be 100644
--- a/src/crypto/ed25519/internal/edwards25519/field/fe_generic.go
+++ b/src/crypto/ed25519/internal/edwards25519/field/fe_generic.go
@@ -254,6 +254,8 @@ func (v *Element) carryPropagateGeneric() *Element {
c3 := v.l3 >> 51
c4 := v.l4 >> 51
+ // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+ // the final l0 will be at most 52 bits. Similarly for the rest.
v.l0 = v.l0&maskLow51Bits + c4*19
v.l1 = v.l1&maskLow51Bits + c0
v.l2 = v.l2&maskLow51Bits + c1
diff --git a/src/crypto/ed25519/internal/edwards25519/field/fe_test.go b/src/crypto/ed25519/internal/edwards25519/field/fe_test.go
index b484459ff2..945a024a41 100644
--- a/src/crypto/ed25519/internal/edwards25519/field/fe_test.go
+++ b/src/crypto/ed25519/internal/edwards25519/field/fe_test.go
@@ -192,7 +192,8 @@ func TestSetBytesRoundTrip(t *testing.T) {
for _, tt := range tests {
b := tt.fe.Bytes()
- if !bytes.Equal(b, tt.b) || new(Element).SetBytes(tt.b).Equal(&tt.fe) != 1 {
+ fe, _ := new(Element).SetBytes(tt.b)
+ if !bytes.Equal(b, tt.b) || fe.Equal(&tt.fe) != 1 {
t.Errorf("Failed fixed roundtrip: %v", tt)
}
}
@@ -217,8 +218,8 @@ func TestBytesBigEquivalence(t *testing.T) {
return false
}
- buf := make([]byte, 32) // pad with zeroes
- copy(buf, swapEndianness(fe1.toBig().Bytes()))
+ buf := make([]byte, 32)
+ buf = swapEndianness(fe1.toBig().FillBytes(buf))
return bytes.Equal(fe.Bytes(), buf) && isInBounds(&fe) && isInBounds(&fe1)
}
@@ -244,7 +245,8 @@ func (v *Element) fromBig(n *big.Int) *Element {
}
}
- return v.SetBytes(buf[:32])
+ v.SetBytes(buf[:32])
+ return v
}
func (v *Element) fromDecimal(s string) *Element {
@@ -471,9 +473,9 @@ func TestSqrtRatio(t *testing.T) {
}
for i, tt := range tests {
- u := new(Element).SetBytes(decodeHex(tt.u))
- v := new(Element).SetBytes(decodeHex(tt.v))
- want := new(Element).SetBytes(decodeHex(tt.r))
+ u, _ := new(Element).SetBytes(decodeHex(tt.u))
+ v, _ := new(Element).SetBytes(decodeHex(tt.v))
+ want, _ := new(Element).SetBytes(decodeHex(tt.r))
got, wasSquare := new(Element).SqrtRatio(u, v)
if got.Equal(want) == 0 || wasSquare != tt.wasSquare {
t.Errorf("%d: got (%v, %v), want (%v, %v)", i, got, wasSquare, want, tt.wasSquare)
diff --git a/src/crypto/ed25519/internal/edwards25519/scalar.go b/src/crypto/ed25519/internal/edwards25519/scalar.go
index 889acaa0f1..3df2fb936f 100644
--- a/src/crypto/ed25519/internal/edwards25519/scalar.go
+++ b/src/crypto/ed25519/internal/edwards25519/scalar.go
@@ -22,7 +22,7 @@ import (
// The zero value is a valid zero element.
type Scalar struct {
// s is the Scalar value in little-endian. The value is always reduced
- // between operations.
+ // modulo l between operations.
s [32]byte
}
@@ -79,16 +79,20 @@ func (s *Scalar) Set(x *Scalar) *Scalar {
return s
}
-// SetUniformBytes sets s to an uniformly distributed value given 64 uniformly
-// distributed random bytes.
-func (s *Scalar) SetUniformBytes(x []byte) *Scalar {
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to an uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
if len(x) != 64 {
- panic("edwards25519: invalid SetUniformBytes input length")
+ return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
}
var wideBytes [64]byte
copy(wideBytes[:], x[:])
scReduce(&s.s, &wideBytes)
- return s
+ return s, nil
}
// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
@@ -122,7 +126,8 @@ func isReduced(s *Scalar) bool {
// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
// Section 5.1.5 (also known as clamping) and sets s to the result. The input
-// must be 32 bytes, and it is not modified.
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
//
// Note that since Scalar values are always reduced modulo the prime order of
// the curve, the resulting value will not preserve any of the cofactor-clearing
@@ -130,13 +135,13 @@ func isReduced(s *Scalar) bool {
// expected as long as it is applied to points on the prime order subgroup, like
// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
// irrelevant RFC 7748 clamping, but it is now required for compatibility.
-func (s *Scalar) SetBytesWithClamping(x []byte) *Scalar {
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
// The description above omits the purpose of the high bits of the clamping
// for brevity, but those are also lost to reductions, and are also
// irrelevant to edwards25519 as they protect against a specific
// implementation bug that was once observed in a generic Montgomery ladder.
if len(x) != 32 {
- panic("edwards25519: invalid SetBytesWithClamping input length")
+ return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
}
var wideBytes [64]byte
copy(wideBytes[:], x[:])
@@ -144,7 +149,7 @@ func (s *Scalar) SetBytesWithClamping(x []byte) *Scalar {
wideBytes[31] &= 63
wideBytes[31] |= 64
scReduce(&s.s, &wideBytes)
- return s
+ return s, nil
}
// Bytes returns the canonical 32-byte little-endian encoding of s.
diff --git a/src/crypto/ed25519/internal/edwards25519/scalar_test.go b/src/crypto/ed25519/internal/edwards25519/scalar_test.go
index 704caffc5c..9d51b34b25 100644
--- a/src/crypto/ed25519/internal/edwards25519/scalar_test.go
+++ b/src/crypto/ed25519/internal/edwards25519/scalar_test.go
@@ -113,7 +113,7 @@ func TestScalarSetBytesWithClamping(t *testing.T) {
// Generated with libsodium.js 1.0.18 crypto_scalarmult_ed25519_base.
random := "633d368491364dc9cd4c1bf891b1d59460face1644813240a313e61f2c88216e"
- s := new(Scalar).SetBytesWithClamping(decodeHex(random))
+ s, _ := new(Scalar).SetBytesWithClamping(decodeHex(random))
p := new(Point).ScalarBaseMult(s)
want := "1d87a9026fd0126a5736fe1628c95dd419172b5b618457e041c9c861b2494a94"
if got := hex.EncodeToString(p.Bytes()); got != want {
@@ -121,7 +121,7 @@ func TestScalarSetBytesWithClamping(t *testing.T) {
}
zero := "0000000000000000000000000000000000000000000000000000000000000000"
- s = new(Scalar).SetBytesWithClamping(decodeHex(zero))
+ s, _ = new(Scalar).SetBytesWithClamping(decodeHex(zero))
p = new(Point).ScalarBaseMult(s)
want = "693e47972caf527c7883ad1b39822f026f47db2ab0e1919955b8993aa04411d1"
if got := hex.EncodeToString(p.Bytes()); got != want {
@@ -129,7 +129,7 @@ func TestScalarSetBytesWithClamping(t *testing.T) {
}
one := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
- s = new(Scalar).SetBytesWithClamping(decodeHex(one))
+ s, _ = new(Scalar).SetBytesWithClamping(decodeHex(one))
p = new(Point).ScalarBaseMult(s)
want = "12e9a68b73fd5aacdbcaf3e88c46fea6ebedb1aa84eed1842f07f8edab65e3a7"
if got := hex.EncodeToString(p.Bytes()); got != want {
diff --git a/src/crypto/rand/eagain.go b/src/crypto/rand/eagain.go
deleted file mode 100644
index f018e75931..0000000000
--- a/src/crypto/rand/eagain.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package rand
-
-import (
- "io/fs"
- "syscall"
-)
-
-func init() {
- isEAGAIN = unixIsEAGAIN
-}
-
-// unixIsEAGAIN reports whether err is a syscall.EAGAIN wrapped in a PathError.
-// See golang.org/issue/9205
-func unixIsEAGAIN(err error) bool {
- if pe, ok := err.(*fs.PathError); ok {
- if errno, ok := pe.Err.(syscall.Errno); ok && errno == syscall.EAGAIN {
- return true
- }
- }
- return false
-}
diff --git a/src/crypto/rand/rand_plan9.go b/src/crypto/rand/rand_plan9.go
new file mode 100644
index 0000000000..b81d73ca80
--- /dev/null
+++ b/src/crypto/rand/rand_plan9.go
@@ -0,0 +1,109 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Plan9 cryptographically secure pseudorandom number
+// generator.
+
+package rand
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "encoding/binary"
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+const randomDevice = "/dev/random"
+
+func init() {
+ Reader = &reader{}
+}
+
+// reader is a new pseudorandom generator that seeds itself by
+// reading from /dev/random. The Read method on the returned
+// reader always returns the full amount asked for, or else it
+// returns an error. The generator uses the X9.31 algorithm with
+// AES-128, reseeding after every 1 MB of generated data.
+type reader struct {
+ mu sync.Mutex
+ budget int // number of bytes that can be generated
+ cipher cipher.Block
+ entropy io.Reader
+ entropyUsed int32 // atomic; whether entropy has been used
+ time, seed, dst, key [aes.BlockSize]byte
+}
+
+func warnBlocked() {
+ println("crypto/rand: blocked for 60 seconds waiting to read random data from the kernel")
+}
+
+func (r *reader) readEntropy(b []byte) error {
+ if atomic.CompareAndSwapInt32(&r.entropyUsed, 0, 1) {
+ // First use of randomness. Start timer to warn about
+ // being blocked on entropy not being available.
+ t := time.AfterFunc(time.Minute, warnBlocked)
+ defer t.Stop()
+ }
+ var err error
+ if r.entropy == nil {
+ r.entropy, err = os.Open(randomDevice)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = io.ReadFull(r.entropy, b)
+ return err
+}
+
+func (r *reader) Read(b []byte) (n int, err error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ n = len(b)
+
+ for len(b) > 0 {
+ if r.budget == 0 {
+ err = r.readEntropy(r.seed[0:])
+ if err != nil {
+ return n - len(b), err
+ }
+ err = r.readEntropy(r.key[0:])
+ if err != nil {
+ return n - len(b), err
+ }
+ r.cipher, err = aes.NewCipher(r.key[0:])
+ if err != nil {
+ return n - len(b), err
+ }
+ r.budget = 1 << 20 // reseed after generating 1MB
+ }
+ r.budget -= aes.BlockSize
+
+ // ANSI X9.31 (== X9.17) algorithm, but using AES in place of 3DES.
+ //
+ // single block:
+ // t = encrypt(time)
+ // dst = encrypt(t^seed)
+ // seed = encrypt(t^dst)
+ ns := time.Now().UnixNano()
+ binary.BigEndian.PutUint64(r.time[:], uint64(ns))
+ r.cipher.Encrypt(r.time[0:], r.time[0:])
+ for i := 0; i < aes.BlockSize; i++ {
+ r.dst[i] = r.time[i] ^ r.seed[i]
+ }
+ r.cipher.Encrypt(r.dst[0:], r.dst[0:])
+ for i := 0; i < aes.BlockSize; i++ {
+ r.seed[i] = r.time[i] ^ r.dst[i]
+ }
+ r.cipher.Encrypt(r.seed[0:], r.seed[0:])
+
+ m := copy(b, r.dst[0:])
+ b = b[m:]
+ }
+
+ return n, nil
+}
diff --git a/src/crypto/rand/rand_unix.go b/src/crypto/rand/rand_unix.go
index 28f2f5b58b..560f195d86 100644
--- a/src/crypto/rand/rand_unix.go
+++ b/src/crypto/rand/rand_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || plan9 || solaris
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// Unix cryptographically secure pseudorandom number
// generator.
@@ -11,14 +11,12 @@ package rand
import (
"bufio"
- "crypto/aes"
- "crypto/cipher"
- "encoding/binary"
+ "errors"
"io"
"os"
- "runtime"
"sync"
"sync/atomic"
+ "syscall"
"time"
)
@@ -26,27 +24,19 @@ import "crypto/internal/boring"
const urandomDevice = "/dev/urandom"
-// Easy implementation: read from /dev/urandom.
-// This is sufficient on Linux, OS X, and FreeBSD.
-
func init() {
if boring.Enabled {
Reader = boring.RandReader
return
}
- if runtime.GOOS == "plan9" {
- Reader = newReader(nil)
- } else {
- Reader = &devReader{name: urandomDevice}
- }
+ Reader = &reader{}
}
-// A devReader satisfies reads by reading the file named name.
-type devReader struct {
- name string
+// A reader satisfies reads by reading from urandomDevice
+type reader struct {
f io.Reader
mu sync.Mutex
- used int32 // atomic; whether this devReader has been used
+ used int32 // atomic; whether this reader has been used
}
// altGetRandom if non-nil specifies an OS-specific function to get
@@ -57,35 +47,29 @@ func warnBlocked() {
println("crypto/rand: blocked for 60 seconds waiting to read random data from the kernel")
}
-func (r *devReader) Read(b []byte) (n int, err error) {
+func (r *reader) Read(b []byte) (n int, err error) {
boring.Unreachable()
if atomic.CompareAndSwapInt32(&r.used, 0, 1) {
// First use of randomness. Start timer to warn about
// being blocked on entropy not being available.
- t := time.AfterFunc(60*time.Second, warnBlocked)
+ t := time.AfterFunc(time.Minute, warnBlocked)
defer t.Stop()
}
- if altGetRandom != nil && r.name == urandomDevice && altGetRandom(b) {
+ if altGetRandom != nil && altGetRandom(b) {
return len(b), nil
}
r.mu.Lock()
defer r.mu.Unlock()
if r.f == nil {
- f, err := os.Open(r.name)
- if f == nil {
+ f, err := os.Open(urandomDevice)
+ if err != nil {
return 0, err
}
- if runtime.GOOS == "plan9" {
- r.f = f
- } else {
- r.f = bufio.NewReader(hideAgainReader{f})
- }
+ r.f = bufio.NewReader(hideAgainReader{f})
}
return r.f.Read(b)
}
-var isEAGAIN func(error) bool // set by eagain.go on unix systems
-
// hideAgainReader masks EAGAIN reads from /dev/urandom.
// See golang.org/issue/9205
type hideAgainReader struct {
@@ -94,84 +78,8 @@ type hideAgainReader struct {
func (hr hideAgainReader) Read(p []byte) (n int, err error) {
n, err = hr.r.Read(p)
- if err != nil && isEAGAIN != nil && isEAGAIN(err) {
+ if errors.Is(err, syscall.EAGAIN) {
err = nil
}
return
}
-
-// Alternate pseudo-random implementation for use on
-// systems without a reliable /dev/urandom.
-
-// newReader returns a new pseudorandom generator that
-// seeds itself by reading from entropy. If entropy == nil,
-// the generator seeds itself by reading from the system's
-// random number generator, typically /dev/random.
-// The Read method on the returned reader always returns
-// the full amount asked for, or else it returns an error.
-//
-// The generator uses the X9.31 algorithm with AES-128,
-// reseeding after every 1 MB of generated data.
-func newReader(entropy io.Reader) io.Reader {
- if entropy == nil {
- entropy = &devReader{name: "/dev/random"}
- }
- return &reader{entropy: entropy}
-}
-
-type reader struct {
- mu sync.Mutex
- budget int // number of bytes that can be generated
- cipher cipher.Block
- entropy io.Reader
- time, seed, dst, key [aes.BlockSize]byte
-}
-
-func (r *reader) Read(b []byte) (n int, err error) {
- boring.Unreachable()
- r.mu.Lock()
- defer r.mu.Unlock()
- n = len(b)
-
- for len(b) > 0 {
- if r.budget == 0 {
- _, err := io.ReadFull(r.entropy, r.seed[0:])
- if err != nil {
- return n - len(b), err
- }
- _, err = io.ReadFull(r.entropy, r.key[0:])
- if err != nil {
- return n - len(b), err
- }
- r.cipher, err = aes.NewCipher(r.key[0:])
- if err != nil {
- return n - len(b), err
- }
- r.budget = 1 << 20 // reseed after generating 1MB
- }
- r.budget -= aes.BlockSize
-
- // ANSI X9.31 (== X9.17) algorithm, but using AES in place of 3DES.
- //
- // single block:
- // t = encrypt(time)
- // dst = encrypt(t^seed)
- // seed = encrypt(t^dst)
- ns := time.Now().UnixNano()
- binary.BigEndian.PutUint64(r.time[:], uint64(ns))
- r.cipher.Encrypt(r.time[0:], r.time[0:])
- for i := 0; i < aes.BlockSize; i++ {
- r.dst[i] = r.time[i] ^ r.seed[i]
- }
- r.cipher.Encrypt(r.dst[0:], r.dst[0:])
- for i := 0; i < aes.BlockSize; i++ {
- r.seed[i] = r.time[i] ^ r.dst[i]
- }
- r.cipher.Encrypt(r.seed[0:], r.seed[0:])
-
- m := copy(b, r.dst[0:])
- b = b[m:]
- }
-
- return n, nil
-}
diff --git a/src/crypto/sha256/sha256block_decl.go b/src/crypto/sha256/sha256block_decl.go
index c9c1194487..18ba1c0ec1 100644
--- a/src/crypto/sha256/sha256block_decl.go
+++ b/src/crypto/sha256/sha256block_decl.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build 386 || amd64 || s390x || ppc64le
+//go:build 386 || amd64 || s390x || ppc64le || ppc64
package sha256
diff --git a/src/crypto/sha256/sha256block_generic.go b/src/crypto/sha256/sha256block_generic.go
index a8878c2eee..fd098bec89 100644
--- a/src/crypto/sha256/sha256block_generic.go
+++ b/src/crypto/sha256/sha256block_generic.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !amd64 && !386 && !s390x && !ppc64le && !arm64
+//go:build !amd64 && !386 && !s390x && !ppc64le && !ppc64 && !arm64
package sha256
diff --git a/src/crypto/sha256/sha256block_ppc64le.s b/src/crypto/sha256/sha256block_ppc64x.s
index 77e63c073f..617d42e1d7 100644
--- a/src/crypto/sha256/sha256block_ppc64le.s
+++ b/src/crypto/sha256/sha256block_ppc64x.s
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build ppc64 || ppc64le
+
// Based on CRYPTOGAMS code with the following comment:
// # ====================================================================
// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -57,19 +59,11 @@
#define END R5
#define TBL R6
#define IDX R7
-#define CNT R8
#define LEN R9
-#define OFFLOAD R11
#define TEMP R12
#define HEX00 R0
#define HEX10 R10
-#define HEX20 R25
-#define HEX30 R26
-#define HEX40 R27
-#define HEX50 R28
-#define HEX60 R29
-#define HEX70 R31
// V0-V7 are A-H
// V8-V23 are used for the message schedule
@@ -212,12 +206,23 @@ DATA ·kcon+0x3F0(SB)/8, $0xc67178f2c67178f2
DATA ·kcon+0x3F8(SB)/8, $0xc67178f2c67178f2
DATA ·kcon+0x400(SB)/8, $0x0000000000000000
DATA ·kcon+0x408(SB)/8, $0x0000000000000000
+
+#ifdef GOARCH_ppc64le
DATA ·kcon+0x410(SB)/8, $0x1011121310111213 // permutation control vectors
DATA ·kcon+0x418(SB)/8, $0x1011121300010203
DATA ·kcon+0x420(SB)/8, $0x1011121310111213
DATA ·kcon+0x428(SB)/8, $0x0405060700010203
DATA ·kcon+0x430(SB)/8, $0x1011121308090a0b
DATA ·kcon+0x438(SB)/8, $0x0405060700010203
+#else
+DATA ·kcon+0x410(SB)/8, $0x1011121300010203
+DATA ·kcon+0x418(SB)/8, $0x1011121310111213 // permutation control vectors
+DATA ·kcon+0x420(SB)/8, $0x0405060700010203
+DATA ·kcon+0x428(SB)/8, $0x1011121310111213
+DATA ·kcon+0x430(SB)/8, $0x0001020304050607
+DATA ·kcon+0x438(SB)/8, $0x08090a0b10111213
+#endif
+
GLOBL ·kcon(SB), RODATA, $1088
#define SHA256ROUND0(a, b, c, d, e, f, g, h, xi) \
@@ -257,36 +262,34 @@ GLOBL ·kcon(SB), RODATA, $1088
VADDUWM S0, h, h; \
VADDUWM s1, xj, xj
+#ifdef GOARCH_ppc64le
+#define VPERMLE(va,vb,vc,vt) VPERM va, vb, vc, vt
+#else
+#define VPERMLE(va,vb,vc,vt)
+#endif
+
// func block(dig *digest, p []byte)
-TEXT ·block(SB),0,$128-32
+TEXT ·block(SB),0,$0-32
MOVD dig+0(FP), CTX
MOVD p_base+8(FP), INP
MOVD p_len+16(FP), LEN
SRD $6, LEN
SLD $6, LEN
-
ADD INP, LEN, END
CMP INP, END
BEQ end
MOVD $·kcon(SB), TBL
- MOVD R1, OFFLOAD
-
- MOVD R0, CNT
MOVWZ $0x10, HEX10
- MOVWZ $0x20, HEX20
- MOVWZ $0x30, HEX30
- MOVWZ $0x40, HEX40
- MOVWZ $0x50, HEX50
- MOVWZ $0x60, HEX60
- MOVWZ $0x70, HEX70
-
MOVWZ $8, IDX
+
+#ifdef GOARCH_ppc64le
LVSL (IDX)(R0), LEMASK
VSPLTISB $0x0F, KI
VXOR KI, LEMASK, LEMASK
+#endif
LXVW4X (CTX)(HEX00), VS32 // v0 = vs32
LXVW4X (CTX)(HEX10), VS36 // v4 = vs36
@@ -306,20 +309,21 @@ loop:
LXVD2X (INP)(R0), VS40 // load v8 (=vs40) in advance
ADD $16, INP
- STVX V0, (OFFLOAD+HEX00)
- STVX V1, (OFFLOAD+HEX10)
- STVX V2, (OFFLOAD+HEX20)
- STVX V3, (OFFLOAD+HEX30)
- STVX V4, (OFFLOAD+HEX40)
- STVX V5, (OFFLOAD+HEX50)
- STVX V6, (OFFLOAD+HEX60)
- STVX V7, (OFFLOAD+HEX70)
+ // Offload to VSR24-31 (aka FPR24-31)
+ XXLOR V0, V0, VS24
+ XXLOR V1, V1, VS25
+ XXLOR V2, V2, VS26
+ XXLOR V3, V3, VS27
+ XXLOR V4, V4, VS28
+ XXLOR V5, V5, VS29
+ XXLOR V6, V6, VS30
+ XXLOR V7, V7, VS31
VADDUWM KI, V7, V7 // h+K[i]
LVX (TBL)(IDX), KI
ADD $16, IDX
- VPERM V8, V8, LEMASK, V8
+ VPERMLE(V8, V8, LEMASK, V8)
SHA256ROUND0(V0, V1, V2, V3, V4, V5, V6, V7, V8)
VSLDOI $4, V8, V8, V9
SHA256ROUND0(V7, V0, V1, V2, V3, V4, V5, V6, V9)
@@ -329,7 +333,7 @@ loop:
ADD $16, INP, INP
VSLDOI $4, V10, V10, V11
SHA256ROUND0(V5, V6, V7, V0, V1, V2, V3, V4, V11)
- VPERM V12, V12, LEMASK, V12
+ VPERMLE(V12, V12, LEMASK, V12)
SHA256ROUND0(V4, V5, V6, V7, V0, V1, V2, V3, V12)
VSLDOI $4, V12, V12, V13
SHA256ROUND0(V3, V4, V5, V6, V7, V0, V1, V2, V13)
@@ -339,7 +343,7 @@ loop:
ADD $16, INP, INP
VSLDOI $4, V14, V14, V15
SHA256ROUND0(V1, V2, V3, V4, V5, V6, V7, V0, V15)
- VPERM V16, V16, LEMASK, V16
+ VPERMLE(V16, V16, LEMASK, V16)
SHA256ROUND0(V0, V1, V2, V3, V4, V5, V6, V7, V16)
VSLDOI $4, V16, V16, V17
SHA256ROUND0(V7, V0, V1, V2, V3, V4, V5, V6, V17)
@@ -349,7 +353,7 @@ loop:
LXVD2X (INP)(R0), VS52 // load v20 (=vs52) in advance
ADD $16, INP, INP
SHA256ROUND0(V5, V6, V7, V0, V1, V2, V3, V4, V19)
- VPERM V20, V20, LEMASK, V20
+ VPERMLE(V20, V20, LEMASK, V20)
SHA256ROUND0(V4, V5, V6, V7, V0, V1, V2, V3, V20)
VSLDOI $4, V20, V20, V21
SHA256ROUND0(V3, V4, V5, V6, V7, V0, V1, V2, V21)
@@ -381,21 +385,21 @@ L16_xx:
BC 0x10, 0, L16_xx // bdnz
- LVX (OFFLOAD)(HEX00), V10
+ XXLOR VS24, VS24, V10
- LVX (OFFLOAD)(HEX10), V11
+ XXLOR VS25, VS25, V11
VADDUWM V10, V0, V0
- LVX (OFFLOAD)(HEX20), V12
+ XXLOR VS26, VS26, V12
VADDUWM V11, V1, V1
- LVX (OFFLOAD)(HEX30), V13
+ XXLOR VS27, VS27, V13
VADDUWM V12, V2, V2
- LVX (OFFLOAD)(HEX40), V14
+ XXLOR VS28, VS28, V14
VADDUWM V13, V3, V3
- LVX (OFFLOAD)(HEX50), V15
+ XXLOR VS29, VS29, V15
VADDUWM V14, V4, V4
- LVX (OFFLOAD)(HEX60), V16
+ XXLOR VS30, VS30, V16
VADDUWM V15, V5, V5
- LVX (OFFLOAD)(HEX70), V17
+ XXLOR VS31, VS31, V17
VADDUWM V16, V6, V6
VADDUWM V17, V7, V7
diff --git a/src/crypto/sha512/sha512block_decl.go b/src/crypto/sha512/sha512block_decl.go
index c6dcdf5db6..52278ae690 100644
--- a/src/crypto/sha512/sha512block_decl.go
+++ b/src/crypto/sha512/sha512block_decl.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build s390x || ppc64le
+//go:build s390x || ppc64le || ppc64
package sha512
diff --git a/src/crypto/sha512/sha512block_generic.go b/src/crypto/sha512/sha512block_generic.go
index 62ea237867..9f0c2f2c5d 100644
--- a/src/crypto/sha512/sha512block_generic.go
+++ b/src/crypto/sha512/sha512block_generic.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !amd64 && !s390x && !ppc64le
+//go:build !amd64 && !s390x && !ppc64le && !ppc64
package sha512
diff --git a/src/crypto/sha512/sha512block_ppc64le.s b/src/crypto/sha512/sha512block_ppc64x.s
index 55f0c06c7a..955900b714 100644
--- a/src/crypto/sha512/sha512block_ppc64le.s
+++ b/src/crypto/sha512/sha512block_ppc64x.s
@@ -10,6 +10,8 @@
// # details see http://www.openssl.org/~appro/cryptogams/.
// # ====================================================================
+//go:build ppc64 || ppc64le
+
#include "textflag.h"
// SHA512 block routine. See sha512block.go for Go equivalent.
@@ -66,10 +68,6 @@
#define HEX10 R10
#define HEX20 R25
#define HEX30 R26
-#define HEX40 R27
-#define HEX50 R28
-#define HEX60 R29
-#define HEX70 R31
// V0-V7 are A-H
// V8-V23 are used for the message schedule
@@ -81,6 +79,14 @@
#define s1 V29
#define LEMASK V31 // Permutation control register for little endian
+// VPERM is needed on LE to switch the bytes
+
+#ifdef GOARCH_ppc64le
+#define VPERMLE(va,vb,vc,vt) VPERM va, vb, vc, vt
+#else
+#define VPERMLE(va,vb,vc,vt)
+#endif
+
// 2 copies of each Kt, to fill both doublewords of a vector register
DATA ·kcon+0x000(SB)/8, $0x428a2f98d728ae22
DATA ·kcon+0x008(SB)/8, $0x428a2f98d728ae22
@@ -306,15 +312,15 @@ TEXT ·block(SB),0,$128-32
MOVWZ $0x10, HEX10
MOVWZ $0x20, HEX20
MOVWZ $0x30, HEX30
- MOVWZ $0x40, HEX40
- MOVWZ $0x50, HEX50
- MOVWZ $0x60, HEX60
- MOVWZ $0x70, HEX70
+// Generate the mask used with VPERM for LE
+
+#ifdef GOARCH_ppc64le
MOVWZ $8, IDX
LVSL (IDX)(R0), LEMASK
VSPLTISB $0x0F, KI
VXOR KI, LEMASK, LEMASK
+#endif
LXVD2X (CTX)(HEX00), VS32 // v0 = vs32
LXVD2X (CTX)(HEX10), VS34 // v2 = vs34
@@ -333,62 +339,64 @@ loop:
LXVD2X (INP)(R0), VS40 // load v8 (=vs40) in advance
ADD $16, INP
- STVX V0, (OFFLOAD+HEX00)
- STVX V1, (OFFLOAD+HEX10)
- STVX V2, (OFFLOAD+HEX20)
- STVX V3, (OFFLOAD+HEX30)
- STVX V4, (OFFLOAD+HEX40)
- STVX V5, (OFFLOAD+HEX50)
- STVX V6, (OFFLOAD+HEX60)
- STVX V7, (OFFLOAD+HEX70)
+ // Copy V0-V7 to VS24-VS31
+
+ XXLOR V0, V0, VS24
+ XXLOR V1, V1, VS25
+ XXLOR V2, V2, VS26
+ XXLOR V3, V3, VS27
+ XXLOR V4, V4, VS28
+ XXLOR V5, V5, VS29
+ XXLOR V6, V6, VS30
+ XXLOR V7, V7, VS31
VADDUDM KI, V7, V7 // h+K[i]
LVX (TBL)(IDX), KI
ADD $16, IDX
- VPERM V8, V8, LEMASK, V8
+ VPERMLE(V8,V8,LEMASK,V8)
SHA512ROUND0(V0, V1, V2, V3, V4, V5, V6, V7, V8)
LXVD2X (INP)(R0), VS42 // load v10 (=vs42) in advance
ADD $16, INP, INP
VSLDOI $8, V8, V8, V9
SHA512ROUND0(V7, V0, V1, V2, V3, V4, V5, V6, V9)
- VPERM V10, V10, LEMASK, V10
+ VPERMLE(V10,V10,LEMASK,V10)
SHA512ROUND0(V6, V7, V0, V1, V2, V3, V4, V5, V10)
LXVD2X (INP)(R0), VS44 // load v12 (=vs44) in advance
ADD $16, INP, INP
VSLDOI $8, V10, V10, V11
SHA512ROUND0(V5, V6, V7, V0, V1, V2, V3, V4, V11)
- VPERM V12, V12, LEMASK, V12
+ VPERMLE(V12,V12,LEMASK,V12)
SHA512ROUND0(V4, V5, V6, V7, V0, V1, V2, V3, V12)
LXVD2X (INP)(R0), VS46 // load v14 (=vs46) in advance
ADD $16, INP, INP
VSLDOI $8, V12, V12, V13
SHA512ROUND0(V3, V4, V5, V6, V7, V0, V1, V2, V13)
- VPERM V14, V14, LEMASK, V14
+ VPERMLE(V14,V14,LEMASK,V14)
SHA512ROUND0(V2, V3, V4, V5, V6, V7, V0, V1, V14)
LXVD2X (INP)(R0), VS48 // load v16 (=vs48) in advance
ADD $16, INP, INP
VSLDOI $8, V14, V14, V15
SHA512ROUND0(V1, V2, V3, V4, V5, V6, V7, V0, V15)
- VPERM V16, V16, LEMASK, V16
+ VPERMLE(V16,V16,LEMASK,V16)
SHA512ROUND0(V0, V1, V2, V3, V4, V5, V6, V7, V16)
LXVD2X (INP)(R0), VS50 // load v18 (=vs50) in advance
ADD $16, INP, INP
VSLDOI $8, V16, V16, V17
SHA512ROUND0(V7, V0, V1, V2, V3, V4, V5, V6, V17)
- VPERM V18, V18, LEMASK, V18
+ VPERMLE(V18,V18,LEMASK,V18)
SHA512ROUND0(V6, V7, V0, V1, V2, V3, V4, V5, V18)
LXVD2X (INP)(R0), VS52 // load v20 (=vs52) in advance
ADD $16, INP, INP
VSLDOI $8, V18, V18, V19
SHA512ROUND0(V5, V6, V7, V0, V1, V2, V3, V4, V19)
- VPERM V20, V20, LEMASK, V20
+ VPERMLE(V20,V20,LEMASK,V20)
SHA512ROUND0(V4, V5, V6, V7, V0, V1, V2, V3, V20)
LXVD2X (INP)(R0), VS54 // load v22 (=vs54) in advance
ADD $16, INP, INP
VSLDOI $8, V20, V20, V21
SHA512ROUND0(V3, V4, V5, V6, V7, V0, V1, V2, V21)
- VPERM V22, V22, LEMASK, V22
+ VPERMLE(V22,V22,LEMASK,V22)
SHA512ROUND0(V2, V3, V4, V5, V6, V7, V0, V1, V22)
VSLDOI $8, V22, V22, V23
SHA512ROUND1(V1, V2, V3, V4, V5, V6, V7, V0, V23, V8, V9, V17, V22)
@@ -416,31 +424,37 @@ L16_xx:
BC 0x10, 0, L16_xx // bdnz
- LVX (OFFLOAD)(HEX00), V10
-
- LVX (OFFLOAD)(HEX10), V11
+ XXLOR VS24, VS24, V10
+ XXLOR VS25, VS25, V11
+ XXLOR VS26, VS26, V12
+ XXLOR VS27, VS27, V13
+ XXLOR VS28, VS28, V14
+ XXLOR VS29, VS29, V15
+ XXLOR VS30, VS30, V16
+ XXLOR VS31, VS31, V17
VADDUDM V10, V0, V0
- LVX (OFFLOAD)(HEX20), V12
VADDUDM V11, V1, V1
- LVX (OFFLOAD)(HEX30), V13
VADDUDM V12, V2, V2
- LVX (OFFLOAD)(HEX40), V14
VADDUDM V13, V3, V3
- LVX (OFFLOAD)(HEX50), V15
VADDUDM V14, V4, V4
- LVX (OFFLOAD)(HEX60), V16
VADDUDM V15, V5, V5
- LVX (OFFLOAD)(HEX70), V17
VADDUDM V16, V6, V6
VADDUDM V17, V7, V7
CMPU INP, END
BLT loop
+#ifdef GOARCH_ppc64le
VPERM V0, V1, KI, V0
VPERM V2, V3, KI, V2
VPERM V4, V5, KI, V4
VPERM V6, V7, KI, V6
+#else
+ VPERM V1, V0, KI, V0
+ VPERM V3, V2, KI, V2
+ VPERM V5, V4, KI, V4
+ VPERM V7, V6, KI, V6
+#endif
STXVD2X VS32, (CTX+HEX00) // v0 = vs32
STXVD2X VS34, (CTX+HEX10) // v2 = vs34
STXVD2X VS36, (CTX+HEX20) // v4 = vs36
diff --git a/src/crypto/x509/internal/macos/corefoundation.go b/src/crypto/x509/internal/macos/corefoundation.go
index cda1d95d81..75c212910b 100644
--- a/src/crypto/x509/internal/macos/corefoundation.go
+++ b/src/crypto/x509/internal/macos/corefoundation.go
@@ -48,7 +48,7 @@ func CFStringToString(ref CFRef) string {
// TimeToCFDateRef converts a time.Time into an apple CFDateRef
func TimeToCFDateRef(t time.Time) CFRef {
secs := t.Sub(time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC)).Seconds()
- ref := CFDateCreate(int(secs))
+ ref := CFDateCreate(secs)
return ref
}
@@ -170,8 +170,8 @@ func x509_CFArrayAppendValue_trampoline()
//go:cgo_import_dynamic x509_CFDateCreate CFDateCreate "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-func CFDateCreate(seconds int) CFRef {
- ret := syscall(abi.FuncPCABI0(x509_CFDateCreate_trampoline), kCFAllocatorDefault, uintptr(seconds), 0, 0, 0, 0)
+func CFDateCreate(seconds float64) CFRef {
+ ret := syscall(abi.FuncPCABI0(x509_CFDateCreate_trampoline), kCFAllocatorDefault, 0, 0, 0, 0, seconds)
return CFRef(ret)
}
func x509_CFDateCreate_trampoline()
@@ -193,7 +193,7 @@ func CFStringCreateExternalRepresentation(strRef CFRef) CFRef {
func x509_CFStringCreateExternalRepresentation_trampoline()
// syscall is implemented in the runtime package (runtime/sys_darwin.go)
-func syscall(fn, a1, a2, a3, a4, a5, a6 uintptr) uintptr
+func syscall(fn, a1, a2, a3, a4, a5 uintptr, f1 float64) uintptr
// ReleaseCFArray iterates through an array, releasing its contents, and then
// releases the array itself. This is necessary because we cannot, easily, set the
diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go
index 8f953f6cb6..d1edcb8c48 100644
--- a/src/database/sql/fakedb_test.go
+++ b/src/database/sql/fakedb_test.go
@@ -676,6 +676,9 @@ func (c *fakeConn) PrepareContext(ctx context.Context, query string) (driver.Stm
if c.waiter != nil {
c.waiter(ctx)
+ if err := ctx.Err(); err != nil {
+ return nil, err
+ }
}
if stmt.wait > 0 {
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index 08ca1f5b9a..a921dd5a84 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -418,26 +418,31 @@ func TestQueryContextWait(t *testing.T) {
defer closeDB(t, db)
prepares0 := numPrepares(t, db)
- // TODO(kardianos): convert this from using a timeout to using an explicit
- // cancel when the query signals that it is "executing" the query.
- ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
+ ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// This will trigger the *fakeConn.Prepare method which will take time
// performing the query. The ctxDriverPrepare func will check the context
// after this and close the rows and return an error.
- _, err := db.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
- if err != context.DeadlineExceeded {
+ c, err := db.Conn(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c.dc.ci.(*fakeConn).waiter = func(c context.Context) {
+ cancel()
+ <-ctx.Done()
+ }
+ _, err = c.QueryContext(ctx, "SELECT|people|age,name|")
+ c.Close()
+ if err != context.Canceled {
t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
}
// Verify closed rows connection after error condition.
waitForFree(t, db, 1)
if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
- // TODO(kardianos): if the context timeouts before the db.QueryContext
- // executes this check may fail. After adjusting how the context
- // is canceled above revert this back to a Fatal error.
- t.Logf("executed %d Prepare statements; want 1", prepares)
+ t.Fatalf("executed %d Prepare statements; want 1", prepares)
}
}
@@ -455,14 +460,14 @@ func TestTxContextWait(t *testing.T) {
}
tx.keepConnOnRollback = false
- go func() {
- time.Sleep(15 * time.Millisecond)
+ tx.dc.ci.(*fakeConn).waiter = func(c context.Context) {
cancel()
- }()
+ <-ctx.Done()
+ }
// This will trigger the *fakeConn.Prepare method which will take time
// performing the query. The ctxDriverPrepare func will check the context
// after this and close the rows and return an error.
- _, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
+ _, err = tx.QueryContext(ctx, "SELECT|people|age,name|")
if err != context.Canceled {
t.Fatalf("expected QueryContext to error with context canceled but returned %v", err)
}
diff --git a/src/debug/buildinfo/buildinfo.go b/src/debug/buildinfo/buildinfo.go
index 2c0200e8dc..8de03ff106 100644
--- a/src/debug/buildinfo/buildinfo.go
+++ b/src/debug/buildinfo/buildinfo.go
@@ -75,8 +75,8 @@ func Read(r io.ReaderAt) (*BuildInfo, error) {
if err != nil {
return nil, err
}
- bi := &BuildInfo{}
- if err := bi.UnmarshalText([]byte(mod)); err != nil {
+ bi, err := debug.ParseBuildInfo(mod)
+ if err != nil {
return nil, err
}
bi.GoVersion = vers
diff --git a/src/debug/buildinfo/buildinfo_test.go b/src/debug/buildinfo/buildinfo_test.go
index 8346be0109..ac71626fda 100644
--- a/src/debug/buildinfo/buildinfo_test.go
+++ b/src/debug/buildinfo/buildinfo_test.go
@@ -212,12 +212,10 @@ func TestReadFile(t *testing.T) {
} else {
if tc.wantErr != "" {
t.Fatalf("unexpected success; want error containing %q", tc.wantErr)
- } else if got, err := info.MarshalText(); err != nil {
- t.Fatalf("unexpected error marshaling BuildInfo: %v", err)
- } else if got := cleanOutputForComparison(string(got)); got != tc.want {
- if got != tc.want {
- t.Fatalf("got:\n%s\nwant:\n%s", got, tc.want)
- }
+ }
+ got := info.String()
+ if clean := cleanOutputForComparison(string(got)); got != tc.want && clean != tc.want {
+ t.Fatalf("got:\n%s\nwant:\n%s", got, tc.want)
}
}
})
diff --git a/src/encoding/binary/binary.go b/src/encoding/binary/binary.go
index ee933461ee..0681511fbb 100644
--- a/src/encoding/binary/binary.go
+++ b/src/encoding/binary/binary.go
@@ -29,7 +29,7 @@ import (
"sync"
)
-// A ByteOrder specifies how to convert byte sequences into
+// A ByteOrder specifies how to convert byte slices into
// 16-, 32-, or 64-bit unsigned integers.
type ByteOrder interface {
Uint16([]byte) uint16
@@ -41,10 +41,19 @@ type ByteOrder interface {
String() string
}
-// LittleEndian is the little-endian implementation of ByteOrder.
+// AppendByteOrder specifies how to append 16-, 32-, or 64-bit unsigned integers
+// into a byte slice.
+type AppendByteOrder interface {
+ AppendUint16([]byte, uint16) []byte
+ AppendUint32([]byte, uint32) []byte
+ AppendUint64([]byte, uint64) []byte
+ String() string
+}
+
+// LittleEndian is the little-endian implementation of ByteOrder and AppendByteOrder.
var LittleEndian littleEndian
-// BigEndian is the big-endian implementation of ByteOrder.
+// BigEndian is the big-endian implementation of ByteOrder and AppendByteOrder.
var BigEndian bigEndian
type littleEndian struct{}
@@ -60,6 +69,13 @@ func (littleEndian) PutUint16(b []byte, v uint16) {
b[1] = byte(v >> 8)
}
+func (littleEndian) AppendUint16(b []byte, v uint16) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ )
+}
+
func (littleEndian) Uint32(b []byte) uint32 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
@@ -73,6 +89,15 @@ func (littleEndian) PutUint32(b []byte, v uint32) {
b[3] = byte(v >> 24)
}
+func (littleEndian) AppendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ )
+}
+
func (littleEndian) Uint64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
@@ -91,6 +116,19 @@ func (littleEndian) PutUint64(b []byte, v uint64) {
b[7] = byte(v >> 56)
}
+func (littleEndian) AppendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56),
+ )
+}
+
func (littleEndian) String() string { return "LittleEndian" }
func (littleEndian) GoString() string { return "binary.LittleEndian" }
@@ -108,6 +146,13 @@ func (bigEndian) PutUint16(b []byte, v uint16) {
b[1] = byte(v)
}
+func (bigEndian) AppendUint16(b []byte, v uint16) []byte {
+ return append(b,
+ byte(v>>8),
+ byte(v),
+ )
+}
+
func (bigEndian) Uint32(b []byte) uint32 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
@@ -121,6 +166,15 @@ func (bigEndian) PutUint32(b []byte, v uint32) {
b[3] = byte(v)
}
+func (bigEndian) AppendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
func (bigEndian) Uint64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
@@ -139,6 +193,19 @@ func (bigEndian) PutUint64(b []byte, v uint64) {
b[7] = byte(v)
}
+func (bigEndian) AppendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v>>56),
+ byte(v>>48),
+ byte(v>>40),
+ byte(v>>32),
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
func (bigEndian) String() string { return "BigEndian" }
func (bigEndian) GoString() string { return "binary.BigEndian" }
diff --git a/src/encoding/binary/binary_test.go b/src/encoding/binary/binary_test.go
index 9e1b5f12db..09d08f5ee3 100644
--- a/src/encoding/binary/binary_test.go
+++ b/src/encoding/binary/binary_test.go
@@ -442,6 +442,65 @@ func testPutUint64SmallSliceLengthPanics() (panicked bool) {
return false
}
+func TestByteOrder(t *testing.T) {
+ type byteOrder interface {
+ ByteOrder
+ AppendByteOrder
+ }
+ buf := make([]byte, 8)
+ for _, order := range []byteOrder{LittleEndian, BigEndian} {
+ const offset = 3
+ for _, value := range []uint64{
+ 0x0000000000000000,
+ 0x0123456789abcdef,
+ 0xfedcba9876543210,
+ 0xffffffffffffffff,
+ 0xaaaaaaaaaaaaaaaa,
+ math.Float64bits(math.Pi),
+ math.Float64bits(math.E),
+ } {
+ want16 := uint16(value)
+ order.PutUint16(buf[:2], want16)
+ if got := order.Uint16(buf[:2]); got != want16 {
+ t.Errorf("PutUint16: Uint16 = %v, want %v", got, want16)
+ }
+ buf = order.AppendUint16(buf[:offset], want16)
+ if got := order.Uint16(buf[offset:]); got != want16 {
+ t.Errorf("AppendUint16: Uint16 = %v, want %v", got, want16)
+ }
+ if len(buf) != offset+2 {
+ t.Errorf("AppendUint16: len(buf) = %d, want %d", len(buf), offset+2)
+ }
+
+ want32 := uint32(value)
+ order.PutUint32(buf[:4], want32)
+ if got := order.Uint32(buf[:4]); got != want32 {
+ t.Errorf("PutUint32: Uint32 = %v, want %v", got, want32)
+ }
+ buf = order.AppendUint32(buf[:offset], want32)
+ if got := order.Uint32(buf[offset:]); got != want32 {
+ t.Errorf("AppendUint32: Uint32 = %v, want %v", got, want32)
+ }
+ if len(buf) != offset+4 {
+ t.Errorf("AppendUint32: len(buf) = %d, want %d", len(buf), offset+4)
+ }
+
+ want64 := uint64(value)
+ order.PutUint64(buf[:8], want64)
+ if got := order.Uint64(buf[:8]); got != want64 {
+ t.Errorf("PutUint64: Uint64 = %v, want %v", got, want64)
+ }
+ buf = order.AppendUint64(buf[:offset], want64)
+ if got := order.Uint64(buf[offset:]); got != want64 {
+ t.Errorf("AppendUint64: Uint64 = %v, want %v", got, want64)
+ }
+ if len(buf) != offset+8 {
+ t.Errorf("AppendUint64: len(buf) = %d, want %d", len(buf), offset+8)
+ }
+ }
+ }
+}
+
func TestEarlyBoundsChecks(t *testing.T) {
if testUint64SmallSliceLengthPanics() != true {
t.Errorf("binary.LittleEndian.Uint64 expected to panic for small slices, but didn't")
@@ -596,41 +655,84 @@ func BenchmarkWriteSlice1000Int32s(b *testing.B) {
func BenchmarkPutUint16(b *testing.B) {
b.SetBytes(2)
for i := 0; i < b.N; i++ {
- BigEndian.PutUint16(putbuf[:], uint16(i))
+ BigEndian.PutUint16(putbuf[:2], uint16(i))
+ }
+}
+
+func BenchmarkAppendUint16(b *testing.B) {
+ b.SetBytes(2)
+ for i := 0; i < b.N; i++ {
+ putbuf = BigEndian.AppendUint16(putbuf[:0], uint16(i))
}
}
func BenchmarkPutUint32(b *testing.B) {
b.SetBytes(4)
for i := 0; i < b.N; i++ {
- BigEndian.PutUint32(putbuf[:], uint32(i))
+ BigEndian.PutUint32(putbuf[:4], uint32(i))
+ }
+}
+
+func BenchmarkAppendUint32(b *testing.B) {
+ b.SetBytes(4)
+ for i := 0; i < b.N; i++ {
+ putbuf = BigEndian.AppendUint32(putbuf[:0], uint32(i))
}
}
func BenchmarkPutUint64(b *testing.B) {
b.SetBytes(8)
for i := 0; i < b.N; i++ {
- BigEndian.PutUint64(putbuf[:], uint64(i))
+ BigEndian.PutUint64(putbuf[:8], uint64(i))
+ }
+}
+
+func BenchmarkAppendUint64(b *testing.B) {
+ b.SetBytes(8)
+ for i := 0; i < b.N; i++ {
+ putbuf = BigEndian.AppendUint64(putbuf[:0], uint64(i))
}
}
+
func BenchmarkLittleEndianPutUint16(b *testing.B) {
b.SetBytes(2)
for i := 0; i < b.N; i++ {
- LittleEndian.PutUint16(putbuf[:], uint16(i))
+ LittleEndian.PutUint16(putbuf[:2], uint16(i))
+ }
+}
+
+func BenchmarkLittleEndianAppendUint16(b *testing.B) {
+ b.SetBytes(2)
+ for i := 0; i < b.N; i++ {
+ putbuf = LittleEndian.AppendUint16(putbuf[:0], uint16(i))
}
}
func BenchmarkLittleEndianPutUint32(b *testing.B) {
b.SetBytes(4)
for i := 0; i < b.N; i++ {
- LittleEndian.PutUint32(putbuf[:], uint32(i))
+ LittleEndian.PutUint32(putbuf[:4], uint32(i))
+ }
+}
+
+func BenchmarkLittleEndianAppendUint32(b *testing.B) {
+ b.SetBytes(4)
+ for i := 0; i < b.N; i++ {
+ putbuf = LittleEndian.AppendUint32(putbuf[:0], uint32(i))
}
}
func BenchmarkLittleEndianPutUint64(b *testing.B) {
b.SetBytes(8)
for i := 0; i < b.N; i++ {
- LittleEndian.PutUint64(putbuf[:], uint64(i))
+ LittleEndian.PutUint64(putbuf[:8], uint64(i))
+ }
+}
+
+func BenchmarkLittleEndianAppendUint64(b *testing.B) {
+ b.SetBytes(8)
+ for i := 0; i < b.N; i++ {
+ putbuf = LittleEndian.AppendUint64(putbuf[:0], uint64(i))
}
}
diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go
index 1f5e3e446a..571ac094e2 100644
--- a/src/encoding/json/encode.go
+++ b/src/encoding/json/encode.go
@@ -784,7 +784,7 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
// We're a large number of nested ptrEncoder.encode calls deep;
// start checking if we've run into a pointer cycle.
- ptr := v.Pointer()
+ ptr := v.UnsafePointer()
if _, ok := e.ptrSeen[ptr]; ok {
e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
}
@@ -877,9 +877,9 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
// Here we use a struct to memorize the pointer to the first element of the slice
// and its length.
ptr := struct {
- ptr uintptr
+ ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe
len int
- }{v.Pointer(), v.Len()}
+ }{v.UnsafePointer(), v.Len()}
if _, ok := e.ptrSeen[ptr]; ok {
e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
}
diff --git a/src/encoding/xml/marshal.go b/src/encoding/xml/marshal.go
index 6859be04a2..7792ac77f8 100644
--- a/src/encoding/xml/marshal.go
+++ b/src/encoding/xml/marshal.go
@@ -512,7 +512,7 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat
}
fv := finfo.value(val, dontInitNilPointers)
- if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) {
+ if finfo.flags&fOmitEmpty != 0 && (!fv.IsValid() || isEmptyValue(fv)) {
continue
}
diff --git a/src/encoding/xml/marshal_test.go b/src/encoding/xml/marshal_test.go
index 5fdbae7ef0..3fe7e2dc00 100644
--- a/src/encoding/xml/marshal_test.go
+++ b/src/encoding/xml/marshal_test.go
@@ -2495,3 +2495,39 @@ func TestInvalidXMLName(t *testing.T) {
t.Errorf("error %q does not contain %q", err, want)
}
}
+
+// Issue 50164. Crash on zero value XML attribute.
+type LayerOne struct {
+ XMLName Name `xml:"l1"`
+
+ Value *float64 `xml:"value,omitempty"`
+ *LayerTwo `xml:",omitempty"`
+}
+
+type LayerTwo struct {
+ ValueTwo *int `xml:"value_two,attr,omitempty"`
+}
+
+func TestMarshalZeroValue(t *testing.T) {
+ proofXml := `<l1><value>1.2345</value></l1>`
+ var l1 LayerOne
+ err := Unmarshal([]byte(proofXml), &l1)
+ if err != nil {
+ t.Fatalf("unmarshal XML error: %v", err)
+ }
+ want := float64(1.2345)
+ got := *l1.Value
+ if got != want {
+ t.Fatalf("unexpected unmarshal result, want %f but got %f", want, got)
+ }
+
+ // Marshal again (or Encode again)
+ // In issue 50164, here `Marshal(l1)` will panic because of the zero value of xml attribute ValueTwo `value_two`.
+ anotherXML, err := Marshal(l1)
+ if err != nil {
+ t.Fatalf("marshal XML error: %v", err)
+ }
+ if string(anotherXML) != proofXml {
+ t.Fatalf("unexpected unmarshal result, want %q but got %q", proofXml, anotherXML)
+ }
+}
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 4a54b689e5..785e14cab9 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -88,6 +88,7 @@ var depsRules = `
< internal/itoa
< internal/unsafeheader
< runtime/internal/sys
+ < runtime/internal/syscall
< runtime/internal/atomic
< runtime/internal/math
< runtime
@@ -309,6 +310,7 @@ var depsRules = `
< go/build;
DEBUG, go/build, go/types, text/scanner
+ < internal/pkgbits
< go/internal/gcimporter, go/internal/gccgoimporter, go/internal/srcimporter
< go/importer;
diff --git a/src/go/doc/reader.go b/src/go/doc/reader.go
index de1d422106..d9e721d01b 100644
--- a/src/go/doc/reader.go
+++ b/src/go/doc/reader.go
@@ -927,6 +927,7 @@ var predeclaredTypes = map[string]bool{
"any": true,
"bool": true,
"byte": true,
+ "comparable": true,
"complex64": true,
"complex128": true,
"error": true,
diff --git a/src/go/doc/testdata/b.0.golden b/src/go/doc/testdata/b.0.golden
index 9d93392eaa..c06246a7b1 100644
--- a/src/go/doc/testdata/b.0.golden
+++ b/src/go/doc/testdata/b.0.golden
@@ -46,6 +46,9 @@ VARIABLES
FUNCTIONS
+ // Associated with comparable type if AllDecls is set.
+ func ComparableFactory() comparable
+
//
func F(x int) int
diff --git a/src/go/doc/testdata/b.1.golden b/src/go/doc/testdata/b.1.golden
index 66c47b5c2a..2b62c3400c 100644
--- a/src/go/doc/testdata/b.1.golden
+++ b/src/go/doc/testdata/b.1.golden
@@ -38,6 +38,12 @@ TYPES
//
func (x *T) M()
+ // Should only appear if AllDecls is set.
+ type comparable struct{} // overrides a predeclared type comparable
+
+ // Associated with comparable type if AllDecls is set.
+ func ComparableFactory() comparable
+
//
type notExported int
diff --git a/src/go/doc/testdata/b.2.golden b/src/go/doc/testdata/b.2.golden
index 9d93392eaa..c06246a7b1 100644
--- a/src/go/doc/testdata/b.2.golden
+++ b/src/go/doc/testdata/b.2.golden
@@ -46,6 +46,9 @@ VARIABLES
FUNCTIONS
+ // Associated with comparable type if AllDecls is set.
+ func ComparableFactory() comparable
+
//
func F(x int) int
diff --git a/src/go/doc/testdata/b.go b/src/go/doc/testdata/b.go
index e50663b3df..61b512bc8a 100644
--- a/src/go/doc/testdata/b.go
+++ b/src/go/doc/testdata/b.go
@@ -27,9 +27,15 @@ func UintFactory() uint {}
// Associated with uint type if AllDecls is set.
func uintFactory() uint {}
+// Associated with comparable type if AllDecls is set.
+func ComparableFactory() comparable {}
+
// Should only appear if AllDecls is set.
type uint struct{} // overrides a predeclared type uint
+// Should only appear if AllDecls is set.
+type comparable struct{} // overrides a predeclared type comparable
+
// ----------------------------------------------------------------------------
// Exported declarations associated with non-exported types must always be shown.
diff --git a/src/go/internal/gcimporter/iimport.go b/src/go/internal/gcimporter/iimport.go
index 8ec4c5413b..bff1c09cc9 100644
--- a/src/go/internal/gcimporter/iimport.go
+++ b/src/go/internal/gcimporter/iimport.go
@@ -181,6 +181,15 @@ func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataRea
p.doDecl(localpkg, name)
}
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the 'P' case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ d.t.SetConstraint(d.constraint)
+ }
+
for _, typ := range p.interfaceList {
typ.Complete()
}
@@ -195,6 +204,11 @@ func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataRea
return localpkg, nil
}
+type setConstraintArgs struct {
+ t *types.TypeParam
+ constraint types.Type
+}
+
type iimporter struct {
exportVersion int64
ipath string
@@ -211,6 +225,9 @@ type iimporter struct {
fake fakeFileSet
interfaceList []*types.Interface
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
}
func (p *iimporter) doDecl(pkg *types.Package, name string) {
@@ -391,7 +408,11 @@ func (r *importReader) obj(name string) {
}
iface.MarkImplicit()
}
- t.SetConstraint(constraint)
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
case 'V':
typ := r.typ()
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index 4479adb732..51a3c3e67f 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -543,6 +543,13 @@ func (p *parser) parseArrayType(lbrack token.Pos, len ast.Expr) *ast.ArrayType {
}
p.exprLev--
}
+ if p.tok == token.COMMA {
+ // Trailing commas are accepted in type parameter
+ // lists but not in array type declarations.
+ // Accept for better error handling but complain.
+ p.error(p.pos, "unexpected comma; expecting ]")
+ p.next()
+ }
p.expect(token.RBRACK)
elt := p.parseType()
return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
@@ -797,7 +804,7 @@ func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) {
return
}
-func (p *parser) parseParameterList(name0 *ast.Ident, closing token.Token) (params []*ast.Field) {
+func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token) (params []*ast.Field) {
if p.trace {
defer un(trace(p, "ParameterList"))
}
@@ -816,8 +823,17 @@ func (p *parser) parseParameterList(name0 *ast.Ident, closing token.Token) (para
var named int // number of parameters that have an explicit name and type
for name0 != nil || p.tok != closing && p.tok != token.EOF {
- par := p.parseParamDecl(name0, typeSetsOK)
+ var par field
+ if typ0 != nil {
+ if typeSetsOK {
+ typ0 = p.embeddedElem(typ0)
+ }
+ par = field{name0, typ0}
+ } else {
+ par = p.parseParamDecl(name0, typeSetsOK)
+ }
name0 = nil // 1st name was consumed if present
+ typ0 = nil // 1st typ was consumed if present
if par.name != nil || par.typ != nil {
list = append(list, par)
if par.name != nil && par.typ != nil {
@@ -926,7 +942,7 @@ func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.Field
opening := p.pos
p.next()
// [T any](params) syntax
- list := p.parseParameterList(nil, token.RBRACK)
+ list := p.parseParameterList(nil, nil, token.RBRACK)
rbrack := p.expect(token.RBRACK)
tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
// Type parameter lists must not be empty.
@@ -940,7 +956,7 @@ func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.Field
var fields []*ast.Field
if p.tok != token.RPAREN {
- fields = p.parseParameterList(nil, token.RPAREN)
+ fields = p.parseParameterList(nil, nil, token.RPAREN)
}
rparen := p.expect(token.RPAREN)
@@ -1007,7 +1023,7 @@ func (p *parser) parseMethodSpec() *ast.Field {
//
// Interface methods do not have type parameters. We parse them for a
// better error message and improved error recovery.
- _ = p.parseParameterList(name0, token.RBRACK)
+ _ = p.parseParameterList(name0, nil, token.RBRACK)
_ = p.expect(token.RBRACK)
p.error(lbrack, "interface method must have no type parameters")
@@ -1784,7 +1800,12 @@ func (p *parser) tokPrec() (token.Token, int) {
return tok, tok.Precedence()
}
-func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int) ast.Expr {
+// parseBinaryExpr parses a (possibly) binary expression.
+// If x is non-nil, it is used as the left operand.
+// If check is true, operands are checked to be valid expressions.
+//
+// TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
+func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int, check bool) ast.Expr {
if p.trace {
defer un(trace(p, "BinaryExpr"))
}
@@ -1798,9 +1819,30 @@ func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int) ast.Expr {
return x
}
pos := p.expect(op)
- y := p.parseBinaryExpr(nil, oprec+1)
- x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
+ y := p.parseBinaryExpr(nil, oprec+1, check)
+ if check {
+ x = p.checkExpr(x)
+ y = p.checkExpr(y)
+ }
+ x = &ast.BinaryExpr{X: x, OpPos: pos, Op: op, Y: y}
+ }
+}
+
+// checkBinaryExpr checks binary expressions that were not already checked by
+// parseBinaryExpr, because the latter was called with check=false.
+func (p *parser) checkBinaryExpr(x ast.Expr) {
+ bx, ok := x.(*ast.BinaryExpr)
+ if !ok {
+ return
}
+
+ bx.X = p.checkExpr(bx.X)
+ bx.Y = p.checkExpr(bx.Y)
+
+ // parseBinaryExpr checks x and y for each binary expr in a tree, so we
+ // traverse the tree of binary exprs starting from x.
+ p.checkBinaryExpr(bx.X)
+ p.checkBinaryExpr(bx.Y)
}
// The result may be a type or even a raw type ([...]int). Callers must
@@ -1811,7 +1853,7 @@ func (p *parser) parseExpr() ast.Expr {
defer un(trace(p, "Expression"))
}
- return p.parseBinaryExpr(nil, token.LowestPrec+1)
+ return p.parseBinaryExpr(nil, token.LowestPrec+1, true)
}
func (p *parser) parseRhs() ast.Expr {
@@ -2534,12 +2576,12 @@ func (p *parser) parseValueSpec(doc *ast.CommentGroup, _ token.Pos, keyword toke
return spec
}
-func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident) {
+func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, typ0 ast.Expr) {
if p.trace {
defer un(trace(p, "parseGenericType"))
}
- list := p.parseParameterList(name0, token.RBRACK)
+ list := p.parseParameterList(name0, typ0, token.RBRACK)
closePos := p.expect(token.RBRACK)
spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos}
// Let the type checker decide whether to accept type parameters on aliases:
@@ -2564,31 +2606,85 @@ func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token
lbrack := p.pos
p.next()
if p.tok == token.IDENT {
- // array type or generic type: [name0...
- name0 := p.parseIdent()
+ // We may have an array type or a type parameter list.
+ // In either case we expect an expression x (which may
+ // just be a name, or a more complex expression) which
+ // we can analyze further.
+ //
+ // A type parameter list may have a type bound starting
+ // with a "[" as in: P []E. In that case, simply parsing
+ // an expression would lead to an error: P[] is invalid.
+ // But since index or slice expressions are never constant
+ // and thus invalid array length expressions, if we see a
+ // "[" following a name it must be the start of an array
+ // or slice constraint. Only if we don't see a "[" do we
+ // need to parse a full expression.
// Index or slice expressions are never constant and thus invalid
// array length expressions. Thus, if we see a "[" following name
// we can safely assume that "[" name starts a type parameter list.
- var x ast.Expr // x != nil means x is the array length expression
+ var x ast.Expr = p.parseIdent()
if p.tok != token.LBRACK {
- // We may still have either an array type or generic type -- check if
- // name0 is the entire expr.
+ // To parse the expression starting with name, expand
+ // the call sequence we would get by passing in name
+ // to parser.expr, and pass in name to parsePrimaryExpr.
p.exprLev++
- lhs := p.parsePrimaryExpr(name0)
- x = p.parseBinaryExpr(lhs, token.LowestPrec+1)
+ lhs := p.parsePrimaryExpr(x)
+ x = p.parseBinaryExpr(lhs, token.LowestPrec+1, false)
p.exprLev--
- if x == name0 && p.tok != token.RBRACK {
- x = nil
+ }
+
+ // analyze the cases
+ var pname *ast.Ident // pname != nil means pname is the type parameter name
+ var ptype ast.Expr // ptype != nil means ptype is the type parameter type; pname != nil in this case
+
+ switch t := x.(type) {
+ case *ast.Ident:
+ // Unless we see a "]", we are at the start of a type parameter list.
+ if p.tok != token.RBRACK {
+ // d.Name "[" name ...
+ pname = t
+ // no ptype
+ }
+ case *ast.BinaryExpr:
+ // If we have an expression of the form name*T, and T is a (possibly
+ // parenthesized) type literal or the next token is a comma, we are
+ // at the start of a type parameter list.
+ if name, _ := t.X.(*ast.Ident); name != nil {
+ if t.Op == token.MUL && (isTypeLit(t.Y) || p.tok == token.COMMA) {
+ // d.Name "[" name "*" t.Y
+ // d.Name "[" name "*" t.Y ","
+ // convert t into unary *t.Y
+ pname = name
+ ptype = &ast.StarExpr{Star: t.OpPos, X: t.Y}
+ }
+ }
+ if pname == nil {
+ // A normal binary expression. Since we passed check=false, we must
+ // now check its operands.
+ p.checkBinaryExpr(t)
+ }
+ case *ast.CallExpr:
+ // If we have an expression of the form name(T), and T is a (possibly
+ // parenthesized) type literal or the next token is a comma, we are
+ // at the start of a type parameter list.
+ if name, _ := t.Fun.(*ast.Ident); name != nil {
+ if len(t.Args) == 1 && !t.Ellipsis.IsValid() && (isTypeLit(t.Args[0]) || p.tok == token.COMMA) {
+ // d.Name "[" name "(" t.ArgList[0] ")"
+ // d.Name "[" name "(" t.ArgList[0] ")" ","
+ pname = name
+ ptype = t.Args[0]
+ }
}
}
- if x == nil {
- // generic type [T any];
- p.parseGenericType(spec, lbrack, name0)
+ if pname != nil {
+ // d.Name "[" pname ...
+ // d.Name "[" pname ptype ...
+ // d.Name "[" pname ptype "," ...
+ p.parseGenericType(spec, lbrack, pname, ptype)
} else {
- // array type
- // TODO(rfindley) should resolve all identifiers in x.
+ // d.Name "[" x ...
spec.Type = p.parseArrayType(lbrack, x)
}
} else {
@@ -2611,6 +2707,21 @@ func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token
return spec
}
+// isTypeLit reports whether x is a (possibly parenthesized) type literal.
+func isTypeLit(x ast.Expr) bool {
+ switch x := x.(type) {
+ case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
+ return true
+ case *ast.StarExpr:
+ // *T may be a pointer dereference.
+ // Only consider *T as type literal if T is a type literal.
+ return isTypeLit(x.X)
+ case *ast.ParenExpr:
+ return isTypeLit(x.X)
+ }
+ return false
+}
+
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
if p.trace {
defer un(trace(p, "GenDecl("+keyword.String()+")"))
diff --git a/src/go/parser/short_test.go b/src/go/parser/short_test.go
index cf4fa0a902..d117f0d381 100644
--- a/src/go/parser/short_test.go
+++ b/src/go/parser/short_test.go
@@ -74,7 +74,7 @@ var validWithTParamsOnly = []string{
`package p; type T[P any /* ERROR "expected ']', found any" */ ] struct { P }`,
`package p; type T[P comparable /* ERROR "expected ']', found comparable" */ ] struct { P }`,
`package p; type T[P comparable /* ERROR "expected ']', found comparable" */ [P]] struct { P }`,
- `package p; type T[P1, /* ERROR "expected ']', found ','" */ P2 any] struct { P1; f []P2 }`,
+ `package p; type T[P1, /* ERROR "unexpected comma" */ P2 any] struct { P1; f []P2 }`,
`package p; func _[ /* ERROR "expected '\(', found '\['" */ T any]()()`,
`package p; func _(T (P))`,
`package p; func f[ /* ERROR "expected '\(', found '\['" */ A, B any](); func _() { _ = f[int, int] }`,
@@ -83,8 +83,8 @@ var validWithTParamsOnly = []string{
`package p; func _(p.T[ /* ERROR "missing ',' in parameter list" */ Q])`,
`package p; type _[A interface /* ERROR "expected ']', found 'interface'" */ {},] struct{}`,
`package p; type _[A interface /* ERROR "expected ']', found 'interface'" */ {}] struct{}`,
- `package p; type _[A, /* ERROR "expected ']', found ','" */ B any,] struct{}`,
- `package p; type _[A, /* ERROR "expected ']', found ','" */ B any] struct{}`,
+ `package p; type _[A, /* ERROR "unexpected comma" */ B any,] struct{}`,
+ `package p; type _[A, /* ERROR "unexpected comma" */ B any] struct{}`,
`package p; type _[A any /* ERROR "expected ']', found any" */,] struct{}`,
`package p; type _[A any /* ERROR "expected ']', found any" */ ]struct{}`,
`package p; type _[A any /* ERROR "expected ']', found any" */ ] struct{ A }`,
@@ -95,8 +95,8 @@ var validWithTParamsOnly = []string{
`package p; func _[ /* ERROR "expected '\(', found '\['" */ A, B C](a A) B`,
`package p; func _[ /* ERROR "expected '\(', found '\['" */ A, B C[A, B]](a A) B`,
- `package p; type _[A, /* ERROR "expected ']', found ','" */ B any] interface { _(a A) B }`,
- `package p; type _[A, /* ERROR "expected ']', found ','" */ B C[A, B]] interface { _(a A) B }`,
+ `package p; type _[A, /* ERROR "unexpected comma" */ B any] interface { _(a A) B }`,
+ `package p; type _[A, /* ERROR "unexpected comma" */ B C[A, B]] interface { _(a A) B }`,
`package p; func _[ /* ERROR "expected '\(', found '\['" */ T1, T2 interface{}](x T1) T2`,
`package p; func _[ /* ERROR "expected '\(', found '\['" */ T1 interface{ m() }, T2, T3 interface{}](x T1, y T3) T2`,
`package p; var _ = [ /* ERROR "expected expression" */ ]T[int]{}`,
@@ -193,7 +193,7 @@ var invalids = []string{
`package p; func f() { go func() { func() { f(x func /* ERROR "missing ','" */ (){}) } } }`,
`package p; func _() (type /* ERROR "found 'type'" */ T)(T)`,
`package p; func (type /* ERROR "found 'type'" */ T)(T) _()`,
- `package p; type _[A+B, /* ERROR "expected ']'" */ ] int`,
+ `package p; type _[A+B, /* ERROR "unexpected comma" */ ] int`,
// TODO(rfindley): this error should be positioned on the ':'
`package p; var a = a[[]int:[ /* ERROR "expected expression" */ ]int];`,
@@ -231,7 +231,7 @@ var invalidNoTParamErrs = []string{
`package p; type T[P any /* ERROR "expected ']', found any" */ ] = T0`,
`package p; var _ func[ /* ERROR "expected '\(', found '\['" */ T any](T)`,
`package p; func _[ /* ERROR "expected '\(', found '\['" */ ]()`,
- `package p; type _[A, /* ERROR "expected ']', found ','" */] struct{ A }`,
+ `package p; type _[A, /* ERROR "unexpected comma" */] struct{ A }`,
`package p; func _[ /* ERROR "expected '\(', found '\['" */ type P, *Q interface{}]()`,
`package p; func (T) _[ /* ERROR "expected '\(', found '\['" */ A, B any](a A) B`,
diff --git a/src/go/parser/testdata/issue49482.go2 b/src/go/parser/testdata/issue49482.go2
new file mode 100644
index 0000000000..50de65118e
--- /dev/null
+++ b/src/go/parser/testdata/issue49482.go2
@@ -0,0 +1,35 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ // these need a comma to disambiguate
+ _[P *T,] struct{}
+ _[P *T, _ any] struct{}
+ _[P (*T),] struct{}
+ _[P (*T), _ any] struct{}
+ _[P (T),] struct{}
+ _[P (T), _ any] struct{}
+
+ // these parse as name followed by type
+ _[P *struct{}] struct{}
+ _[P (*struct{})] struct{}
+ _[P ([]int)] struct{}
+
+ // array declarations
+ _ [P(T)]struct{}
+ _ [P((T))]struct{}
+ _ [P * *T]struct{}
+ _ [P * T]struct{}
+ _ [P(*T)]struct{}
+ _ [P(**T)]struct{}
+ _ [P * T - T]struct{}
+ _ [P*T-T, /* ERROR "unexpected comma" */ ]struct{}
+ _ [10, /* ERROR "unexpected comma" */ ]struct{}
+
+ // These should be parsed as generic type declarations.
+ _[P *struct /* ERROR "expected expression" */ {}|int] struct{}
+ _[P *struct /* ERROR "expected expression" */ {}|int|string] struct{}
+)
diff --git a/src/go/parser/testdata/typeparams.src b/src/go/parser/testdata/typeparams.src
index 1fea23f51a..479cb96871 100644
--- a/src/go/parser/testdata/typeparams.src
+++ b/src/go/parser/testdata/typeparams.src
@@ -9,7 +9,7 @@ package p
type List[E any /* ERROR "expected ']', found any" */ ] []E
-type Pair[L, /* ERROR "expected ']', found ','" */ R any] struct {
+type Pair[L, /* ERROR "unexpected comma" */ R any] struct {
Left L
Right R
}
diff --git a/src/go/printer/nodes.go b/src/go/printer/nodes.go
index 19d4ab6663..f2170dbc4f 100644
--- a/src/go/printer/nodes.go
+++ b/src/go/printer/nodes.go
@@ -367,20 +367,48 @@ func (p *printer) parameters(fields *ast.FieldList, isTypeParam bool) {
p.expr(stripParensAlways(par.Type))
prevLine = parLineEnd
}
+
// if the closing ")" is on a separate line from the last parameter,
// print an additional "," and line break
if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing {
p.print(token.COMMA)
p.linebreak(closing, 0, ignore, true)
+ } else if isTypeParam && fields.NumFields() == 1 {
+ // Otherwise, if we are in a type parameter list that could be confused
+ // with the constant array length expression [P*C], print a comma so that
+ // parsing is unambiguous.
+ //
+ // Note that while ParenExprs can also be ambiguous (issue #49482), the
+ // printed type is never parenthesized (stripParensAlways is used above).
+ if t, _ := fields.List[0].Type.(*ast.StarExpr); t != nil && !isTypeLit(t.X) {
+ p.print(token.COMMA)
+ }
}
+
// unindent if we indented
if ws == ignore {
p.print(unindent)
}
}
+
p.print(fields.Closing, closeTok)
}
+// isTypeLit reports whether x is a (possibly parenthesized) type literal.
+func isTypeLit(x ast.Expr) bool {
+ switch x := x.(type) {
+ case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
+ return true
+ case *ast.StarExpr:
+ // *T may be a pointer dereference.
+ // Only consider *T as type literal if T is a type literal.
+ return isTypeLit(x.X)
+ case *ast.ParenExpr:
+ return isTypeLit(x.X)
+ }
+ return false
+}
+
func (p *printer) signature(sig *ast.FuncType) {
if sig.TypeParams != nil {
p.parameters(sig.TypeParams, true)
diff --git a/src/go/printer/testdata/generics.golden b/src/go/printer/testdata/generics.golden
index 3d95eda5b2..4fac2c9c58 100644
--- a/src/go/printer/testdata/generics.golden
+++ b/src/go/printer/testdata/generics.golden
@@ -38,3 +38,29 @@ func _() {
// type constraint literals with elided interfaces
func _[P ~int, Q int | string]() {}
func _[P struct{ f int }, Q *P]() {}
+
+// various potentially ambiguous type parameter lists (issue #49482)
+type _[P *T,] struct{}
+type _[P *T, _ any] struct{}
+type _[P *T,] struct{}
+type _[P *T, _ any] struct{}
+type _[P T] struct{}
+type _[P T, _ any] struct{}
+
+type _[P *struct{}] struct{}
+type _[P *struct{}] struct{}
+type _[P []int] struct{}
+
+// array type declarations
+type _ [P(T)]struct{}
+type _ [P((T))]struct{}
+type _ [P * *T]struct{}
+type _ [P * T]struct{}
+type _ [P(*T)]struct{}
+type _ [P(**T)]struct{}
+type _ [P * T]struct{}
+type _ [P*T - T]struct{}
+
+type _[
+ P *T,
+] struct{}
diff --git a/src/go/printer/testdata/generics.input b/src/go/printer/testdata/generics.input
index 746dfdd235..fde9d32ef0 100644
--- a/src/go/printer/testdata/generics.input
+++ b/src/go/printer/testdata/generics.input
@@ -35,3 +35,29 @@ func _() {
// type constraint literals with elided interfaces
func _[P ~int, Q int | string]() {}
func _[P struct{f int}, Q *P]() {}
+
+// various potentially ambiguous type parameter lists (issue #49482)
+type _[P *T,] struct{}
+type _[P *T, _ any] struct{}
+type _[P (*T),] struct{}
+type _[P (*T), _ any] struct{}
+type _[P (T),] struct{}
+type _[P (T), _ any] struct{}
+
+type _[P *struct{}] struct{}
+type _[P (*struct{})] struct{}
+type _[P ([]int)] struct{}
+
+// array type declarations
+type _ [P(T)]struct{}
+type _ [P((T))]struct{}
+type _ [P * *T]struct{}
+type _ [P * T]struct{}
+type _ [P(*T)]struct{}
+type _ [P(**T)]struct{}
+type _ [P * T]struct{}
+type _ [P * T - T]struct{}
+
+type _[
+ P *T,
+] struct{}
diff --git a/src/go/token/token.go b/src/go/token/token.go
index d22e575661..dd0f4f8234 100644
--- a/src/go/token/token.go
+++ b/src/go/token/token.go
@@ -340,10 +340,13 @@ func IsKeyword(name string) bool {
// is not a digit. Keywords are not identifiers.
//
func IsIdentifier(name string) bool {
+ if name == "" || IsKeyword(name) {
+ return false
+ }
for i, c := range name {
if !unicode.IsLetter(c) && c != '_' && (i == 0 || !unicode.IsDigit(c)) {
return false
}
}
- return name != "" && !IsKeyword(name)
+ return true
}
diff --git a/src/go/types/api.go b/src/go/types/api.go
index 2776e05232..828461477b 100644
--- a/src/go/types/api.go
+++ b/src/go/types/api.go
@@ -417,9 +417,15 @@ func (conf *Config) Check(path string, fset *token.FileSet, files []*ast.File, i
}
// AssertableTo reports whether a value of type V can be asserted to have type T.
+// The behavior of AssertableTo is undefined if V is a generalized interface; i.e.,
+// an interface that may only be used as a type constraint in Go code.
func AssertableTo(V *Interface, T Type) bool {
- m, _ := (*Checker)(nil).assertableTo(V, T)
- return m == nil
+ // Checker.newAssertableTo suppresses errors for invalid types, so we need special
+ // handling here.
+ if T.Underlying() == Typ[Invalid] {
+ return false
+ }
+ return (*Checker)(nil).newAssertableTo(V, T) == nil
}
// AssignableTo reports whether a value of type V is assignable to a variable of type T.
diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go
index a18ee16c7b..4c732dd58e 100644
--- a/src/go/types/api_test.go
+++ b/src/go/types/api_test.go
@@ -466,52 +466,54 @@ func TestInstanceInfo(t *testing.T) {
`func(float64, *byte, ...[]byte)`,
},
- {`package s1; func f[T any, P interface{~*T}](x T) {}; func _(x string) { f(x) }`,
+ {`package s1; func f[T any, P interface{*T}](x T) {}; func _(x string) { f(x) }`,
`f`,
[]string{`string`, `*string`},
`func(x string)`,
},
- {`package s2; func f[T any, P interface{~*T}](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s2; func f[T any, P interface{*T}](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `*int`},
`func(x []int)`,
},
- {`package s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s3; type C[T any] interface{chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`},
`func(x []int)`,
},
- {`package s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s4; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func(x []int)`,
},
- {`package t1; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = f[string] }`,
+ {`package t1; func f[T any, P interface{*T}]() T { panic(0) }; func _() { _ = f[string] }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {`package t2; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = (f[string]) }`,
+ {`package t2; func f[T any, P interface{*T}]() T { panic(0) }; func _() { _ = (f[string]) }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {`package t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]]() []T { return nil }; func _() { _ = f[int] }`,
+ {`package t3; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
`f`,
- []string{`int`, `chan<- int`},
+ []string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
},
- {`package t4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
+ {`package t4; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = (f[int]) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
},
+
{`package i0; import "lib"; func _() { lib.F(42) }`,
`F`,
[]string{`int`},
`func(int)`,
},
+
{`package type0; type T[P interface{~int}] struct{ x P }; var _ T[int]`,
`T`,
[]string{`int`},
@@ -1688,7 +1690,7 @@ func F(){
var F = /*F=func:12*/ F /*F=var:17*/ ; _ = F
var a []int
- for i, x := range /*i=undef*/ /*x=var:16*/ a /*i=var:20*/ /*x=var:20*/ { _ = i; _ = x }
+ for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x }
var i interface{}
switch y := i.(type) { /*y=undef*/
@@ -2306,27 +2308,27 @@ type Bad Bad // invalid type
conf := Config{Error: func(error) {}}
pkg, _ := conf.Check(f.Name.Name, fset, []*ast.File{f}, nil)
- scope := pkg.Scope()
+ lookup := func(tname string) Type { return pkg.Scope().Lookup(tname).Type() }
var (
- EmptyIface = scope.Lookup("EmptyIface").Type().Underlying().(*Interface)
- I = scope.Lookup("I").Type().(*Named)
+ EmptyIface = lookup("EmptyIface").Underlying().(*Interface)
+ I = lookup("I").(*Named)
II = I.Underlying().(*Interface)
- C = scope.Lookup("C").Type().(*Named)
+ C = lookup("C").(*Named)
CI = C.Underlying().(*Interface)
- Integer = scope.Lookup("Integer").Type().Underlying().(*Interface)
- EmptyTypeSet = scope.Lookup("EmptyTypeSet").Type().Underlying().(*Interface)
- N1 = scope.Lookup("N1").Type()
+ Integer = lookup("Integer").Underlying().(*Interface)
+ EmptyTypeSet = lookup("EmptyTypeSet").Underlying().(*Interface)
+ N1 = lookup("N1")
N1p = NewPointer(N1)
- N2 = scope.Lookup("N2").Type()
+ N2 = lookup("N2")
N2p = NewPointer(N2)
- N3 = scope.Lookup("N3").Type()
- N4 = scope.Lookup("N4").Type()
- Bad = scope.Lookup("Bad").Type()
+ N3 = lookup("N3")
+ N4 = lookup("N4")
+ Bad = lookup("Bad")
)
tests := []struct {
- t Type
- i *Interface
+ V Type
+ T *Interface
want bool
}{
{I, II, true},
@@ -2357,8 +2359,20 @@ type Bad Bad // invalid type
}
for _, test := range tests {
- if got := Implements(test.t, test.i); got != test.want {
- t.Errorf("Implements(%s, %s) = %t, want %t", test.t, test.i, got, test.want)
+ if got := Implements(test.V, test.T); got != test.want {
+ t.Errorf("Implements(%s, %s) = %t, want %t", test.V, test.T, got, test.want)
+ }
+
+ // The type assertion x.(T) is valid if T is an interface or if T implements the type of x.
+ // The assertion is never valid if T is a bad type.
+ V := test.T
+ T := test.V
+ want := false
+ if _, ok := T.Underlying().(*Interface); (ok || Implements(T, V)) && T != Bad {
+ want = true
+ }
+ if got := AssertableTo(V, T); got != want {
+ t.Errorf("AssertableTo(%s, %s) = %t, want %t", V, T, got, want)
}
}
}
diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go
index f75b8b6f6b..f5e22c2f67 100644
--- a/src/go/types/assignments.go
+++ b/src/go/types/assignments.go
@@ -290,15 +290,14 @@ func (check *Checker) typesSummary(list []Type, variadic bool) string {
return "(" + strings.Join(res, ", ") + ")"
}
-func (check *Checker) assignError(rhs []ast.Expr, nvars, nvals int) {
- measure := func(x int, unit string) string {
- s := fmt.Sprintf("%d %s", x, unit)
- if x != 1 {
- s += "s"
- }
- return s
+func measure(x int, unit string) string {
+ if x != 1 {
+ unit += "s"
}
+ return fmt.Sprintf("%d %s", x, unit)
+}
+func (check *Checker) assignError(rhs []ast.Expr, nvars, nvals int) {
vars := measure(nvars, "variable")
vals := measure(nvals, "value")
rhs0 := rhs[0]
diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go
index 8fcfcb935f..c81e73c828 100644
--- a/src/go/types/builtins.go
+++ b/src/go/types/builtins.go
@@ -83,10 +83,24 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
// of S and the respective parameter passing rules apply."
S := x.typ
var T Type
- if s, _ := structuralType(S).(*Slice); s != nil {
+ if s, _ := coreType(S).(*Slice); s != nil {
T = s.elem
} else {
- check.invalidArg(x, _InvalidAppend, "%s is not a slice", x)
+ var cause string
+ switch {
+ case x.isNil():
+ cause = "have untyped nil"
+ case isTypeParam(S):
+ if u := coreType(S); u != nil {
+ cause = check.sprintf("%s has core type %s", x, u)
+ } else {
+ cause = check.sprintf("%s has no core type", x)
+ }
+ default:
+ cause = check.sprintf("have %s", x)
+ }
+ // don't use Checker.invalidArg here as it would repeat "argument" in the error message
+ check.errorf(x, _InvalidAppend, "first argument to append must be a slice; %s", cause)
return
}
@@ -102,7 +116,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
if x.mode == invalid {
return
}
- if t := structuralString(x.typ); t != nil && isString(t) {
+ if t := coreString(x.typ); t != nil && isString(t) {
if check.Types != nil {
sig := makeSig(S, S, x.typ)
sig.variadic = true
@@ -336,14 +350,14 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
case _Copy:
// copy(x, y []T) int
- dst, _ := structuralType(x.typ).(*Slice)
+ dst, _ := coreType(x.typ).(*Slice)
var y operand
arg(&y, 1)
if y.mode == invalid {
return
}
- src0 := structuralString(y.typ)
+ src0 := coreString(y.typ)
if src0 != nil && isString(src0) {
src0 = NewSlice(universeByte)
}
@@ -481,13 +495,13 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
}
var min int // minimum number of arguments
- switch structuralType(T).(type) {
+ switch coreType(T).(type) {
case *Slice:
min = 2
case *Map, *Chan:
min = 1
case nil:
- check.errorf(arg0, _InvalidMake, "cannot make %s: no structural type", arg0)
+ check.errorf(arg0, _InvalidMake, "cannot make %s: no core type", arg0)
return
default:
check.invalidArg(arg0, _InvalidMake, "cannot make %s; type must be slice, map, or channel", arg0)
diff --git a/src/go/types/call.go b/src/go/types/call.go
index aa87c48a65..854528ddfa 100644
--- a/src/go/types/call.go
+++ b/src/go/types/call.go
@@ -171,7 +171,7 @@ func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind {
cgocall := x.mode == cgofunc
// a type parameter may be "called" if all types have the same signature
- sig, _ := structuralType(x.typ).(*Signature)
+ sig, _ := coreType(x.typ).(*Signature)
if sig == nil {
check.invalidOp(x, _InvalidCall, "cannot call non-function %s", x)
x.mode = invalid
@@ -527,7 +527,12 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
}
check.exprOrType(x, e.X, false)
- if x.mode == invalid {
+ switch x.mode {
+ case builtin:
+ // types2 uses the position of '.' for the error
+ check.errorf(e.Sel, _UncalledBuiltin, "cannot select on %s", x)
+ goto Error
+ case invalid:
goto Error
}
diff --git a/src/go/types/check.go b/src/go/types/check.go
index a0c3700254..23136377c8 100644
--- a/src/go/types/check.go
+++ b/src/go/types/check.go
@@ -24,19 +24,6 @@ const (
compilerErrorMessages = false // match compiler error messages
)
-// If forceStrict is set, the type-checker enforces additional
-// rules not specified by the Go 1 spec, but which will
-// catch guaranteed run-time errors if the respective
-// code is executed. In other words, programs passing in
-// strict mode are Go 1 compliant, but not all Go 1 programs
-// will pass in strict mode. The additional rules are:
-//
-// - A type assertion x.(T) where T is an interface type
-// is invalid if any (statically known) method that exists
-// for both x and T have different signatures.
-//
-const forceStrict = false
-
// exprInfo stores information about an untyped expression.
type exprInfo struct {
isLhs bool // expression is lhs operand of a shift with delayed type-check
@@ -146,7 +133,7 @@ type Checker struct {
untyped map[ast.Expr]exprInfo // map of expressions without final type
delayed []action // stack of delayed action segments; segments are processed in FIFO order
objPath []Object // path of object dependencies during type inference (for cycle reporting)
- defTypes []*Named // defined types created during type checking, for final validation.
+ cleaners []cleaner // list of types that may need a final cleanup at the end of type-checking
// environment within which the current object is type-checked (valid only
// for the duration of type-checking a specific object)
@@ -225,6 +212,16 @@ func (check *Checker) pop() Object {
return obj
}
+type cleaner interface {
+ cleanup()
+}
+
+// needsCleanup records objects/types that implement the cleanup method
+// which will be called at the end of type-checking.
+func (check *Checker) needsCleanup(c cleaner) {
+ check.cleaners = append(check.cleaners, c)
+}
+
// NewChecker returns a new Checker instance for a given package.
// Package files may be added incrementally via checker.Files.
func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker {
@@ -268,6 +265,8 @@ func (check *Checker) initFiles(files []*ast.File) {
check.methods = nil
check.untyped = nil
check.delayed = nil
+ check.objPath = nil
+ check.cleaners = nil
// determine package name and collect valid files
pkg := check.pkg
@@ -317,22 +316,37 @@ func (check *Checker) checkFiles(files []*ast.File) (err error) {
defer check.handleBailout(&err)
+ print := func(msg string) {
+ if trace {
+ fmt.Println()
+ fmt.Println(msg)
+ }
+ }
+
+ print("== initFiles ==")
check.initFiles(files)
+ print("== collectObjects ==")
check.collectObjects()
+ print("== packageObjects ==")
check.packageObjects()
+ print("== processDelayed ==")
check.processDelayed(0) // incl. all functions
- check.expandDefTypes()
+ print("== cleanup ==")
+ check.cleanup()
+ print("== initOrder ==")
check.initOrder()
if !check.conf.DisableUnusedImportCheck {
+ print("== unusedImports ==")
check.unusedImports()
}
+ print("== recordUntyped ==")
check.recordUntyped()
if check.firstErr == nil {
@@ -350,7 +364,6 @@ func (check *Checker) checkFiles(files []*ast.File) (err error) {
check.recvTParamMap = nil
check.brokenAliases = nil
check.unionTypeSets = nil
- check.defTypes = nil
check.ctxt = nil
// TODO(rFindley) There's more memory we should release at this point.
@@ -378,27 +391,13 @@ func (check *Checker) processDelayed(top int) {
check.delayed = check.delayed[:top]
}
-func (check *Checker) expandDefTypes() {
- // Ensure that every defined type created in the course of type-checking has
- // either non-*Named underlying, or is unresolved.
- //
- // This guarantees that we don't leak any types whose underlying is *Named,
- // because any unresolved instances will lazily compute their underlying by
- // substituting in the underlying of their origin. The origin must have
- // either been imported or type-checked and expanded here, and in either case
- // its underlying will be fully expanded.
- for i := 0; i < len(check.defTypes); i++ {
- n := check.defTypes[i]
- switch n.underlying.(type) {
- case nil:
- if n.resolver == nil {
- panic("nil underlying")
- }
- case *Named:
- n.under() // n.under may add entries to check.defTypes
- }
- n.check = nil
+// cleanup runs cleanup for all collected cleaners.
+func (check *Checker) cleanup() {
+ // Don't use a range clause since Named.cleanup may add more cleaners.
+ for i := 0; i < len(check.cleaners); i++ {
+ check.cleaners[i].cleanup()
}
+ check.cleaners = nil
}
func (check *Checker) record(x *operand) {
diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go
index 84741359c0..c5a69cddf4 100644
--- a/src/go/types/conversions.go
+++ b/src/go/types/conversions.go
@@ -48,11 +48,14 @@ func (check *Checker) conversion(x *operand, T Type) {
// have specific types, constant x cannot be
// converted.
ok = T.(*TypeParam).underIs(func(u Type) bool {
- // t is nil if there are no specific type terms
+ // u is nil if there are no specific type terms
if u == nil {
cause = check.sprintf("%s does not contain specific types", T)
return false
}
+ if isString(x.typ) && isBytesOrRunes(u) {
+ return true
+ }
if !constConvertibleTo(u, nil) {
cause = check.sprintf("cannot convert %s to %s (in %s)", x, u, T)
return false
diff --git a/src/go/types/decl.go b/src/go/types/decl.go
index cd6f709a56..93a37d76ce 100644
--- a/src/go/types/decl.go
+++ b/src/go/types/decl.go
@@ -624,7 +624,6 @@ func (check *Checker) collectTypeParams(dst **TypeParamList, list *ast.FieldList
}()
index := 0
- var bounds []Type
for _, f := range list.List {
var bound Type
// NOTE: we may be able to assert that f.Type != nil here, but this is not
@@ -642,7 +641,6 @@ func (check *Checker) collectTypeParams(dst **TypeParamList, list *ast.FieldList
} else {
bound = Typ[Invalid]
}
- bounds = append(bounds, bound)
for i := range f.Names {
tparams[index+i].bound = bound
}
diff --git a/src/go/types/errorcodes.go b/src/go/types/errorcodes.go
index 51f091a9cb..a7514b317a 100644
--- a/src/go/types/errorcodes.go
+++ b/src/go/types/errorcodes.go
@@ -98,13 +98,10 @@ const (
// _InvalidDeclCycle occurs when a declaration cycle is not valid.
//
// Example:
- // import "unsafe"
- //
- // type T struct {
- // a [n]int
+ // type S struct {
+ // S
// }
//
- // var n = unsafe.Sizeof(T{})
_InvalidDeclCycle
// _InvalidTypeCycle occurs when a cycle in type definitions results in a
diff --git a/src/go/types/errors.go b/src/go/types/errors.go
index a1786ec0ff..fade8630e0 100644
--- a/src/go/types/errors.go
+++ b/src/go/types/errors.go
@@ -110,6 +110,17 @@ func sprintf(fset *token.FileSet, qf Qualifier, debug bool, format string, args
}
buf.WriteByte(']')
arg = buf.String()
+ case []*TypeParam:
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(typeString(x, qf, debug)) // use typeString so we get subscripts when debugging
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
}
args[i] = arg
}
diff --git a/src/go/types/expr.go b/src/go/types/expr.go
index 0d21a592f9..9241c243f2 100644
--- a/src/go/types/expr.go
+++ b/src/go/types/expr.go
@@ -173,9 +173,9 @@ func (check *Checker) unary(x *operand, e *ast.UnaryExpr) {
return
case token.ARROW:
- u := structuralType(x.typ)
+ u := coreType(x.typ)
if u == nil {
- check.invalidOp(x, _InvalidReceive, "cannot receive from %s: no structural type", x)
+ check.invalidOp(x, _InvalidReceive, "cannot receive from %s: no core type", x)
x.mode = invalid
return
}
@@ -859,7 +859,7 @@ func (check *Checker) incomparableCause(typ Type) string {
}
// see if we can extract a more specific error
var cause string
- comparable(typ, nil, func(format string, args ...interface{}) {
+ comparable(typ, true, nil, func(format string, args ...interface{}) {
cause = check.sprintf(format, args...)
})
return cause
@@ -1338,7 +1338,11 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
case hint != nil:
// no composite literal type present - use hint (element type of enclosing type)
typ = hint
- base, _ = deref(structuralType(typ)) // *T implies &T{}
+ base, _ = deref(coreType(typ)) // *T implies &T{}
+ if base == nil {
+ check.errorf(e, _InvalidLit, "invalid composite literal element type %s: no core type", typ)
+ goto Error
+ }
default:
// TODO(gri) provide better error messages depending on context
@@ -1346,7 +1350,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
goto Error
}
- switch utyp := structuralType(base).(type) {
+ switch utyp := coreType(base).(type) {
case *Struct:
// Prevent crash if the struct referred to is not yet set up.
// See analogous comment for *Array.
diff --git a/src/go/types/index.go b/src/go/types/index.go
index db4732c8e0..33075edaf1 100644
--- a/src/go/types/index.go
+++ b/src/go/types/index.go
@@ -183,6 +183,7 @@ func (check *Checker) indexExpr(x *operand, e *typeparams.IndexExpr) (isFuncInst
}
if !valid {
+ // types2 uses the position of '[' for the error
check.invalidOp(x, _NonIndexableOperand, "cannot index %s", x)
x.mode = invalid
return false
@@ -214,9 +215,9 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) {
valid := false
length := int64(-1) // valid if >= 0
- switch u := structuralString(x.typ).(type) {
+ switch u := coreString(x.typ).(type) {
case nil:
- check.invalidOp(x, _NonSliceableOperand, "cannot slice %s: %s has no structural type", x, x.typ)
+ check.invalidOp(x, _NonSliceableOperand, "cannot slice %s: %s has no core type", x, x.typ)
x.mode = invalid
return
diff --git a/src/go/types/infer.go b/src/go/types/infer.go
index 6a9a662565..429510291e 100644
--- a/src/go/types/infer.go
+++ b/src/go/types/infer.go
@@ -40,6 +40,13 @@ func (check *Checker) infer(posn positioner, tparams []*TypeParam, targs []Type,
}()
}
+ if traceInference {
+ check.dump("-- inferA %s%s ➞ %s", tparams, params, targs)
+ defer func() {
+ check.dump("=> inferA %s ➞ %s", tparams, result)
+ }()
+ }
+
// There must be at least one type parameter, and no more type arguments than type parameters.
n := len(tparams)
assert(n > 0 && len(targs) <= n)
@@ -53,6 +60,64 @@ func (check *Checker) infer(posn positioner, tparams []*TypeParam, targs []Type,
}
// len(targs) < n
+ const enableTparamRenaming = true
+ if enableTparamRenaming {
+ // For the purpose of type inference we must differentiate type parameters
+ // occurring in explicit type or value function arguments from the type
+ // parameters we are solving for via unification, because they may be the
+ // same in self-recursive calls. For example:
+ //
+ // func f[P *Q, Q any](p P, q Q) {
+ // f(p)
+ // }
+ //
+ // In this example, the fact that the P used in the instantation f[P] has
+ // the same pointer identity as the P we are trying to solve for via
+ // unification is coincidental: there is nothing special about recursive
+ // calls that should cause them to conflate the identity of type arguments
+ // with type parameters. To put it another way: any such self-recursive
+ // call is equivalent to a mutually recursive call, which does not run into
+ // any problems of type parameter identity. For example, the following code
+ // is equivalent to the code above.
+ //
+ // func f[P interface{*Q}, Q any](p P, q Q) {
+ // f2(p)
+ // }
+ //
+ // func f2[P interface{*Q}, Q any](p P, q Q) {
+ // f(p)
+ // }
+ //
+ // We can turn the first example into the second example by renaming type
+ // parameters in the original signature to give them a new identity. As an
+ // optimization, we do this only for self-recursive calls.
+
+ // We can detect if we are in a self-recursive call by comparing the
+ // identity of the first type parameter in the current function with the
+ // first type parameter in tparams. This works because type parameters are
+ // unique to their type parameter list.
+ selfRecursive := check.sig != nil && check.sig.tparams.Len() > 0 && tparams[0] == check.sig.tparams.At(0)
+
+ if selfRecursive {
+ // In self-recursive inference, rename the type parameters with new type
+ // parameters that are the same but for their pointer identity.
+ tparams2 := make([]*TypeParam, len(tparams))
+ for i, tparam := range tparams {
+ tname := NewTypeName(tparam.Obj().Pos(), tparam.Obj().Pkg(), tparam.Obj().Name(), nil)
+ tparams2[i] = NewTypeParam(tname, nil)
+ tparams2[i].index = tparam.index // == i
+ }
+
+ renameMap := makeRenameMap(tparams, tparams2)
+ for i, tparam := range tparams {
+ tparams2[i].bound = check.subst(posn.Pos(), tparam.bound, renameMap, nil)
+ }
+
+ tparams = tparams2
+ params = check.subst(posn.Pos(), params, renameMap, nil).(*Tuple)
+ }
+ }
+
// If we have more than 2 arguments, we may have arguments with named and unnamed types.
// If that is the case, permutate params and args such that the arguments with named
// types are first in the list. This doesn't affect type inference if all types are taken
@@ -402,6 +467,13 @@ func (w *tpWalker) isParameterizedTypeList(list []Type) bool {
func (check *Checker) inferB(posn positioner, tparams []*TypeParam, targs []Type) (types []Type, index int) {
assert(len(tparams) >= len(targs) && len(targs) > 0)
+ if traceInference {
+ check.dump("-- inferB %s ➞ %s", tparams, targs)
+ defer func() {
+ check.dump("=> inferB %s ➞ %s", tparams, types)
+ }()
+ }
+
// Setup bidirectional unification between constraints
// and the corresponding type arguments (which may be nil!).
u := newUnifier(false)
@@ -415,27 +487,88 @@ func (check *Checker) inferB(posn positioner, tparams []*TypeParam, targs []Type
}
}
- // If a constraint has a structural type, unify the corresponding type parameter with it.
- for _, tpar := range tparams {
- sbound := structuralType(tpar)
- if sbound != nil {
- // If the structural type is the underlying type of a single
- // defined type in the constraint, use that defined type instead.
- if named, _ := tpar.singleType().(*Named); named != nil {
- sbound = named
- }
- if !u.unify(tpar, sbound) {
- // TODO(gri) improve error message by providing the type arguments
- // which we know already
- check.errorf(posn, _InvalidTypeArg, "%s does not match %s", tpar, sbound)
- return nil, 0
+ // Repeatedly apply constraint type inference as long as
+ // there are still unknown type arguments and progress is
+ // being made.
+ //
+ // This is an O(n^2) algorithm where n is the number of
+ // type parameters: if there is progress (and iteration
+ // continues), at least one type argument is inferred
+ // per iteration and we have a doubly nested loop.
+ // In practice this is not a problem because the number
+ // of type parameters tends to be very small (< 5 or so).
+ // (It should be possible for unification to efficiently
+ // signal newly inferred type arguments; then the loops
+ // here could handle the respective type parameters only,
+ // but that will come at a cost of extra complexity which
+ // may not be worth it.)
+ for n := u.x.unknowns(); n > 0; {
+ nn := n
+
+ for i, tpar := range tparams {
+ // If there is a core term (i.e., a core type with tilde information)
+ // unify the type parameter with the core type.
+ if core, single := coreTerm(tpar); core != nil {
+ // A type parameter can be unified with its core type in two cases.
+ tx := u.x.at(i)
+ switch {
+ case tx != nil:
+ // The corresponding type argument tx is known.
+ // In this case, if the core type has a tilde, the type argument's underlying
+ // type must match the core type, otherwise the type argument and the core type
+ // must match.
+ // If tx is an external type parameter, don't consider its underlying type
+ // (which is an interface). Core type unification will attempt to unify against
+ // core.typ.
+ // Note also that even with inexact unification we cannot leave away the under
+ // call here because it's possible that both tx and core.typ are named types,
+ // with under(tx) being a (named) basic type matching core.typ. Such cases do
+ // not match with inexact unification.
+ if core.tilde && !isTypeParam(tx) {
+ tx = under(tx)
+ }
+ if !u.unify(tx, core.typ) {
+ // TODO(gri) improve error message by providing the type arguments
+ // which we know already
+ // Don't use term.String() as it always qualifies types, even if they
+ // are in the current package.
+ tilde := ""
+ if core.tilde {
+ tilde = "~"
+ }
+ check.errorf(posn, _InvalidTypeArg, "%s does not match %s%s", tpar, tilde, core.typ)
+ return nil, 0
+ }
+
+ case single && !core.tilde:
+ // The corresponding type argument tx is unknown and there's a single
+ // specific type and no tilde.
+ // In this case the type argument must be that single type; set it.
+ u.x.set(i, core.typ)
+
+ default:
+ // Unification is not possible and no progress was made.
+ continue
+ }
+
+ // The number of known type arguments may have changed.
+ nn = u.x.unknowns()
+ if nn == 0 {
+ break // all type arguments are known
+ }
}
}
+
+ assert(nn <= n)
+ if nn == n {
+ break // no progress
+ }
+ n = nn
}
// u.x.types() now contains the incoming type arguments plus any additional type
- // arguments which were inferred from structural types. The newly inferred non-
- // nil entries may still contain references to other type parameters.
+ // arguments which were inferred from core terms. The newly inferred non-nil
+ // entries may still contain references to other type parameters.
// For instance, for [A any, B interface{ []C }, C interface{ *A }], if A == int
// was given, unification produced the type list [int, []C, *A]. We eliminate the
// remaining type parameters by substituting the type parameters in this type list
@@ -503,8 +636,8 @@ func (check *Checker) inferB(posn positioner, tparams []*TypeParam, targs []Type
}
// Once nothing changes anymore, we may still have type parameters left;
- // e.g., a structural constraint *P may match a type parameter Q but we
- // don't have any type arguments to fill in for *P or Q (issue #45548).
+ // e.g., a constraint with core type *P may match a type parameter Q but
+ // we don't have any type arguments to fill in for *P or Q (issue #45548).
// Don't let such inferences escape, instead nil them out.
for i, typ := range types {
if typ != nil && isParameterized(tparams, typ) {
@@ -524,6 +657,42 @@ func (check *Checker) inferB(posn positioner, tparams []*TypeParam, targs []Type
return
}
+// If the type parameter has a single specific type S, coreTerm returns (S, true).
+// Otherwise, if tpar has a core type T, it returns a term corresponding to that
+// core type and false. In that case, if any term of tpar has a tilde, the core
+// term has a tilde. In all other cases coreTerm returns (nil, false).
+func coreTerm(tpar *TypeParam) (*term, bool) {
+ n := 0
+ var single *term // valid if n == 1
+ var tilde bool
+ tpar.is(func(t *term) bool {
+ if t == nil {
+ assert(n == 0)
+ return false // no terms
+ }
+ n++
+ single = t
+ if t.tilde {
+ tilde = true
+ }
+ return true
+ })
+ if n == 1 {
+ if debug {
+ assert(debug && under(single.typ) == coreType(tpar))
+ }
+ return single, true
+ }
+ if typ := coreType(tpar); typ != nil {
+ // A core type is always an underlying type.
+ // If any term of tpar has a tilde, we don't
+ // have a precise core type and we must return
+ // a tilde as well.
+ return &term{tilde, typ}, false
+ }
+ return nil, false
+}
+
type cycleFinder struct {
tparams []*TypeParam
types []Type
diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go
index 4aeaeb7f11..4b8e3d4661 100644
--- a/src/go/types/instantiate.go
+++ b/src/go/types/instantiate.go
@@ -204,7 +204,7 @@ func (check *Checker) implements(V, T Type) error {
// If T is comparable, V must be comparable.
// Remember as a pending error and report only if we don't have a more specific error.
var pending error
- if Ti.IsComparable() && ((Vi != nil && !Vi.IsComparable()) || (Vi == nil && !Comparable(V))) {
+ if Ti.IsComparable() && !comparable(V, false, nil, nil) {
pending = errorf("%s does not implement comparable", V)
}
diff --git a/src/go/types/interface.go b/src/go/types/interface.go
index b9d4660eb4..3db3580a91 100644
--- a/src/go/types/interface.go
+++ b/src/go/types/interface.go
@@ -56,7 +56,7 @@ func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
}
// set method receivers if necessary
- typ := new(Interface)
+ typ := (*Checker)(nil).newInterface()
for _, m := range methods {
if sig := m.typ.(*Signature); sig.recv == nil {
sig.recv = NewVar(m.pos, m.pkg, "", typ)
@@ -73,6 +73,15 @@ func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
return typ
}
+// check may be nil
+func (check *Checker) newInterface() *Interface {
+ typ := &Interface{check: check}
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
// MarkImplicit marks the interface t as implicit, meaning this interface
// corresponds to a constraint literal such as ~T or A|B without explicit
// interface embedding. MarkImplicit should be called before any concurrent use
@@ -141,6 +150,11 @@ func (t *Interface) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
// Implementation
+func (t *Interface) cleanup() {
+ t.check = nil
+ t.embedPos = nil
+}
+
func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) {
addEmbedded := func(pos token.Pos, typ Type) {
ityp.embeddeds = append(ityp.embeddeds, typ)
@@ -210,16 +224,10 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d
sortMethods(ityp.methods)
// (don't sort embeddeds: they must correspond to *embedPos entries)
- // Compute type set with a non-nil *Checker as soon as possible
- // to report any errors. Subsequent uses of type sets will use
- // this computed type set and won't need to pass in a *Checker.
- //
- // Pin the checker to the interface type in the interim, in case the type set
- // must be used before delayed funcs are processed (see issue #48234).
- // TODO(rfindley): clean up use of *Checker with computeInterfaceTypeSet
- ityp.check = check
+ // Compute type set as soon as possible to report any errors.
+ // Subsequent uses of type sets will use this computed type
+ // set and won't need to pass in a *Checker.
check.later(func() {
computeInterfaceTypeSet(check, iface.Pos(), ityp)
- ityp.check = nil
}).describef(iface, "compute type set for %s", ityp)
}
diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go
index f2f38be266..501c230357 100644
--- a/src/go/types/lookup.go
+++ b/src/go/types/lookup.go
@@ -66,12 +66,12 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
obj, index, indirect = lookupFieldOrMethod(T, addressable, pkg, name, false)
- // If we didn't find anything and if we have a type parameter with a structural constraint,
- // see if there is a matching field (but not a method, those need to be declared explicitly
- // in the constraint). If the structural constraint is a named pointer type (see above), we
- // are ok here because only fields are accepted as results.
+ // If we didn't find anything and if we have a type parameter with a core type,
+ // see if there is a matching field (but not a method, those need to be declared
+ // explicitly in the constraint). If the constraint is a named pointer type (see
+ // above), we are ok here because only fields are accepted as results.
if obj == nil && isTypeParam(T) {
- if t := structuralType(T); t != nil {
+ if t := coreType(T); t != nil {
obj, index, indirect = lookupFieldOrMethod(t, addressable, pkg, name, false)
if _, ok := obj.(*Var); !ok {
obj, index, indirect = nil, nil, false // accept fields (variables) only
@@ -424,18 +424,31 @@ func (check *Checker) funcString(f *Func) string {
// method required by V and whether it is missing or just has the wrong type.
// The receiver may be nil if assertableTo is invoked through an exported API call
// (such as AssertableTo), i.e., when all methods have been type-checked.
-// If the global constant forceStrict is set, assertions that are known to fail
-// are not permitted.
+// TODO(gri) replace calls to this function with calls to newAssertableTo.
func (check *Checker) assertableTo(V *Interface, T Type) (method, wrongType *Func) {
// no static check is required if T is an interface
// spec: "If T is an interface type, x.(T) asserts that the
// dynamic type of x implements the interface T."
- if IsInterface(T) && !forceStrict {
+ if IsInterface(T) {
return
}
+ // TODO(gri) fix this for generalized interfaces
return check.missingMethod(T, V, false)
}
+// newAssertableTo reports whether a value of type V can be asserted to have type T.
+// It also implements behavior for interfaces that currently are only permitted
+// in constraint position (we have not yet defined that behavior in the spec).
+func (check *Checker) newAssertableTo(V *Interface, T Type) error {
+ // no static check is required if T is an interface
+ // spec: "If T is an interface type, x.(T) asserts that the
+ // dynamic type of x implements the interface T."
+ if IsInterface(T) {
+ return nil
+ }
+ return check.implements(T, V)
+}
+
// deref dereferences typ if it is a *Pointer and returns its base and true.
// Otherwise it returns (typ, false).
func deref(typ Type) (Type, bool) {
diff --git a/src/go/types/named.go b/src/go/types/named.go
index 5e84c39776..5b84e0653b 100644
--- a/src/go/types/named.go
+++ b/src/go/types/named.go
@@ -72,11 +72,31 @@ func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tpar
}
// Ensure that typ is always expanded and sanity-checked.
if check != nil {
- check.defTypes = append(check.defTypes, typ)
+ check.needsCleanup(typ)
}
return typ
}
+func (t *Named) cleanup() {
+ // Ensure that every defined type created in the course of type-checking has
+ // either non-*Named underlying, or is unresolved.
+ //
+ // This guarantees that we don't leak any types whose underlying is *Named,
+ // because any unresolved instances will lazily compute their underlying by
+ // substituting in the underlying of their origin. The origin must have
+ // either been imported or type-checked and expanded here, and in either case
+ // its underlying will be fully expanded.
+ switch t.underlying.(type) {
+ case nil:
+ if t.resolver == nil {
+ panic("nil underlying")
+ }
+ case *Named:
+ t.under() // t.under may add entries to check.cleaners
+ }
+ t.check = nil
+}
+
// Obj returns the type name for the declaration defining the named type t. For
// instantiated types, this is the type name of the base type.
func (t *Named) Obj() *TypeName {
@@ -362,11 +382,11 @@ func expandNamed(ctxt *Context, n *Named, instPos token.Pos) (tparams *TypeParam
// that it wasn't substituted. In this case we need to create a new
// *Interface before modifying receivers.
if iface == n.orig.underlying {
- iface = &Interface{
- embeddeds: iface.embeddeds,
- complete: iface.complete,
- implicit: iface.implicit, // should be false but be conservative
- }
+ old := iface
+ iface = check.newInterface()
+ iface.embeddeds = old.embeddeds
+ iface.complete = old.complete
+ iface.implicit = old.implicit // should be false but be conservative
underlying = iface
}
iface.methods = methods
diff --git a/src/go/types/predicates.go b/src/go/types/predicates.go
index 23dcd7274d..0360f27ee6 100644
--- a/src/go/types/predicates.go
+++ b/src/go/types/predicates.go
@@ -33,7 +33,7 @@ func isBasic(t Type, info BasicInfo) bool {
// The allX predicates below report whether t is an X.
// If t is a type parameter the result is true if isX is true
// for all specified types of the type parameter's type set.
-// allX is an optimized version of isX(structuralType(t)) (which
+// allX is an optimized version of isX(coreType(t)) (which
// is the same as underIs(t, isX)).
func allBoolean(typ Type) bool { return allBasic(typ, IsBoolean) }
@@ -47,7 +47,7 @@ func allNumericOrString(typ Type) bool { return allBasic(typ, IsNumeric|IsString
// allBasic reports whether under(t) is a basic type with the specified info.
// If t is a type parameter, the result is true if isBasic(t, info) is true
// for all specific types of the type parameter's type set.
-// allBasic(t, info) is an optimized version of isBasic(structuralType(t), info).
+// allBasic(t, info) is an optimized version of isBasic(coreType(t), info).
func allBasic(t Type, info BasicInfo) bool {
if tpar, _ := t.(*TypeParam); tpar != nil {
return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
@@ -104,11 +104,12 @@ func isGeneric(t Type) bool {
// Comparable reports whether values of type T are comparable.
func Comparable(T Type) bool {
- return comparable(T, nil, nil)
+ return comparable(T, true, nil, nil)
}
+// If dynamic is set, non-type parameter interfaces are always comparable.
// If reportf != nil, it may be used to report why T is not comparable.
-func comparable(T Type, seen map[Type]bool, reportf func(string, ...interface{})) bool {
+func comparable(T Type, dynamic bool, seen map[Type]bool, reportf func(string, ...interface{})) bool {
if seen[T] {
return true
}
@@ -126,7 +127,7 @@ func comparable(T Type, seen map[Type]bool, reportf func(string, ...interface{})
return true
case *Struct:
for _, f := range t.fields {
- if !comparable(f.typ, seen, nil) {
+ if !comparable(f.typ, dynamic, seen, nil) {
if reportf != nil {
reportf("struct containing %s cannot be compared", f.typ)
}
@@ -135,7 +136,7 @@ func comparable(T Type, seen map[Type]bool, reportf func(string, ...interface{})
}
return true
case *Array:
- if !comparable(t.elem, seen, nil) {
+ if !comparable(t.elem, dynamic, seen, nil) {
if reportf != nil {
reportf("%s cannot be compared", t)
}
@@ -143,7 +144,7 @@ func comparable(T Type, seen map[Type]bool, reportf func(string, ...interface{})
}
return true
case *Interface:
- return !isTypeParam(T) || t.typeSet().IsComparable(seen)
+ return dynamic && !isTypeParam(T) || t.typeSet().IsComparable(seen)
}
return false
}
diff --git a/src/go/types/signature.go b/src/go/types/signature.go
index 8f89e931fb..a340ac701e 100644
--- a/src/go/types/signature.go
+++ b/src/go/types/signature.go
@@ -112,7 +112,8 @@ func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast
// - the receiver specification acts as local declaration for its type parameters, which may be blank
_, rname, rparams := check.unpackRecv(recvPar.List[0].Type, true)
if len(rparams) > 0 {
- sig.rparams = bindTParams(check.declareTypeParams(nil, rparams))
+ tparams := check.declareTypeParams(nil, rparams)
+ sig.rparams = bindTParams(tparams)
// Blank identifiers don't get declared, so naive type-checking of the
// receiver type expression would fail in Checker.collectParams below,
// when Checker.ident cannot resolve the _ to a type.
@@ -122,11 +123,10 @@ func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast
// lookup in the scope.
for i, p := range rparams {
if p.Name == "_" {
- tpar := sig.rparams.At(i)
if check.recvTParamMap == nil {
check.recvTParamMap = make(map[*ast.Ident]*TypeParam)
}
- check.recvTParamMap[p] = tpar
+ check.recvTParamMap[p] = tparams[i]
}
}
// determine receiver type to get its type parameters
@@ -142,22 +142,23 @@ func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast
}
}
// provide type parameter bounds
- // - only do this if we have the right number (otherwise an error is reported elsewhere)
- if sig.RecvTypeParams().Len() == len(recvTParams) {
- // We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, sig.RecvTypeParams().Len())
- for i, t := range sig.RecvTypeParams().list() {
- list[i] = t
- check.mono.recordCanon(t, recvTParams[i])
- }
- smap := makeSubstMap(recvTParams, list)
- for i, tpar := range sig.RecvTypeParams().list() {
- bound := recvTParams[i].bound
- // bound is (possibly) parameterized in the context of the
- // receiver type declaration. Substitute parameters for the
- // current context.
- tpar.bound = check.subst(tpar.obj.pos, bound, smap, nil)
+ if len(tparams) == len(recvTParams) {
+ smap := makeRenameMap(recvTParams, tparams)
+ for i, tpar := range tparams {
+ recvTPar := recvTParams[i]
+ check.mono.recordCanon(tpar, recvTPar)
+ // recvTPar.bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the current
+ // context.
+ tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil)
}
+ } else if len(tparams) < len(recvTParams) {
+ // Reporting an error here is a stop-gap measure to avoid crashes in the
+ // compiler when a type parameter/argument cannot be inferred later. It
+ // may lead to follow-on errors (see issues #51339, #51343).
+ // TODO(gri) find a better solution
+ got := measure(len(tparams), "type parameter")
+ check.errorf(recvPar, _BadRecv, "got %s, but receiver base type declares %d", got, len(recvTParams))
}
}
}
@@ -192,66 +193,77 @@ func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast
switch len(recvList) {
case 0:
// error reported by resolver
- recv = NewParam(0, nil, "", Typ[Invalid]) // ignore recv below
+ recv = NewParam(token.NoPos, nil, "", Typ[Invalid]) // ignore recv below
default:
// more than one receiver
- check.error(recvList[len(recvList)-1], _BadRecv, "method must have exactly one receiver")
+ check.error(recvList[len(recvList)-1], _InvalidRecv, "method must have exactly one receiver")
fallthrough // continue with first receiver
case 1:
recv = recvList[0]
}
+ sig.recv = recv
- // TODO(gri) We should delay rtyp expansion to when we actually need the
- // receiver; thus all checks here should be delayed to later.
- rtyp, _ := deref(recv.typ)
+ // Delay validation of receiver type as it may cause premature expansion
+ // of types the receiver type is dependent on (see issues #51232, #51233).
+ check.later(func() {
+ rtyp, _ := deref(recv.typ)
- // spec: "The receiver type must be of the form T or *T where T is a type name."
- // (ignore invalid types - error was reported before)
- if rtyp != Typ[Invalid] {
- var err string
- switch T := rtyp.(type) {
- case *Named:
- T.resolve(check.bestContext(nil))
- // The receiver type may be an instantiated type referred to
- // by an alias (which cannot have receiver parameters for now).
- if T.TypeArgs() != nil && sig.RecvTypeParams() == nil {
- check.errorf(atPos(recv.pos), _InvalidRecv, "cannot define methods on instantiated type %s", recv.typ)
- break
- }
- // spec: "The type denoted by T is called the receiver base type; it must not
- // be a pointer or interface type and it must be declared in the same package
- // as the method."
- if T.obj.pkg != check.pkg {
- err = "type not defined in this package"
- } else {
- // The underlying type of a receiver base type can be a type parameter;
- // e.g. for methods with a generic receiver T[P] with type T[P any] P.
- underIs(T, func(u Type) bool {
- switch u := u.(type) {
- case *Basic:
- // unsafe.Pointer is treated like a regular pointer
- if u.kind == UnsafePointer {
- err = "unsafe.Pointer"
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if rtyp != Typ[Invalid] {
+ var err string
+ switch T := rtyp.(type) {
+ case *Named:
+ T.resolve(check.bestContext(nil))
+ // The receiver type may be an instantiated type referred to
+ // by an alias (which cannot have receiver parameters for now).
+ if T.TypeArgs() != nil && sig.RecvTypeParams() == nil {
+ check.errorf(recv, _InvalidRecv, "cannot define methods on instantiated type %s", recv.typ)
+ break
+ }
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ if compilerErrorMessages {
+ check.errorf(recv, _InvalidRecv, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ } else {
+ // The underlying type of a receiver base type can be a type parameter;
+ // e.g. for methods with a generic receiver T[P] with type T[P any] P.
+ // TODO(gri) Such declarations are currently disallowed.
+ // Revisit the need for underIs.
+ underIs(T, func(u Type) bool {
+ switch u := u.(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ return false
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
return false
}
- case *Pointer, *Interface:
- err = "pointer or interface type"
- return false
- }
- return true
- })
+ return true
+ })
+ }
+ case *Basic:
+ err = "basic or unnamed type"
+ if compilerErrorMessages {
+ check.errorf(recv, _InvalidRecv, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ default:
+ check.errorf(recv, _InvalidRecv, "invalid receiver type %s", recv.typ)
+ }
+ if err != "" {
+ check.errorf(recv, _InvalidRecv, "invalid receiver type %s (%s)", recv.typ, err)
}
- case *Basic:
- err = "basic or unnamed type"
- default:
- check.errorf(recv, _InvalidRecv, "invalid receiver type %s", recv.typ)
- }
- if err != "" {
- check.errorf(recv, _InvalidRecv, "invalid receiver type %s (%s)", recv.typ, err)
- // ok to continue
}
- }
- sig.recv = recv
+ }).describef(recv, "validate receiver %s", recv)
}
sig.params = NewTuple(params...)
diff --git a/src/go/types/stmt.go b/src/go/types/stmt.go
index b32eb18bef..9ebfbb6d63 100644
--- a/src/go/types/stmt.go
+++ b/src/go/types/stmt.go
@@ -418,9 +418,9 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
if ch.mode == invalid || val.mode == invalid {
return
}
- u := structuralType(ch.typ)
+ u := coreType(ch.typ)
if u == nil {
- check.invalidOp(inNode(s, s.Arrow), _InvalidSend, "cannot send to %s: no structural type", &ch)
+ check.invalidOp(inNode(s, s.Arrow), _InvalidSend, "cannot send to %s: no core type", &ch)
return
}
uch, _ := u.(*Chan)
@@ -821,8 +821,6 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
case *ast.RangeStmt:
inner |= breakOk | continueOk
- check.openScope(s, "for")
- defer check.closeScope()
// check expression to iterate over
var x operand
@@ -831,12 +829,12 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
// determine key/value types
var key, val Type
if x.mode != invalid {
- // Ranging over a type parameter is permitted if it has a structural type.
+ // Ranging over a type parameter is permitted if it has a core type.
var cause string
- u := structuralType(x.typ)
+ u := coreType(x.typ)
switch t := u.(type) {
case nil:
- cause = check.sprintf("%s has no structural type", x.typ)
+ cause = check.sprintf("%s has no core type", x.typ)
case *Chan:
if s.Value != nil {
check.softErrorf(s.Value, _InvalidIterVar, "range over %s permits only one iteration variable", &x)
@@ -857,6 +855,11 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
}
}
+ // Open the for-statement block scope now, after the range clause.
+ // Iteration variables declared with := need to go in this scope (was issue #51437).
+ check.openScope(s, "range")
+ defer check.closeScope()
+
// check assignment to/declaration of iteration variables
// (irregular assignment, cannot easily map to existing assignment checks)
@@ -865,9 +868,7 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
rhs := [2]Type{key, val} // key, val may be nil
if s.Tok == token.DEFINE {
- // short variable declaration; variable scope starts after the range clause
- // (the for loop opens a new scope, so variables on the lhs never redeclare
- // previously declared variables)
+ // short variable declaration
var vars []*Var
for i, lhs := range lhs {
if lhs == nil {
@@ -904,12 +905,8 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
// declare variables
if len(vars) > 0 {
- scopePos := s.X.End()
+ scopePos := s.Body.Pos()
for _, obj := range vars {
- // spec: "The scope of a constant or variable identifier declared inside
- // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl
- // for short variable declarations) and ends at the end of the innermost
- // containing block."
check.declare(check.scope, nil /* recordDef already called */, obj, scopePos)
}
} else {
diff --git a/src/go/types/subst.go b/src/go/types/subst.go
index 0cce46ac46..4b4a0f4ad6 100644
--- a/src/go/types/subst.go
+++ b/src/go/types/subst.go
@@ -21,6 +21,17 @@ func makeSubstMap(tpars []*TypeParam, targs []Type) substMap {
return proj
}
+// makeRenameMap is like makeSubstMap, but creates a map used to rename type
+// parameters in from with the type parameters in to.
+func makeRenameMap(from, to []*TypeParam) substMap {
+ assert(len(from) == len(to))
+ proj := make(substMap, len(from))
+ for i, tpar := range from {
+ proj[tpar] = to[i]
+ }
+ return proj
+}
+
func (m substMap) empty() bool {
return len(m) == 0
}
@@ -149,7 +160,10 @@ func (subst *subster) typ(typ Type) Type {
methods, mcopied := subst.funcList(t.methods)
embeddeds, ecopied := subst.typeList(t.embeddeds)
if mcopied || ecopied {
- iface := &Interface{embeddeds: embeddeds, implicit: t.implicit, complete: t.complete}
+ iface := subst.check.newInterface()
+ iface.embeddeds = embeddeds
+ iface.implicit = t.implicit
+ iface.complete = t.complete
// If we've changed the interface type, we may need to replace its
// receiver if the receiver type is the original interface. Receivers of
// *Named type are replaced during named type expansion.
diff --git a/src/go/types/termlist.go b/src/go/types/termlist.go
index c4ab0e037e..94e49caee0 100644
--- a/src/go/types/termlist.go
+++ b/src/go/types/termlist.go
@@ -92,15 +92,6 @@ func (xl termlist) norm() termlist {
return rl
}
-// If the type set represented by xl is specified by a single (non-𝓤) term,
-// singleType returns that type. Otherwise it returns nil.
-func (xl termlist) singleType() Type {
- if nl := xl.norm(); len(nl) == 1 {
- return nl[0].typ // if nl.isAll() then typ is nil, which is ok
- }
- return nil
-}
-
// union returns the union xl ∪ yl.
func (xl termlist) union(yl termlist) termlist {
return append(xl, yl...).norm()
diff --git a/src/go/types/termlist_test.go b/src/go/types/termlist_test.go
index dddca7a682..f0d58ac1bc 100644
--- a/src/go/types/termlist_test.go
+++ b/src/go/types/termlist_test.go
@@ -106,35 +106,6 @@ func TestTermlistNorm(t *testing.T) {
}
}
-func TestTermlistSingleType(t *testing.T) {
- // helper to deal with nil types
- tstring := func(typ Type) string {
- if typ == nil {
- return "nil"
- }
- return typ.String()
- }
-
- for test, want := range map[string]string{
- "∅": "nil",
- "𝓤": "nil",
- "int": "int",
- "myInt": "myInt",
- "~int": "int",
- "~int ∪ string": "nil",
- "~int ∪ myInt": "int",
- "∅ ∪ int": "int",
- "∅ ∪ ~int": "int",
- "∅ ∪ ~int ∪ string": "nil",
- } {
- xl := maketl(test)
- got := tstring(xl.singleType())
- if got != want {
- t.Errorf("(%v).singleType() == %v; want %v", test, got, want)
- }
- }
-}
-
func TestTermlistUnion(t *testing.T) {
for _, test := range []struct {
xl, yl, want string
diff --git a/src/go/types/testdata/check/builtins.go2 b/src/go/types/testdata/check/builtins.go2
index c1accff016..861597399e 100644
--- a/src/go/types/testdata/check/builtins.go2
+++ b/src/go/types/testdata/check/builtins.go2
@@ -148,7 +148,7 @@ func _[
_ = make /* ERROR expects 2 or 3 arguments */ (S1)
_ = make(S1, 10, 20)
_ = make /* ERROR expects 2 or 3 arguments */ (S1, 10, 20, 30)
- _ = make(S2 /* ERROR cannot make S2: no structural type */ , 10)
+ _ = make(S2 /* ERROR cannot make S2: no core type */ , 10)
type M0 map[string]int
_ = make(map[string]int)
@@ -156,7 +156,7 @@ func _[
_ = make(M1)
_ = make(M1, 10)
_ = make/* ERROR expects 1 or 2 arguments */(M1, 10, 20)
- _ = make(M2 /* ERROR cannot make M2: no structural type */ )
+ _ = make(M2 /* ERROR cannot make M2: no core type */ )
type C0 chan int
_ = make(chan int)
@@ -164,7 +164,7 @@ func _[
_ = make(C1)
_ = make(C1, 10)
_ = make/* ERROR expects 1 or 2 arguments */(C1, 10, 20)
- _ = make(C2 /* ERROR cannot make C2: no structural type */ )
+ _ = make(C2 /* ERROR cannot make C2: no core type */ )
_ = make(C3)
}
diff --git a/src/go/types/testdata/check/builtins.src b/src/go/types/testdata/check/builtins.src
index 7fd6a4b032..8a4c207a05 100644
--- a/src/go/types/testdata/check/builtins.src
+++ b/src/go/types/testdata/check/builtins.src
@@ -15,9 +15,9 @@ func append1() {
var x int
var s []byte
_ = append() // ERROR not enough arguments
- _ = append("foo" /* ERROR not a slice */ )
- _ = append(nil /* ERROR not a slice */ , s)
- _ = append(x /* ERROR not a slice */ , s)
+ _ = append("foo" /* ERROR must be a slice */ )
+ _ = append(nil /* ERROR must be a slice */ , s)
+ _ = append(x /* ERROR must be a slice */ , s)
_ = append(s)
_ = append(s, nil...)
append /* ERROR not used */ (s)
@@ -77,7 +77,7 @@ func append3() {
_ = append(f2())
_ = append(f3())
_ = append(f5())
- _ = append(ff /* ERROR not a slice */ ()) // TODO(gri) better error message
+ _ = append(ff /* ERROR must be a slice */ ()) // TODO(gri) better error message
}
func cap1() {
diff --git a/src/go/types/testdata/check/funcinference.go2 b/src/go/types/testdata/check/funcinference.go2
index f04b76ca1a..45d0781cd7 100644
--- a/src/go/types/testdata/check/funcinference.go2
+++ b/src/go/types/testdata/check/funcinference.go2
@@ -8,21 +8,21 @@ import "strconv"
type any interface{}
-func f0[A any, B interface{~*C}, C interface{~*D}, D interface{~*A}](A, B, C, D) {}
+func f0[A any, B interface{*C}, C interface{*D}, D interface{*A}](A, B, C, D) {}
func _() {
f := f0[string]
f("a", nil, nil, nil)
f0("a", nil, nil, nil)
}
-func f1[A any, B interface{~*A}](A, B) {}
+func f1[A any, B interface{*A}](A, B) {}
func _() {
f := f1[int]
f(int(0), new(int))
f1(int(0), new(int))
}
-func f2[A any, B interface{~[]A}](A, B) {}
+func f2[A any, B interface{[]A}](A, B) {}
func _() {
f := f2[byte]
f(byte(0), []byte{})
@@ -38,7 +38,7 @@ func _() {
// f3(x, &x, &x)
// }
-func f4[A any, B interface{~[]C}, C interface{~*A}](A, B, C) {}
+func f4[A any, B interface{[]C}, C interface{*A}](A, B, C) {}
func _() {
f := f4[int]
var x int
@@ -46,7 +46,7 @@ func _() {
f4(x, []*int{}, &x)
}
-func f5[A interface{~struct{b B; c C}}, B any, C interface{~*B}](x B) A { panic(0) }
+func f5[A interface{struct{b B; c C}}, B any, C interface{*B}](x B) A { panic(0) }
func _() {
x := f5(1.2)
var _ float64 = x.b
@@ -79,7 +79,7 @@ var _ = Double(MySlice{1})
type Setter[B any] interface {
Set(string)
- ~*B
+ *B
}
func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
diff --git a/src/go/types/testdata/check/typeinference.go2 b/src/go/types/testdata/check/typeinference.go2
index 8876ccaa4e..3d3380da9c 100644
--- a/src/go/types/testdata/check/typeinference.go2
+++ b/src/go/types/testdata/check/typeinference.go2
@@ -14,7 +14,7 @@ func _() {
}
// recursive inference
-type Tr[A any, B ~*C, C ~*D, D ~*A] int
+type Tr[A any, B *C, C *D, D *A] int
func _() {
var x Tr[string]
var y Tr[string, ***string, **string, *string]
@@ -25,11 +25,11 @@ func _() {
}
// other patterns of inference
-type To0[A any, B ~[]A] int
-type To1[A any, B ~struct{a A}] int
-type To2[A any, B ~[][]A] int
-type To3[A any, B ~[3]*A] int
-type To4[A any, B any, C ~struct{a A; b B}] int
+type To0[A any, B []A] int
+type To1[A any, B struct{a A}] int
+type To2[A any, B [][]A] int
+type To3[A any, B [3]*A] int
+type To4[A any, B any, C struct{a A; b B}] int
func _() {
var _ To0[int]
var _ To1[int]
diff --git a/src/go/types/testdata/check/typeparams.go2 b/src/go/types/testdata/check/typeparams.go2
index d5b9ed6e77..29a3b16cd6 100644
--- a/src/go/types/testdata/check/typeparams.go2
+++ b/src/go/types/testdata/check/typeparams.go2
@@ -134,11 +134,11 @@ func _[T interface{ ~string }] (x T, i, j, k int) { var _ T = x[i:j:k /* ERROR 3
type myByte1 []byte
type myByte2 []byte
func _[T interface{ []byte | myByte1 | myByte2 }] (x T, i, j, k int) { var _ T = x[i:j:k] }
-func _[T interface{ []byte | myByte1 | []int }] (x T, i, j, k int) { var _ T = x /* ERROR no structural type */ [i:j:k] }
+func _[T interface{ []byte | myByte1 | []int }] (x T, i, j, k int) { var _ T = x /* ERROR no core type */ [i:j:k] }
func _[T interface{ []byte | myByte1 | myByte2 | string }] (x T, i, j, k int) { var _ T = x[i:j] }
func _[T interface{ []byte | myByte1 | myByte2 | string }] (x T, i, j, k int) { var _ T = x[i:j:k /* ERROR 3-index slice of string */ ] }
-func _[T interface{ []byte | myByte1 | []int | string }] (x T, i, j, k int) { var _ T = x /* ERROR no structural type */ [i:j] }
+func _[T interface{ []byte | myByte1 | []int | string }] (x T, i, j, k int) { var _ T = x /* ERROR no core type */ [i:j] }
// len/cap built-ins
@@ -230,7 +230,7 @@ func _[
for _, _ = range s1 {}
var s2 S2
- for range s2 /* ERROR cannot range over s2.*no structural type */ {}
+ for range s2 /* ERROR cannot range over s2.*no core type */ {}
var a0 []int
for range a0 {}
@@ -243,7 +243,7 @@ func _[
for _, _ = range a1 {}
var a2 A2
- for range a2 /* ERROR cannot range over a2.*no structural type */ {}
+ for range a2 /* ERROR cannot range over a2.*no core type */ {}
var p0 *[10]int
for range p0 {}
@@ -256,7 +256,7 @@ func _[
for _, _ = range p1 {}
var p2 P2
- for range p2 /* ERROR cannot range over p2.*no structural type */ {}
+ for range p2 /* ERROR cannot range over p2.*no core type */ {}
var m0 map[string]int
for range m0 {}
@@ -269,7 +269,7 @@ func _[
for _, _ = range m1 {}
var m2 M2
- for range m2 /* ERROR cannot range over m2.*no structural type */ {}
+ for range m2 /* ERROR cannot range over m2.*no core type */ {}
}
// type inference checks
diff --git a/src/go/types/testdata/examples/inference.go2 b/src/go/types/testdata/examples/inference.go2
index ffa30ee2cb..e59a544660 100644
--- a/src/go/types/testdata/examples/inference.go2
+++ b/src/go/types/testdata/examples/inference.go2
@@ -78,7 +78,7 @@ func _() {
related1(si, "foo" /* ERROR cannot use "foo" */ )
}
-func related2[Elem any, Slice interface{~[]Elem}](e Elem, s Slice) {}
+func related2[Elem any, Slice interface{[]Elem}](e Elem, s Slice) {}
func _() {
// related2 can be called with explicit instantiation.
@@ -109,16 +109,8 @@ func _() {
related3[int, []int]()
related3[byte, List[byte]]()
- // Alternatively, the 2nd type argument can be inferred
- // from the first one through constraint type inference.
- related3[int]()
-
- // The inferred type is the structural type of the Slice
- // type parameter.
- var _ []int = related3[int]()
-
- // It is not the defined parameterized type List.
- type anotherList []float32
- var _ anotherList = related3[float32]() // valid
- var _ anotherList = related3 /* ERROR cannot use .* \(value of type List\[float32\]\) as anotherList */ [float32, List[float32]]()
+ // The 2nd type argument cannot be inferred from the first
+ // one because there's two possible choices: []Elem and
+ // List[Elem].
+ related3 /* ERROR cannot infer Slice */ [int]()
}
diff --git a/src/go/types/testdata/examples/methods.go2 b/src/go/types/testdata/examples/methods.go2
index 1d76d553dc..a46f789d60 100644
--- a/src/go/types/testdata/examples/methods.go2
+++ b/src/go/types/testdata/examples/methods.go2
@@ -35,7 +35,7 @@ func (t T1[[ /* ERROR must be an identifier */ ]int]) m2() {}
// style. In m3 below, int is the name of the local receiver type parameter
// and it shadows the predeclared identifier int which then cannot be used
// anymore as expected.
-// This is no different from locally redelaring a predeclared identifier
+// This is no different from locally re-declaring a predeclared identifier
// and usually should be avoided. There are some notable exceptions; e.g.,
// sometimes it makes sense to use the identifier "copy" which happens to
// also be the name of a predeclared built-in function.
diff --git a/src/go/types/testdata/examples/types.go2 b/src/go/types/testdata/examples/types.go2
index 33642fa42f..1e83f89883 100644
--- a/src/go/types/testdata/examples/types.go2
+++ b/src/go/types/testdata/examples/types.go2
@@ -298,7 +298,7 @@ func _[T interface {~int|~float64}]() {
// It is possible to create composite literals of type parameter
// type as long as it's possible to create a composite literal
-// of the structural type of the type parameter's constraint.
+// of the core type of the type parameter's constraint.
func _[P interface{ ~[]int }]() P {
return P{}
return P{1, 2, 3}
@@ -313,7 +313,7 @@ func _[P interface{ ~[]E }, E interface{ map[string]P } ]() P {
}
// This is a degenerate case with a singleton type set, but we can create
-// composite literals even if the structural type is a defined type.
+// composite literals even if the core type is a defined type.
type MyInts []int
func _[P MyInts]() P {
diff --git a/src/go/types/testdata/examples/typesets.go2 b/src/go/types/testdata/examples/typesets.go2
index cf01072d8c..fcddf1f1a5 100644
--- a/src/go/types/testdata/examples/typesets.go2
+++ b/src/go/types/testdata/examples/typesets.go2
@@ -35,7 +35,7 @@ func _() int {
return deref(p)
}
-func addrOfCopy[V any, P ~*V](v V) P {
+func addrOfCopy[V any, P *V](v V) P {
return &v
}
diff --git a/src/go/types/testdata/fixedbugs/issue43527.go2 b/src/go/types/testdata/fixedbugs/issue43527.go2
index e4bcee51fe..2955c261f9 100644
--- a/src/go/types/testdata/fixedbugs/issue43527.go2
+++ b/src/go/types/testdata/fixedbugs/issue43527.go2
@@ -9,7 +9,7 @@ const L = 10
type (
_ [L]struct{}
_ [A /* ERROR undeclared name A for array length */ ]struct{}
- _ [B /* ERROR not an expression */ ]struct{}
+ _ [B /* ERROR invalid array length B */ ]struct{}
_[A any] struct{}
B int
diff --git a/src/go/types/testdata/fixedbugs/issue43671.go2 b/src/go/types/testdata/fixedbugs/issue43671.go2
index 46ac51ebdd..3c78f85aa4 100644
--- a/src/go/types/testdata/fixedbugs/issue43671.go2
+++ b/src/go/types/testdata/fixedbugs/issue43671.go2
@@ -12,7 +12,7 @@ type C4 interface{ chan int | chan<- int }
type C5[T any] interface{ ~chan T | <-chan T }
func _[T any](ch T) {
- <-ch // ERROR cannot receive from ch .* no structural type
+ <-ch // ERROR cannot receive from ch .* no core type
}
func _[T C0](ch T) {
@@ -28,7 +28,7 @@ func _[T C2](ch T) {
}
func _[T C3](ch T) {
- <-ch // ERROR cannot receive from ch .* no structural type
+ <-ch // ERROR cannot receive from ch .* no core type
}
func _[T C4](ch T) {
diff --git a/src/go/types/testdata/fixedbugs/issue45548.go2 b/src/go/types/testdata/fixedbugs/issue45548.go2
index b8ba0ad4a7..01c9672745 100644
--- a/src/go/types/testdata/fixedbugs/issue45548.go2
+++ b/src/go/types/testdata/fixedbugs/issue45548.go2
@@ -4,7 +4,7 @@
package p
-func f[F interface{~*Q}, G interface{~*R}, Q, R any](q Q, r R) {}
+func f[F interface{*Q}, G interface{*R}, Q, R any](q Q, r R) {}
func _() {
f[*float64, *int](1, 2)
diff --git a/src/go/types/testdata/fixedbugs/issue47115.go2 b/src/go/types/testdata/fixedbugs/issue47115.go2
index f71e06c9b2..a0bfe38de8 100644
--- a/src/go/types/testdata/fixedbugs/issue47115.go2
+++ b/src/go/types/testdata/fixedbugs/issue47115.go2
@@ -12,7 +12,7 @@ type C4 interface{ chan int | chan<- int }
type C5[T any] interface{ ~chan T | chan<- T }
func _[T any](ch T) {
- ch <- /* ERROR cannot send to ch .* no structural type */ 0
+ ch <- /* ERROR cannot send to ch .* no core type */ 0
}
func _[T C0](ch T) {
@@ -28,7 +28,7 @@ func _[T C2](ch T) {
}
func _[T C3](ch T) {
- ch <- /* ERROR cannot send to ch .* no structural type */ 0
+ ch <- /* ERROR cannot send to ch .* no core type */ 0
}
func _[T C4](ch T) {
diff --git a/src/go/types/testdata/fixedbugs/issue48619.go2 b/src/go/types/testdata/fixedbugs/issue48619.go2
index d33040d78f..72eea1ef59 100644
--- a/src/go/types/testdata/fixedbugs/issue48619.go2
+++ b/src/go/types/testdata/fixedbugs/issue48619.go2
@@ -2,24 +2,19 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This issue is still open:
-// - the error messages could be better or are incorrect
-// - unification fails due to stack overflow that is caught
-
package p
func f[P any](a, _ P) {
var x int
// TODO(gri) these error messages, while correct, could be better
- f(a, x /* ERROR type int of x does not match P */)
+ f(a, x /* ERROR type int of x does not match inferred type P for P */)
f(x, a /* ERROR type P of a does not match inferred type int for P */)
}
func g[P any](a, b P) {
g(a, b)
- // TODO(gri) these error messages are incorrect because the code is valid
- g(&a, & /* ERROR type \*P of &b does not match inferred type \*P for P */ b)
- g([]P{}, [ /* ERROR type \[\]P of \(\[\]P literal\) does not match inferred type \[\]P for P */ ]P{})
+ g(&a, &b)
+ g([]P{}, []P{})
// work-around: provide type argument explicitly
g[*P](&a, &b)
diff --git a/src/go/types/testdata/fixedbugs/issue48656.go2 b/src/go/types/testdata/fixedbugs/issue48656.go2
index 493f220e98..0f60f47120 100644
--- a/src/go/types/testdata/fixedbugs/issue48656.go2
+++ b/src/go/types/testdata/fixedbugs/issue48656.go2
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This issue is still open:
-// - the error messages are unclear
-// - unification fails due to stack overflow that is caught
-
package p
func f[P *Q, Q any](P, Q) {
- // TODO(gri) these error messages are unclear
- _ = f /* ERROR P does not match \*Q */ [P]
- _ = f /* ERROR cannot infer P */ [*P]
+ _ = f[P]
+}
+
+func f2[P /* ERROR instantiation cycle */ *Q, Q any](P, Q) {
+ _ = f2[*P]
}
diff --git a/src/go/types/testdata/fixedbugs/issue49482.go2 b/src/go/types/testdata/fixedbugs/issue49482.go2
new file mode 100644
index 0000000000..4c6579ed68
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue49482.go2
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+// The following is OK, per the special handling for type literals discussed in issue #49482.
+type _[P *struct{}] struct{}
+type _[P *int,] int
+type _[P (*int),] int
+
+const P = 2 // declare P to avoid noisy 'undeclared name' errors below.
+
+// The following parse as invalid array types.
+type _[P *int /* ERROR "int \(type\) is not an expression" */ ] int
+type _[P /* ERROR non-function P */ (*int)] int
+
+// The following should be parsed as a generic type, but is instead parsed as an array type.
+type _[P *struct /* ERROR "expected expression" */ {}| int /* ERROR "not an expression" */ ] struct{}
+
+// The following fails to parse, due to the '~'
+type _[P *struct /* ERROR "expected expression" */ {}|~ /* ERROR "expected operand" */ int] struct{}
+
+// This is fragile: 'var' synchronizes the parser, and we absorb the rest of the errors.
+var /* ERROR "expected ']'" */ _ /* ERROR "value or type" */
diff --git a/src/go/types/testdata/fixedbugs/issue49735.go2 b/src/go/types/testdata/fixedbugs/issue49735.go2
new file mode 100644
index 0000000000..50870226e4
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue49735.go2
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P1 any, P2 ~byte](s1 P1, s2 P2) {
+ _ = append(nil /* ERROR first argument to append must be a slice; have untyped nil */ , 0)
+ _ = append(s1 /* ERROR s1 .* has no core type */ , 0)
+ _ = append(s2 /* ERROR s2 .* has core type byte */ , 0)
+}
diff --git a/src/go/types/testdata/fixedbugs/issue50417.go2 b/src/go/types/testdata/fixedbugs/issue50417.go2
index b6454ab003..50487fa2ff 100644
--- a/src/go/types/testdata/fixedbugs/issue50417.go2
+++ b/src/go/types/testdata/fixedbugs/issue50417.go2
@@ -51,7 +51,7 @@ func f2[P interface{ Sfm; m() }](p P) {
var _ = f2[Sfm]
-// special case: structural type is a named pointer type
+// special case: core type is a named pointer type
type PSfm *Sfm
diff --git a/src/go/types/testdata/fixedbugs/issue50755.go2 b/src/go/types/testdata/fixedbugs/issue50755.go2
new file mode 100644
index 0000000000..afc7b2414c
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue50755.go2
@@ -0,0 +1,47 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The core type of M2 unifies with the type of m1
+// during function argument type inference.
+// M2's constraint is unnamed.
+func f1[K1 comparable, E1 any](m1 map[K1]E1) {}
+
+func f2[M2 map[string]int](m2 M2) {
+ f1(m2)
+}
+
+// The core type of M3 unifies with the type of m1
+// during function argument type inference.
+// M3's constraint is named.
+type Map3 map[string]int
+
+func f3[M3 Map3](m3 M3) {
+ f1(m3)
+}
+
+// The core type of M5 unifies with the core type of M4
+// during constraint type inference.
+func f4[M4 map[K4]int, K4 comparable](m4 M4) {}
+
+func f5[M5 map[K5]int, K5 comparable](m5 M5) {
+ f4(m5)
+}
+
+// test case from issue
+
+func Copy[MC ~map[KC]VC, KC comparable, VC any](dst, src MC) {
+ for k, v := range src {
+ dst[k] = v
+ }
+}
+
+func Merge[MM ~map[KM]VM, KM comparable, VM any](ms ...MM) MM {
+ result := MM{}
+ for _, m := range ms {
+ Copy(result, m)
+ }
+ return result
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51145.go b/src/go/types/testdata/fixedbugs/issue51145.go
new file mode 100644
index 0000000000..b84391df19
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51145.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "fmt"
+
+type (
+ _ [fmt /* ERROR invalid array length fmt */ ]int
+ _ [float64 /* ERROR invalid array length float64 */ ]int
+ _ [f /* ERROR invalid array length f */ ]int
+ _ [nil /* ERROR invalid array length nil */ ]int
+)
+
+func f()
+
+var _ fmt.Stringer // use fmt
diff --git a/src/go/types/testdata/fixedbugs/issue51158.go2 b/src/go/types/testdata/fixedbugs/issue51158.go2
new file mode 100644
index 0000000000..3edc505382
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51158.go2
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Type checking the following code should not cause an infinite recursion.
+func f[M map[K]int, K comparable](m M) {
+ f(m)
+}
+
+// Equivalent code using mutual recursion.
+func f1[M map[K]int, K comparable](m M) {
+ f2(m)
+}
+func f2[M map[K]int, K comparable](m M) {
+ f1(m)
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51229.go2 b/src/go/types/testdata/fixedbugs/issue51229.go2
new file mode 100644
index 0000000000..808b6471f6
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51229.go2
@@ -0,0 +1,164 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Constraint type inference should be independent of the
+// ordering of the type parameter declarations. Try all
+// permutations in the test case below.
+// Permutations produced by https://go.dev/play/p/PHcZNGJTEBZ.
+
+func f00[S1 ~[]E1, S2 ~[]E2, E1 ~byte, E2 ~byte](S1, S2) {}
+func f01[S2 ~[]E2, S1 ~[]E1, E1 ~byte, E2 ~byte](S1, S2) {}
+func f02[E1 ~byte, S1 ~[]E1, S2 ~[]E2, E2 ~byte](S1, S2) {}
+func f03[S1 ~[]E1, E1 ~byte, S2 ~[]E2, E2 ~byte](S1, S2) {}
+func f04[S2 ~[]E2, E1 ~byte, S1 ~[]E1, E2 ~byte](S1, S2) {}
+func f05[E1 ~byte, S2 ~[]E2, S1 ~[]E1, E2 ~byte](S1, S2) {}
+func f06[E2 ~byte, S2 ~[]E2, S1 ~[]E1, E1 ~byte](S1, S2) {}
+func f07[S2 ~[]E2, E2 ~byte, S1 ~[]E1, E1 ~byte](S1, S2) {}
+func f08[S1 ~[]E1, E2 ~byte, S2 ~[]E2, E1 ~byte](S1, S2) {}
+func f09[E2 ~byte, S1 ~[]E1, S2 ~[]E2, E1 ~byte](S1, S2) {}
+func f10[S2 ~[]E2, S1 ~[]E1, E2 ~byte, E1 ~byte](S1, S2) {}
+func f11[S1 ~[]E1, S2 ~[]E2, E2 ~byte, E1 ~byte](S1, S2) {}
+func f12[S1 ~[]E1, E1 ~byte, E2 ~byte, S2 ~[]E2](S1, S2) {}
+func f13[E1 ~byte, S1 ~[]E1, E2 ~byte, S2 ~[]E2](S1, S2) {}
+func f14[E2 ~byte, S1 ~[]E1, E1 ~byte, S2 ~[]E2](S1, S2) {}
+func f15[S1 ~[]E1, E2 ~byte, E1 ~byte, S2 ~[]E2](S1, S2) {}
+func f16[E1 ~byte, E2 ~byte, S1 ~[]E1, S2 ~[]E2](S1, S2) {}
+func f17[E2 ~byte, E1 ~byte, S1 ~[]E1, S2 ~[]E2](S1, S2) {}
+func f18[E2 ~byte, E1 ~byte, S2 ~[]E2, S1 ~[]E1](S1, S2) {}
+func f19[E1 ~byte, E2 ~byte, S2 ~[]E2, S1 ~[]E1](S1, S2) {}
+func f20[S2 ~[]E2, E2 ~byte, E1 ~byte, S1 ~[]E1](S1, S2) {}
+func f21[E2 ~byte, S2 ~[]E2, E1 ~byte, S1 ~[]E1](S1, S2) {}
+func f22[E1 ~byte, S2 ~[]E2, E2 ~byte, S1 ~[]E1](S1, S2) {}
+func f23[S2 ~[]E2, E1 ~byte, E2 ~byte, S1 ~[]E1](S1, S2) {}
+
+type myByte byte
+
+func _(a []byte, b []myByte) {
+ f00(a, b)
+ f01(a, b)
+ f02(a, b)
+ f03(a, b)
+ f04(a, b)
+ f05(a, b)
+ f06(a, b)
+ f07(a, b)
+ f08(a, b)
+ f09(a, b)
+ f10(a, b)
+ f11(a, b)
+ f12(a, b)
+ f13(a, b)
+ f14(a, b)
+ f15(a, b)
+ f16(a, b)
+ f17(a, b)
+ f18(a, b)
+ f19(a, b)
+ f20(a, b)
+ f21(a, b)
+ f22(a, b)
+ f23(a, b)
+}
+
+// Constraint type inference may have to iterate.
+// Again, the order of the type parameters shouldn't matter.
+
+func g0[S ~[]E, M ~map[string]S, E any](m M) {}
+func g1[M ~map[string]S, S ~[]E, E any](m M) {}
+func g2[E any, S ~[]E, M ~map[string]S](m M) {}
+func g3[S ~[]E, E any, M ~map[string]S](m M) {}
+func g4[M ~map[string]S, E any, S ~[]E](m M) {}
+func g5[E any, M ~map[string]S, S ~[]E](m M) {}
+
+func _(m map[string][]byte) {
+ g0(m)
+ g1(m)
+ g2(m)
+ g3(m)
+ g4(m)
+ g5(m)
+}
+
+// Worst-case scenario.
+// There are 10 unknown type parameters. In each iteration of
+// constraint type inference we infer one more, from right to left.
+// Each iteration looks repeatedly at all 11 type parameters,
+// requiring a total of 10*11 = 110 iterations with the current
+// implementation. Pathological case.
+
+func h[K any, J ~*K, I ~*J, H ~*I, G ~*H, F ~*G, E ~*F, D ~*E, C ~*D, B ~*C, A ~*B](x A) {}
+
+func _(x **********int) {
+ h(x)
+}
+
+// Examples with channel constraints and tilde.
+
+func ch1[P chan<- int]() (_ P) { return } // core(P) == chan<- int (single type, no tilde)
+func ch2[P ~chan int]() { return } // core(P) == ~chan<- int (tilde)
+func ch3[P chan E, E any](E) { return } // core(P) == chan<- E (single type, no tilde)
+func ch4[P chan E | ~chan<- E, E any](E) { return } // core(P) == ~chan<- E (tilde)
+func ch5[P chan int | chan<- int]() { return } // core(P) == chan<- int (not a single type)
+
+func _() {
+ // P can be inferred as there's a single specific type and no tilde.
+ var _ chan int = ch1 /* ERROR cannot use ch1.*value of type chan<- int */ ()
+ var _ chan<- int = ch1()
+
+ // P cannot be inferred as there's a tilde.
+ ch2 /* ERROR cannot infer P */ ()
+ type myChan chan int
+ ch2[myChan]()
+
+ // P can be inferred as there's a single specific type and no tilde.
+ var e int
+ ch3(e)
+
+ // P cannot be inferred as there's more than one specific type and a tilde.
+ ch4 /* ERROR cannot infer P */ (e)
+ _ = ch4[chan int]
+
+ // P cannot be inferred as there's more than one specific type.
+ ch5 /* ERROR cannot infer P */ ()
+ ch5[chan<- int]()
+}
+
+// test case from issue
+
+func equal[M1 ~map[K1]V1, M2 ~map[K2]V2, K1, K2 ~uint32, V1, V2 ~string](m1 M1, m2 M2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[K2(k)]; !ok || V2(v1) != v2 {
+ return false
+ }
+ }
+ return true
+}
+
+func equalFixed[K1, K2 ~uint32, V1, V2 ~string](m1 map[K1]V1, m2 map[K2]V2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[K2(k)]; !ok || v1 != V1(v2) {
+ return false
+ }
+ }
+ return true
+}
+
+type (
+ someNumericID uint32
+ someStringID string
+)
+
+func _() {
+ foo := map[uint32]string{10: "bar"}
+ bar := map[someNumericID]someStringID{10: "bar"}
+ equal(foo, bar)
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51232.go2 b/src/go/types/testdata/fixedbugs/issue51232.go2
new file mode 100644
index 0000000000..6e575a376d
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51232.go2
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type RC[RG any] interface {
+ ~[]RG
+}
+
+type Fn[RCT RC[RG], RG any] func(RCT)
+
+type F[RCT RC[RG], RG any] interface {
+ Fn() Fn[RCT]
+}
+
+type concreteF[RCT RC[RG], RG any] struct {
+ makeFn func() Fn[RCT]
+}
+
+func (c *concreteF[RCT, RG]) Fn() Fn[RCT] {
+ return c.makeFn()
+}
+
+func NewConcrete[RCT RC[RG], RG any](Rc RCT) F[RCT] {
+ return &concreteF[RCT]{
+ makeFn: nil,
+ }
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51233.go2 b/src/go/types/testdata/fixedbugs/issue51233.go2
new file mode 100644
index 0000000000..5c8393d039
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51233.go2
@@ -0,0 +1,25 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type RC[RG any] interface {
+ ~[]RG
+}
+
+type Fn[RCT RC[RG], RG any] func(RCT)
+
+type FFn[RCT RC[RG], RG any] func() Fn[RCT]
+
+type F[RCT RC[RG], RG any] interface {
+ Fn() Fn[RCT]
+}
+
+type concreteF[RCT RC[RG], RG any] struct {
+ makeFn FFn[RCT]
+}
+
+func (c *concreteF[RCT, RG]) Fn() Fn[RCT] {
+ return c.makeFn()
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51257.go2 b/src/go/types/testdata/fixedbugs/issue51257.go2
new file mode 100644
index 0000000000..8a3eb3278d
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51257.go2
@@ -0,0 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[_ comparable]() {}
+
+type S1 struct{ x int }
+type S2 struct{ x any }
+type S3 struct{ x [10]interface{ m() } }
+
+func _[P1 comparable, P2 S2]() {
+ _ = f[S1]
+ _ = f[S2 /* ERROR S2 does not implement comparable */ ]
+ _ = f[S3 /* ERROR S3 does not implement comparable */ ]
+
+ type L1 struct { x P1 }
+ type L2 struct { x P2 }
+ _ = f[L1]
+ _ = f[L2 /* ERROR L2 does not implement comparable */ ]
+}
+
+
+// example from issue
+
+type Set[T comparable] map[T]struct{}
+
+func NewSetFromSlice[T comparable](items []T) *Set[T] {
+ s := Set[T]{}
+
+ for _, item := range items {
+ s[item] = struct{}{}
+ }
+
+ return &s
+}
+
+type T struct{ x any }
+
+func main() {
+ NewSetFromSlice /* ERROR T does not implement comparable */ ([]T{
+ {"foo"},
+ {5},
+ })
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51335.go2 b/src/go/types/testdata/fixedbugs/issue51335.go2
new file mode 100644
index 0000000000..0b5a1af082
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51335.go2
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S1 struct{}
+type S2 struct{}
+
+func _[P *S1|*S2]() {
+ _= []P{{ /* ERROR invalid composite literal element type P: no core type */ }}
+}
+
+func _[P *S1|S1]() {
+ _= []P{{ /* ERROR invalid composite literal element type P: no core type */ }}
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51339.go2 b/src/go/types/testdata/fixedbugs/issue51339.go2
new file mode 100644
index 0000000000..6803c44d76
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51339.go2
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+type T[P any, B *P] struct{}
+
+func (T /* ERROR cannot use generic type */ ) m0() {}
+func (/* ERROR got 1 type parameter, but receiver base type declares 2 */ T[_]) m1() {}
+func (T[_, _]) m2() {}
+// TODO(gri) this error is unfortunate (issue #51343)
+func (T /* ERROR got 3 arguments but 2 type parameters */ [_, _, _]) m3() {}
diff --git a/src/go/types/testdata/fixedbugs/issue51360.go b/src/go/types/testdata/fixedbugs/issue51360.go
new file mode 100644
index 0000000000..fe3de04dbf
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51360.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ len.Println /* ERROR cannot select on len */
+ len.Println /* ERROR cannot select on len */ ()
+ _ = len.Println /* ERROR cannot select on len */
+ _ = len /* ERROR cannot index len */ [0]
+ _ = *len /* ERROR cannot indirect len */
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51376.go2 b/src/go/types/testdata/fixedbugs/issue51376.go2
new file mode 100644
index 0000000000..d51607b7ab
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51376.go2
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Map map[string]int
+
+func f[M ~map[K]V, K comparable, V any](M) {}
+func g[M map[K]V, K comparable, V any](M) {}
+
+func _[M1 ~map[K]V, M2 map[K]V, K comparable, V any]() {
+ var m1 M1
+ f(m1)
+ g /* ERROR M1 does not implement map\[K\]V */ (m1) // M1 has tilde
+
+ var m2 M2
+ f(m2)
+ g(m2) // M1 does not have tilde
+
+ var m3 Map
+ f(m3)
+ g /* ERROR Map does not implement map\[string\]int */ (m3) // M in g does not have tilde
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51386.go2 b/src/go/types/testdata/fixedbugs/issue51386.go2
new file mode 100644
index 0000000000..ef6223927a
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51386.go2
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type myString string
+
+func _[P ~string | ~[]byte | ~[]rune]() {
+ _ = P("")
+ const s myString = ""
+ _ = P(s)
+}
+
+func _[P myString]() {
+ _ = P("")
+}
diff --git a/src/go/types/testdata/fixedbugs/issue51437.go b/src/go/types/testdata/fixedbugs/issue51437.go
new file mode 100644
index 0000000000..376261516e
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue51437.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T struct{}
+
+func (T) m() []int { return nil }
+
+func f(x T) {
+ for _, x := range func() []int {
+ return x.m() // x declared in parameter list of f
+ }() {
+ _ = x // x declared by range clause
+ }
+}
diff --git a/src/go/types/type.go b/src/go/types/type.go
index 3acb19c412..130637530b 100644
--- a/src/go/types/type.go
+++ b/src/go/types/type.go
@@ -7,9 +7,7 @@ package types
// A Type represents a type of Go.
// All types implement the Type interface.
type Type interface {
- // Underlying returns the underlying type of a type
- // w/o following forwarding chains. Only used by
- // client packages.
+ // Underlying returns the underlying type of a type.
Underlying() Type
// String returns a string representation of a type.
@@ -27,13 +25,13 @@ func under(t Type) Type {
return t.Underlying()
}
-// If t is not a type parameter, structuralType returns the underlying type.
-// If t is a type parameter, structuralType returns the single underlying
+// If t is not a type parameter, coreType returns the underlying type.
+// If t is a type parameter, coreType returns the single underlying
// type of all types in its type set if it exists, or nil otherwise. If the
// type set contains only unrestricted and restricted channel types (with
// identical element types), the single underlying type is the restricted
// channel type if the restrictions are always the same, or nil otherwise.
-func structuralType(t Type) Type {
+func coreType(t Type) Type {
tpar, _ := t.(*TypeParam)
if tpar == nil {
return under(t)
@@ -59,10 +57,10 @@ func structuralType(t Type) Type {
return nil
}
-// structuralString is like structuralType but also considers []byte
+// coreString is like coreType but also considers []byte
// and strings as identical. In this case, if successful and we saw
// a string, the result is of type (possibly untyped) string.
-func structuralString(t Type) Type {
+func coreString(t Type) Type {
tpar, _ := t.(*TypeParam)
if tpar == nil {
return under(t) // string or untyped string
diff --git a/src/go/types/typeparam.go b/src/go/types/typeparam.go
index 71e6861b87..778c687d43 100644
--- a/src/go/types/typeparam.go
+++ b/src/go/types/typeparam.go
@@ -35,6 +35,7 @@ func NewTypeParam(obj *TypeName, constraint Type) *TypeParam {
return (*Checker)(nil).newTypeParam(obj, constraint)
}
+// check may be nil
func (check *Checker) newTypeParam(obj *TypeName, constraint Type) *TypeParam {
// Always increment lastID, even if it is not used.
id := nextID()
@@ -49,9 +50,7 @@ func (check *Checker) newTypeParam(obj *TypeName, constraint Type) *TypeParam {
// iface may mutate typ.bound, so we must ensure that iface() is called
// at least once before the resulting TypeParam escapes.
if check != nil {
- check.later(func() {
- typ.iface()
- })
+ check.needsCleanup(typ)
} else if constraint != nil {
typ.iface()
}
@@ -95,9 +94,12 @@ func (t *TypeParam) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
// Implementation
+func (t *TypeParam) cleanup() {
+ t.iface()
+ t.check = nil
+}
+
// iface returns the constraint interface of t.
-// TODO(gri) If we make tparamIsIface the default, this should be renamed to under
-// (similar to Named.under).
func (t *TypeParam) iface() *Interface {
bound := t.bound
@@ -138,16 +140,6 @@ func (t *TypeParam) iface() *Interface {
return ityp
}
-// singleType returns the single type of the type parameter constraint; or nil.
-func (t *TypeParam) singleType() Type {
- return t.iface().typeSet().singleType()
-}
-
-// hasTerms reports whether the type parameter constraint has specific type terms.
-func (t *TypeParam) hasTerms() bool {
- return t.iface().typeSet().hasTerms()
-}
-
// is calls f with the specific type terms of t's constraint and reports whether
// all calls to f returned true. If there are no specific terms, is
// returns the result of f(nil).
diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go
index e1f73015b9..4c3f018cfe 100644
--- a/src/go/types/typeset.go
+++ b/src/go/types/typeset.go
@@ -37,7 +37,7 @@ func (s *_TypeSet) IsComparable(seen map[Type]bool) bool {
return s.comparable
}
return s.is(func(t *term) bool {
- return t != nil && comparable(t.typ, seen, nil)
+ return t != nil && comparable(t.typ, false, seen, nil)
})
}
@@ -101,9 +101,6 @@ func (s *_TypeSet) String() string {
// hasTerms reports whether the type set has specific type terms.
func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll() }
-// singleType returns the single type in s if there is exactly one; otherwise the result is nil.
-func (s *_TypeSet) singleType() Type { return s.terms.singleType() }
-
// subsetOf reports whether s1 ⊆ s2.
func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) }
diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go
index 00c250b5b6..724c40963f 100644
--- a/src/go/types/typexpr.go
+++ b/src/go/types/typexpr.go
@@ -323,7 +323,7 @@ func (check *Checker) typInternal(e0 ast.Expr, def *Named) (T Type) {
return typ
case *ast.InterfaceType:
- typ := new(Interface)
+ typ := check.newInterface()
def.setUnderlying(typ)
if def != nil {
typ.obj = def.obj
@@ -487,12 +487,20 @@ func (check *Checker) instantiatedType(ix *typeparams.IndexExpr, def *Named) (re
// and returns the constant length >= 0, or a value < 0
// to indicate an error (and thus an unknown length).
func (check *Checker) arrayLength(e ast.Expr) int64 {
- // If e is an undeclared identifier, the array declaration might be an
- // attempt at a parameterized type declaration with missing constraint.
- // Provide a better error message than just "undeclared name: X".
- if name, _ := e.(*ast.Ident); name != nil && check.lookup(name.Name) == nil {
- check.errorf(name, _InvalidArrayLen, "undeclared name %s for array length", name.Name)
- return -1
+ // If e is an identifier, the array declaration might be an
+ // attempt at a parameterized type declaration with missing
+ // constraint. Provide an error message that mentions array
+ // length.
+ if name, _ := e.(*ast.Ident); name != nil {
+ obj := check.lookup(name.Name)
+ if obj == nil {
+ check.errorf(name, _InvalidArrayLen, "undeclared name %s for array length", name.Name)
+ return -1
+ }
+ if _, ok := obj.(*Const); !ok {
+ check.errorf(name, _InvalidArrayLen, "invalid array length %s", name.Name)
+ return -1
+ }
}
var x operand
diff --git a/src/go/types/unify.go b/src/go/types/unify.go
index be2037ca81..7b9aeeee0a 100644
--- a/src/go/types/unify.go
+++ b/src/go/types/unify.go
@@ -9,6 +9,7 @@ package types
import (
"bytes"
"fmt"
+ "strings"
)
// The unifier maintains two separate sets of type parameters x and y
@@ -26,7 +27,7 @@ import (
// parameter P ("x" side), but the argument type P must be left alone so
// that unification resolves the type parameter P to P.
//
-// For bidirection unification, both sets are provided. This enables
+// For bidirectional unification, both sets are provided. This enables
// unification to go from argument to parameter type and vice versa.
// For constraint type inference, we use bidirectional unification
// where both the x and y type parameters are identical. This is done
@@ -41,6 +42,19 @@ const (
// Whether to panic when unificationDepthLimit is reached. Turn on when
// investigating infinite recursion.
panicAtUnificationDepthLimit = false
+
+ // If enableCoreTypeUnification is set, unification will consider
+ // the core types, if any, of non-local (unbound) type parameters.
+ enableCoreTypeUnification = true
+
+ // If traceInference is set, unification will print a trace of its operation.
+ // Interpretation of trace:
+ // x ≡ y attempt to unify types x and y
+ // p ➞ y type parameter p is set to type y (p is inferred to be y)
+ // p ⇄ q type parameters p and q match (p is inferred to be q and vice versa)
+ // x ≢ y types x and y cannot be unified
+ // [p, q, ...] ➞ [x, y, ...] mapping from type parameters to types
+ traceInference = false
)
// A unifier maintains the current type parameters for x and y
@@ -58,6 +72,7 @@ type unifier struct {
// exactly. If exact is not set, a named type's underlying type
// is considered if unification would fail otherwise, and the
// direction of channels is ignored.
+// TODO(gri) exact is not set anymore by a caller. Consider removing it.
func newUnifier(exact bool) *unifier {
u := &unifier{exact: exact}
u.x.unifier = u
@@ -70,6 +85,10 @@ func (u *unifier) unify(x, y Type) bool {
return u.nify(x, y, nil)
}
+func (u *unifier) tracef(format string, args ...interface{}) {
+ fmt.Println(strings.Repeat(". ", u.depth) + sprintf(nil, nil, true, format, args...))
+}
+
// A tparamsList describes a list of type parameters and the types inferred for them.
type tparamsList struct {
unifier *unifier
@@ -121,6 +140,9 @@ func (d *tparamsList) init(tparams []*TypeParam) {
// If both type parameters already have a type associated with them and they are
// not joined, join fails and returns false.
func (u *unifier) join(i, j int) bool {
+ if traceInference {
+ u.tracef("%s ⇄ %s", u.x.tparams[i], u.y.tparams[j])
+ }
ti := u.x.indices[i]
tj := u.y.indices[j]
switch {
@@ -210,6 +232,9 @@ func (d *tparamsList) at(i int) Type {
func (d *tparamsList) set(i int, typ Type) {
assert(typ != nil)
u := d.unifier
+ if traceInference {
+ u.tracef("%s ➞ %s", d.tparams[i], typ)
+ }
switch ti := d.indices[i]; {
case ti < 0:
u.types[-ti-1] = typ
@@ -222,6 +247,17 @@ func (d *tparamsList) set(i int, typ Type) {
}
}
+// unknowns returns the number of type parameters for which no type has been set yet.
+func (d *tparamsList) unknowns() int {
+ n := 0
+ for _, ti := range d.indices {
+ if ti <= 0 {
+ n++
+ }
+ }
+ return n
+}
+
// types returns the list of inferred types (via unification) for the type parameters
// described by d, and an index. If all types were inferred, the returned index is < 0.
// Otherwise, it is the index of the first type parameter which couldn't be inferred;
@@ -247,9 +283,16 @@ func (u *unifier) nifyEq(x, y Type, p *ifacePair) bool {
// adapted version of Checker.identical. For changes to that
// code the corresponding changes should be made here.
// Must not be called directly from outside the unifier.
-func (u *unifier) nify(x, y Type, p *ifacePair) bool {
+func (u *unifier) nify(x, y Type, p *ifacePair) (result bool) {
+ if traceInference {
+ u.tracef("%s ≡ %s", x, y)
+ }
+
// Stop gap for cases where unification fails.
if u.depth >= unificationDepthLimit {
+ if traceInference {
+ u.tracef("depth %d >= %d", u.depth, unificationDepthLimit)
+ }
if panicAtUnificationDepthLimit {
panic("unification reached recursion depth limit")
}
@@ -258,6 +301,9 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
u.depth++
defer func() {
u.depth--
+ if traceInference && !result {
+ u.tracef("%s ≢ %s", x, y)
+ }
}()
if !u.exact {
@@ -267,8 +313,14 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
// (We use !hasName to exclude any type with a name, including
// basic types and type parameters; the rest are unamed types.)
if nx, _ := x.(*Named); nx != nil && !hasName(y) {
+ if traceInference {
+ u.tracef("under %s ≡ %s", nx, y)
+ }
return u.nify(nx.under(), y, p)
} else if ny, _ := y.(*Named); ny != nil && !hasName(x) {
+ if traceInference {
+ u.tracef("%s ≡ under %s", x, ny)
+ }
return u.nify(x, ny.under(), p)
}
}
@@ -302,6 +354,39 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
return true
}
+ // If we get here and x or y is a type parameter, they are type parameters
+ // from outside our declaration list. Try to unify their core types, if any
+ // (see issue #50755 for a test case).
+ if enableCoreTypeUnification && !u.exact {
+ if isTypeParam(x) && !hasName(y) {
+ // When considering the type parameter for unification
+ // we look at the adjusted core term (adjusted core type
+ // with tilde information).
+ // If the adjusted core type is a named type N; the
+ // corresponding core type is under(N). Since !u.exact
+ // and y doesn't have a name, unification will end up
+ // comparing under(N) to y, so we can just use the core
+ // type instead. And we can ignore the tilde because we
+ // already look at the underlying types on both sides
+ // and we have known types on both sides.
+ // Optimization.
+ if cx := coreType(x); cx != nil {
+ if traceInference {
+ u.tracef("core %s ≡ %s", x, y)
+ }
+ return u.nify(cx, y, p)
+ }
+ } else if isTypeParam(y) && !hasName(x) {
+ // see comment above
+ if cy := coreType(y); cy != nil {
+ if traceInference {
+ u.tracef("%s ≡ core %s", x, y)
+ }
+ return u.nify(x, cy, p)
+ }
+ }
+ }
+
// For type unification, do not shortcut (x == y) for identical
// types. Instead keep comparing them element-wise to unify the
// matching (and equal type parameter types). A simple test case
@@ -490,7 +575,7 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
// avoid a crash in case of nil type
default:
- panic(fmt.Sprintf("### u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams))
+ panic(sprintf(nil, nil, true, "u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams))
}
return false
diff --git a/src/go/types/validtype.go b/src/go/types/validtype.go
index c4ec2f2e0a..7d7029bce2 100644
--- a/src/go/types/validtype.go
+++ b/src/go/types/validtype.go
@@ -79,7 +79,7 @@ func (check *Checker) validType0(typ Type, env *tparamEnv, path []Object) typeIn
// would have reported a type cycle and couldn't have been
// imported in the first place.
assert(t.obj.pkg == check.pkg)
- t.underlying = Typ[Invalid] // t is in the current package (no race possibilty)
+ t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
// Find the starting point of the cycle and report it.
for i, tn := range path {
if tn == t.obj {
diff --git a/src/internal/cfg/cfg.go b/src/internal/cfg/cfg.go
index 4cb3fbd4f3..78664d7a96 100644
--- a/src/internal/cfg/cfg.go
+++ b/src/internal/cfg/cfg.go
@@ -62,6 +62,7 @@ const KnownEnv = `
GOTOOLDIR
GOVCS
GOWASM
+ GOWORK
GO_EXTLINK_ENABLED
PKG_CONFIG
`
diff --git a/src/internal/goversion/goversion.go b/src/internal/goversion/goversion.go
index 8fcea100dc..da33e68caf 100644
--- a/src/internal/goversion/goversion.go
+++ b/src/internal/goversion/goversion.go
@@ -9,4 +9,4 @@ package goversion
//
// It should be updated at the start of each development cycle to be
// the version of the next Go 1.x release. See golang.org/issue/40705.
-const Version = 18
+const Version = 19
diff --git a/src/internal/pkgbits/codes.go b/src/internal/pkgbits/codes.go
new file mode 100644
index 0000000000..8438ab3216
--- /dev/null
+++ b/src/internal/pkgbits/codes.go
@@ -0,0 +1,60 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+type Code interface {
+ Marker() SyncMarker
+ Value() int
+}
+
+type CodeVal int
+
+func (c CodeVal) Marker() SyncMarker { return SyncVal }
+func (c CodeVal) Value() int { return int(c) }
+
+const (
+ ValBool CodeVal = iota
+ ValString
+ ValInt64
+ ValBigInt
+ ValBigRat
+ ValBigFloat
+)
+
+type CodeType int
+
+func (c CodeType) Marker() SyncMarker { return SyncType }
+func (c CodeType) Value() int { return int(c) }
+
+const (
+ TypeBasic CodeType = iota
+ TypeNamed
+ TypePointer
+ TypeSlice
+ TypeArray
+ TypeChan
+ TypeMap
+ TypeSignature
+ TypeStruct
+ TypeInterface
+ TypeUnion
+ TypeTypeParam
+)
+
+type CodeObj int
+
+func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
+func (c CodeObj) Value() int { return int(c) }
+
+const (
+ ObjAlias CodeObj = iota
+ ObjConst
+ ObjType
+ ObjFunc
+ ObjVar
+ ObjStub
+)
diff --git a/src/internal/pkgbits/decoder.go b/src/internal/pkgbits/decoder.go
new file mode 100644
index 0000000000..9c8ad446ca
--- /dev/null
+++ b/src/internal/pkgbits/decoder.go
@@ -0,0 +1,336 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+)
+
+type PkgDecoder struct {
+ pkgPath string
+
+ elemEndsEnds [numRelocs]uint32
+ elemEnds []uint32
+ elemData string
+}
+
+func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
+
+func NewPkgDecoder(pkgPath, input string) PkgDecoder {
+ pr := PkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ var version uint32
+ assert(binary.Read(r, binary.LittleEndian, &version) == nil)
+ assert(version == 0)
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, os.SEEK_CUR)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+ assert(len(pr.elemData) == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+func (pr *PkgDecoder) NumElems(k RelocKind) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+func (pr *PkgDecoder) TotalElems() int {
+ return len(pr.elemEnds)
+}
+
+func (pr *PkgDecoder) AbsIdx(k RelocKind, idx int) int {
+ absIdx := idx
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+func (pr *PkgDecoder) DataIdx(k RelocKind, idx int) string {
+ absIdx := pr.AbsIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+func (pr *PkgDecoder) StringIdx(idx int) string {
+ return pr.DataIdx(RelocString, idx)
+}
+
+func (pr *PkgDecoder) NewDecoder(k RelocKind, idx int, marker SyncMarker) Decoder {
+ r := pr.NewDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx int) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+ r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
+ r.Sync(SyncRelocs)
+ r.Relocs = make([]RelocEnt, r.Len())
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), r.Len()}
+ }
+
+ return r
+}
+
+type Decoder struct {
+ common *PkgDecoder
+
+ Relocs []RelocEnt
+ Data strings.Reader
+
+ k RelocKind
+ Idx int
+}
+
+func (r *Decoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected decoding error: %w", err)
+ }
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+ x, err := binary.ReadUvarint(&r.Data)
+ r.checkErr(err)
+ return x
+}
+
+func (r *Decoder) rawVarint() int64 {
+ ux := r.rawUvarint()
+
+ // Zig-zag decode.
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) int {
+ e := r.Relocs[idx]
+ assert(e.Kind == k)
+ return e.Idx
+}
+
+func (r *Decoder) Sync(mWant SyncMarker) {
+ if !EnableSync {
+ return
+ }
+
+ pos, _ := r.Data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved
+ mHave := SyncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+func (r *Decoder) Bool() bool {
+ r.Sync(SyncBool)
+ x, err := r.Data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+func (r *Decoder) Int64() int64 {
+ r.Sync(SyncInt64)
+ return r.rawVarint()
+}
+
+func (r *Decoder) Uint64() uint64 {
+ r.Sync(SyncUint64)
+ return r.rawUvarint()
+}
+
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+// TODO(mdempsky): Ideally this method would have signature "Code[T
+// Code] T" instead, but we don't allow generic methods and the
+// compiler can't depend on generics yet anyway.
+func (r *Decoder) Code(mark SyncMarker) int {
+ r.Sync(mark)
+ return r.Len()
+}
+
+func (r *Decoder) Reloc(k RelocKind) int {
+ r.Sync(SyncUseReloc)
+ return r.rawReloc(k, r.Len())
+}
+
+func (r *Decoder) String() string {
+ r.Sync(SyncString)
+ return r.common.StringIdx(r.Reloc(RelocString))
+}
+
+func (r *Decoder) Strings() []string {
+ res := make([]string, r.Len())
+ for i := range res {
+ res[i] = r.String()
+ }
+ return res
+}
+
+func (r *Decoder) Value() constant.Value {
+ r.Sync(SyncValue)
+ isComplex := r.Bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *Decoder) scalar() constant.Value {
+ switch tag := CodeVal(r.Code(SyncVal)); tag {
+ default:
+ panic(fmt.Errorf("unexpected scalar tag: %v", tag))
+
+ case ValBool:
+ return constant.MakeBool(r.Bool())
+ case ValString:
+ return constant.MakeString(r.String())
+ case ValInt64:
+ return constant.MakeInt64(r.Int64())
+ case ValBigInt:
+ return constant.Make(r.bigInt())
+ case ValBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case ValBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *Decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.String()))
+ if r.Bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *Decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.String())) == nil)
+ return v
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+func (pr *PkgDecoder) PeekPkgPath(idx int) string {
+ r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef)
+ path := r.String()
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+func (pr *PkgDecoder) PeekObj(idx int) (string, string, CodeObj) {
+ r := pr.NewDecoder(RelocName, idx, SyncObject1)
+ r.Sync(SyncSym)
+ r.Sync(SyncPkg)
+ path := pr.PeekPkgPath(r.Reloc(RelocPkg))
+ name := r.String()
+ assert(name != "")
+
+ tag := CodeObj(r.Code(SyncCodeObj))
+
+ return path, name, tag
+}
diff --git a/src/internal/pkgbits/encoder.go b/src/internal/pkgbits/encoder.go
new file mode 100644
index 0000000000..820c707940
--- /dev/null
+++ b/src/internal/pkgbits/encoder.go
@@ -0,0 +1,287 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "bytes"
+ "encoding/binary"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+)
+
+type PkgEncoder struct {
+ elems [numRelocs][]string
+
+ stringsIdx map[string]int
+
+ syncFrames int
+}
+
+func NewPkgEncoder(syncFrames int) PkgEncoder {
+ return PkgEncoder{
+ stringsIdx: make(map[string]int),
+ syncFrames: syncFrames,
+ }
+}
+
+func (pw *PkgEncoder) DumpTo(out io.Writer) {
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ writeUint32(0) // version
+
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+}
+
+func (pw *PkgEncoder) StringIdx(s string) int {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[RelocString][idx] == s)
+ return idx
+ }
+
+ idx := len(pw.elems[RelocString])
+ pw.elems[RelocString] = append(pw.elems[RelocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
+ e := pw.NewEncoderRaw(k)
+ e.Sync(marker)
+ return e
+}
+
+func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
+ idx := len(pw.elems[k])
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return Encoder{
+ p: pw,
+ k: k,
+ Idx: idx,
+ }
+}
+
+// Encoders
+
+type Encoder struct {
+ p *PkgEncoder
+
+ Relocs []RelocEnt
+ Data bytes.Buffer
+
+ encodingRelocHeader bool
+
+ k RelocKind
+ Idx int
+}
+
+func (w *Encoder) Flush() int {
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+
+ // Backup the data so we write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.Data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ panic("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.Sync(SyncRelocs)
+ w.Len(len(w.Relocs))
+ for _, rent := range w.Relocs {
+ w.Sync(SyncReloc)
+ w.Len(int(rent.Kind))
+ w.Len(rent.Idx)
+ }
+
+ io.Copy(&sb, &w.Data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.Idx] = sb.String()
+
+ return w.Idx
+}
+
+func (w *Encoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected encoding error: %v", err)
+ }
+}
+
+func (w *Encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.Data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *Encoder) rawVarint(x int64) {
+ // Zig-zag encode.
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *Encoder) rawReloc(r RelocKind, idx int) int {
+ // TODO(mdempsky): Use map for lookup.
+ for i, rent := range w.Relocs {
+ if rent.Kind == r && rent.Idx == idx {
+ return i
+ }
+ }
+
+ i := len(w.Relocs)
+ w.Relocs = append(w.Relocs, RelocEnt{r, idx})
+ return i
+}
+
+func (w *Encoder) Sync(m SyncMarker) {
+ if !EnableSync {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && w.p.syncFrames > 0 {
+ pcs := make([]uintptr, w.p.syncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
+ }
+}
+
+func (w *Encoder) Bool(b bool) bool {
+ w.Sync(SyncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.Data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+func (w *Encoder) Int64(x int64) {
+ w.Sync(SyncInt64)
+ w.rawVarint(x)
+}
+
+func (w *Encoder) Uint64(x uint64) {
+ w.Sync(SyncUint64)
+ w.rawUvarint(x)
+}
+
+func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
+func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
+func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
+
+func (w *Encoder) Reloc(r RelocKind, idx int) {
+ w.Sync(SyncUseReloc)
+ w.Len(w.rawReloc(r, idx))
+}
+
+func (w *Encoder) Code(c Code) {
+ w.Sync(c.Marker())
+ w.Len(c.Value())
+}
+
+func (w *Encoder) String(s string) {
+ w.Sync(SyncString)
+ w.Reloc(RelocString, w.p.StringIdx(s))
+}
+
+func (w *Encoder) Strings(ss []string) {
+ w.Len(len(ss))
+ for _, s := range ss {
+ w.String(s)
+ }
+}
+
+func (w *Encoder) Value(val constant.Value) {
+ w.Sync(SyncValue)
+ if w.Bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *Encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ errorf("unhandled %v (%v)", val, val.Kind())
+ case bool:
+ w.Code(ValBool)
+ w.Bool(v)
+ case string:
+ w.Code(ValString)
+ w.String(v)
+ case int64:
+ w.Code(ValInt64)
+ w.Int64(v)
+ case *big.Int:
+ w.Code(ValBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.Code(ValBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.Code(ValBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *Encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.String(string(b)) // TODO: More efficient encoding.
+ w.Bool(v.Sign() < 0)
+}
+
+func (w *Encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.String(string(b)) // TODO: More efficient encoding.
+}
diff --git a/src/cmd/compile/internal/noder/frames_go1.go b/src/internal/pkgbits/frames_go1.go
index d00e0f51f9..5294f6a63e 100644
--- a/src/cmd/compile/internal/noder/frames_go1.go
+++ b/src/internal/pkgbits/frames_go1.go
@@ -7,7 +7,7 @@
// TODO(mdempsky): Remove after #44505 is resolved
-package noder
+package pkgbits
import "runtime"
diff --git a/src/cmd/compile/internal/noder/frames_go17.go b/src/internal/pkgbits/frames_go17.go
index 48d77625b4..5235d46afc 100644
--- a/src/cmd/compile/internal/noder/frames_go17.go
+++ b/src/internal/pkgbits/frames_go17.go
@@ -5,7 +5,7 @@
//go:build go1.7
// +build go1.7
-package noder
+package pkgbits
import "runtime"
diff --git a/src/cmd/compile/internal/noder/reloc.go b/src/internal/pkgbits/reloc.go
index 669a6182e6..43040ca2ff 100644
--- a/src/cmd/compile/internal/noder/reloc.go
+++ b/src/internal/pkgbits/reloc.go
@@ -4,39 +4,37 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package noder
+package pkgbits
-// A reloc indicates a particular section within a unified IR export.
-//
-// TODO(mdempsky): Rename to "section" or something similar?
-type reloc int
+// A RelocKind indicates a particular section within a unified IR export.
+type RelocKind int
// A relocEnt (relocation entry) is an entry in an atom's local
// reference table.
//
// TODO(mdempsky): Rename this too.
-type relocEnt struct {
- kind reloc
- idx int
+type RelocEnt struct {
+ Kind RelocKind
+ Idx int
}
// Reserved indices within the meta relocation section.
const (
- publicRootIdx = 0
- privateRootIdx = 1
+ PublicRootIdx = 0
+ PrivateRootIdx = 1
)
const (
- relocString reloc = iota
- relocMeta
- relocPosBase
- relocPkg
- relocName
- relocType
- relocObj
- relocObjExt
- relocObjDict
- relocBody
+ RelocString RelocKind = iota
+ RelocMeta
+ RelocPosBase
+ RelocPkg
+ RelocName
+ RelocType
+ RelocObj
+ RelocObjExt
+ RelocObjDict
+ RelocBody
numRelocs = iota
)
diff --git a/src/internal/pkgbits/support.go b/src/internal/pkgbits/support.go
new file mode 100644
index 0000000000..f7579dfdc4
--- /dev/null
+++ b/src/internal/pkgbits/support.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import "fmt"
+
+func assert(b bool) {
+ if !b {
+ panic("assertion failed")
+ }
+}
+
+func errorf(format string, args ...any) {
+ panic(fmt.Errorf(format, args...))
+}
diff --git a/src/internal/pkgbits/sync.go b/src/internal/pkgbits/sync.go
new file mode 100644
index 0000000000..b2c9139ce6
--- /dev/null
+++ b/src/internal/pkgbits/sync.go
@@ -0,0 +1,125 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "fmt"
+ "strings"
+)
+
+// EnableSync controls whether sync markers are written into unified
+// IR's export data format and also whether they're expected when
+// reading them back in. They're inessential to the correct
+// functioning of unified IR, but are helpful during development to
+// detect mistakes.
+//
+// When sync is enabled, writer stack frames will also be included in
+// the export data. Currently, a fixed number of frames are included,
+// controlled by -d=syncframes (default 0).
+const EnableSync = true
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// SyncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type SyncMarker int
+
+//go:generate stringer -type=SyncMarker -trimprefix=Sync
+
+const (
+ _ SyncMarker = iota
+
+ // Public markers (known to go/types importers).
+
+ // Low-level coding markers.
+
+ SyncEOF
+ SyncBool
+ SyncInt64
+ SyncUint64
+ SyncString
+ SyncValue
+ SyncVal
+ SyncRelocs
+ SyncReloc
+ SyncUseReloc
+
+ // Higher-level object and type markers.
+ SyncPublic
+ SyncPos
+ SyncPosBase
+ SyncObject
+ SyncObject1
+ SyncPkg
+ SyncPkgDef
+ SyncMethod
+ SyncType
+ SyncTypeIdx
+ SyncTypeParamNames
+ SyncSignature
+ SyncParams
+ SyncParam
+ SyncCodeObj
+ SyncSym
+ SyncLocalIdent
+ SyncSelector
+
+ // Private markers (only known to cmd/compile).
+ SyncPrivate
+
+ SyncFuncExt
+ SyncVarExt
+ SyncTypeExt
+ SyncPragma
+
+ SyncExprList
+ SyncExprs
+ SyncExpr
+ SyncOp
+ SyncFuncLit
+ SyncCompLit
+
+ SyncDecl
+ SyncFuncBody
+ SyncOpenScope
+ SyncCloseScope
+ SyncCloseAnotherScope
+ SyncDeclNames
+ SyncDeclName
+
+ SyncStmts
+ SyncBlockStmt
+ SyncIfStmt
+ SyncForStmt
+ SyncSwitchStmt
+ SyncRangeStmt
+ SyncCaseClause
+ SyncCommClause
+ SyncSelectStmt
+ SyncDecls
+ SyncLabeledStmt
+ SyncUseObjLocal
+ SyncAddLocal
+ SyncLinkname
+ SyncStmt1
+ SyncStmtsEnd
+ SyncLabel
+ SyncOptLabel
+)
diff --git a/src/internal/pkgbits/syncmarker_string.go b/src/internal/pkgbits/syncmarker_string.go
new file mode 100644
index 0000000000..91154a001d
--- /dev/null
+++ b/src/internal/pkgbits/syncmarker_string.go
@@ -0,0 +1,87 @@
+// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
+
+package pkgbits
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[SyncEOF-1]
+ _ = x[SyncBool-2]
+ _ = x[SyncInt64-3]
+ _ = x[SyncUint64-4]
+ _ = x[SyncString-5]
+ _ = x[SyncValue-6]
+ _ = x[SyncVal-7]
+ _ = x[SyncRelocs-8]
+ _ = x[SyncReloc-9]
+ _ = x[SyncUseReloc-10]
+ _ = x[SyncPublic-11]
+ _ = x[SyncPos-12]
+ _ = x[SyncPosBase-13]
+ _ = x[SyncObject-14]
+ _ = x[SyncObject1-15]
+ _ = x[SyncPkg-16]
+ _ = x[SyncPkgDef-17]
+ _ = x[SyncMethod-18]
+ _ = x[SyncType-19]
+ _ = x[SyncTypeIdx-20]
+ _ = x[SyncTypeParamNames-21]
+ _ = x[SyncSignature-22]
+ _ = x[SyncParams-23]
+ _ = x[SyncParam-24]
+ _ = x[SyncCodeObj-25]
+ _ = x[SyncSym-26]
+ _ = x[SyncLocalIdent-27]
+ _ = x[SyncSelector-28]
+ _ = x[SyncPrivate-29]
+ _ = x[SyncFuncExt-30]
+ _ = x[SyncVarExt-31]
+ _ = x[SyncTypeExt-32]
+ _ = x[SyncPragma-33]
+ _ = x[SyncExprList-34]
+ _ = x[SyncExprs-35]
+ _ = x[SyncExpr-36]
+ _ = x[SyncOp-37]
+ _ = x[SyncFuncLit-38]
+ _ = x[SyncCompLit-39]
+ _ = x[SyncDecl-40]
+ _ = x[SyncFuncBody-41]
+ _ = x[SyncOpenScope-42]
+ _ = x[SyncCloseScope-43]
+ _ = x[SyncCloseAnotherScope-44]
+ _ = x[SyncDeclNames-45]
+ _ = x[SyncDeclName-46]
+ _ = x[SyncStmts-47]
+ _ = x[SyncBlockStmt-48]
+ _ = x[SyncIfStmt-49]
+ _ = x[SyncForStmt-50]
+ _ = x[SyncSwitchStmt-51]
+ _ = x[SyncRangeStmt-52]
+ _ = x[SyncCaseClause-53]
+ _ = x[SyncCommClause-54]
+ _ = x[SyncSelectStmt-55]
+ _ = x[SyncDecls-56]
+ _ = x[SyncLabeledStmt-57]
+ _ = x[SyncUseObjLocal-58]
+ _ = x[SyncAddLocal-59]
+ _ = x[SyncLinkname-60]
+ _ = x[SyncStmt1-61]
+ _ = x[SyncStmtsEnd-62]
+ _ = x[SyncLabel-63]
+ _ = x[SyncOptLabel-64]
+}
+
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 220, 227, 234, 238, 246, 255, 265, 282, 291, 299, 304, 313, 319, 326, 336, 345, 355, 365, 375, 380, 391, 402, 410, 418, 423, 431, 436, 444}
+
+func (i SyncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
+ return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
+}
diff --git a/src/math/big/intmarsh.go b/src/math/big/intmarsh.go
index c1422e2710..ce429ffc11 100644
--- a/src/math/big/intmarsh.go
+++ b/src/math/big/intmarsh.go
@@ -67,7 +67,10 @@ func (z *Int) UnmarshalText(text []byte) error {
// MarshalJSON implements the json.Marshaler interface.
func (x *Int) MarshalJSON() ([]byte, error) {
- return x.MarshalText()
+ if x == nil {
+ return []byte("null"), nil
+ }
+ return x.abs.itoa(x.neg, 10), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
diff --git a/src/math/big/intmarsh_test.go b/src/math/big/intmarsh_test.go
index f82956ceaf..8e7d29f9dd 100644
--- a/src/math/big/intmarsh_test.go
+++ b/src/math/big/intmarsh_test.go
@@ -97,6 +97,19 @@ func TestIntJSONEncoding(t *testing.T) {
}
}
+func TestIntJSONEncodingNil(t *testing.T) {
+ var x *Int
+ b, err := x.MarshalJSON()
+ if err != nil {
+ t.Fatalf("marshaling of nil failed: %s", err)
+ }
+ got := string(b)
+ want := "null"
+ if got != want {
+ t.Fatalf("marshaling of nil failed: got %s want %s", got, want)
+ }
+}
+
func TestIntXMLEncoding(t *testing.T) {
for _, test := range encodingTests {
for _, sign := range []string{"", "+", "-"} {
diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go
index 21aa91f665..b989d12c58 100644
--- a/src/net/dnsclient_unix.go
+++ b/src/net/dnsclient_unix.go
@@ -30,6 +30,10 @@ const (
// to be used as a useTCP parameter to exchange
useTCPOnly = true
useUDPOrTCP = false
+
+ // Maximum DNS packet size.
+ // Value taken from https://dnsflagday.net/2020/.
+ maxDNSPacketSize = 1232
)
var (
@@ -56,6 +60,19 @@ func newRequest(q dnsmessage.Question) (id uint16, udpReq, tcpReq []byte, err er
if err := b.Question(q); err != nil {
return 0, nil, nil, err
}
+
+ // Accept packets up to maxDNSPacketSize. RFC 6891.
+ if err := b.StartAdditionals(); err != nil {
+ return 0, nil, nil, err
+ }
+ var rh dnsmessage.ResourceHeader
+ if err := rh.SetEDNS0(maxDNSPacketSize, dnsmessage.RCodeSuccess, false); err != nil {
+ return 0, nil, nil, err
+ }
+ if err := b.OPTResource(rh, dnsmessage.OPTResource{}); err != nil {
+ return 0, nil, nil, err
+ }
+
tcpReq, err = b.Finish()
udpReq = tcpReq[2:]
l := len(tcpReq) - 2
@@ -82,7 +99,7 @@ func dnsPacketRoundTrip(c Conn, id uint16, query dnsmessage.Question, b []byte)
return dnsmessage.Parser{}, dnsmessage.Header{}, err
}
- b = make([]byte, 512) // see RFC 1035
+ b = make([]byte, maxDNSPacketSize)
for {
n, err := c.Read(b)
if err != nil {
diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go
index 14366eca8c..e5f01dba2a 100644
--- a/src/net/dnsclient_unix_test.go
+++ b/src/net/dnsclient_unix_test.go
@@ -881,7 +881,7 @@ func (f *fakeDNSPacketConn) Close() error {
func TestIgnoreDNSForgeries(t *testing.T) {
c, s := Pipe()
go func() {
- b := make([]byte, 512)
+ b := make([]byte, maxDNSPacketSize)
n, err := s.Read(b)
if err != nil {
t.Error(err)
@@ -2161,3 +2161,58 @@ func TestRootNS(t *testing.T) {
t.Errorf("records = [%v]; want [%v]", strings.Join(records, " "), want[0])
}
}
+
+// Test that we advertise support for a larger DNS packet size.
+// This isn't a great test as it just tests the dnsmessage package
+// against itself.
+func TestDNSPacketSize(t *testing.T) {
+ fake := fakeDNSServer{
+ rh: func(_, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) {
+ if len(q.Additionals) == 0 {
+ t.Error("missing EDNS record")
+ } else if opt, ok := q.Additionals[0].Body.(*dnsmessage.OPTResource); !ok {
+ t.Errorf("additional record type %T, expected OPTResource", q.Additionals[0])
+ } else if len(opt.Options) != 0 {
+ t.Errorf("found %d Options, expected none", len(opt.Options))
+ } else {
+ got := int(q.Additionals[0].Header.Class)
+ t.Logf("EDNS packet size == %d", got)
+ if got != maxDNSPacketSize {
+ t.Errorf("EDNS packet size == %d, want %d", got, maxDNSPacketSize)
+ }
+ }
+
+ // Hand back a dummy answer to verify that
+ // LookupIPAddr completes.
+ r := dnsmessage.Message{
+ Header: dnsmessage.Header{
+ ID: q.Header.ID,
+ Response: true,
+ RCode: dnsmessage.RCodeSuccess,
+ },
+ Questions: q.Questions,
+ }
+ if q.Questions[0].Type == dnsmessage.TypeA {
+ r.Answers = []dnsmessage.Resource{
+ {
+ Header: dnsmessage.ResourceHeader{
+ Name: q.Questions[0].Name,
+ Type: dnsmessage.TypeA,
+ Class: dnsmessage.ClassINET,
+ Length: 4,
+ },
+ Body: &dnsmessage.AResource{
+ A: TestAddr,
+ },
+ },
+ }
+ }
+ return r, nil
+ },
+ }
+
+ r := &Resolver{PreferGo: true, Dial: fake.DialContext}
+ if _, err := r.LookupIPAddr(context.Background(), "go.dev"); err != nil {
+ t.Errorf("lookup failed: %v", err)
+ }
+}
diff --git a/src/net/http/transport.go b/src/net/http/transport.go
index 5fe3e6ebb4..e41b20a15b 100644
--- a/src/net/http/transport.go
+++ b/src/net/http/transport.go
@@ -606,6 +606,9 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) {
} else if !pconn.shouldRetryRequest(req, err) {
// Issue 16465: return underlying net.Conn.Read error from peek,
// as we've historically done.
+ if e, ok := err.(nothingWrittenError); ok {
+ err = e.error
+ }
if e, ok := err.(transportReadFromServerError); ok {
err = e.err
}
@@ -2032,6 +2035,9 @@ func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritte
}
if _, ok := err.(transportReadFromServerError); ok {
+ if pc.nwrite == startBytesWritten {
+ return nothingWrittenError{err}
+ }
// Don't decorate
return err
}
diff --git a/src/net/http/transport_internal_test.go b/src/net/http/transport_internal_test.go
index 1cce27235d..2ed637e9f0 100644
--- a/src/net/http/transport_internal_test.go
+++ b/src/net/http/transport_internal_test.go
@@ -52,8 +52,8 @@ func TestTransportPersistConnReadLoopEOF(t *testing.T) {
conn.Close() // simulate the server hanging up on the client
_, err = pc.roundTrip(treq)
- if !isTransportReadFromServerError(err) && err != errServerClosedIdle {
- t.Errorf("roundTrip = %#v, %v; want errServerClosedIdle or transportReadFromServerError", err, err)
+ if !isNothingWrittenError(err) && !isTransportReadFromServerError(err) && err != errServerClosedIdle {
+ t.Errorf("roundTrip = %#v, %v; want errServerClosedIdle, transportReadFromServerError, or nothingWrittenError", err, err)
}
<-pc.closech
@@ -63,6 +63,11 @@ func TestTransportPersistConnReadLoopEOF(t *testing.T) {
}
}
+func isNothingWrittenError(err error) bool {
+ _, ok := err.(nothingWrittenError)
+ return ok
+}
+
func isTransportReadFromServerError(err error) bool {
_, ok := err.(transportReadFromServerError)
return ok
diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go
index 063d650c60..3a31f56bea 100644
--- a/src/net/lookup_test.go
+++ b/src/net/lookup_test.go
@@ -883,21 +883,66 @@ func TestLookupNonLDH(t *testing.T) {
func TestLookupContextCancel(t *testing.T) {
mustHaveExternalNetwork(t)
- defer dnsWaitGroup.Wait()
+ testenv.SkipFlakyNet(t)
- ctx, ctxCancel := context.WithCancel(context.Background())
- ctxCancel()
- _, err := DefaultResolver.LookupIPAddr(ctx, "google.com")
- if err.(*DNSError).Err != errCanceled.Error() {
- testenv.SkipFlakyNet(t)
- t.Fatal(err)
+ origTestHookLookupIP := testHookLookupIP
+ defer func() {
+ dnsWaitGroup.Wait()
+ testHookLookupIP = origTestHookLookupIP
+ }()
+
+ lookupCtx, cancelLookup := context.WithCancel(context.Background())
+ unblockLookup := make(chan struct{})
+
+ // Set testHookLookupIP to start a new, concurrent call to LookupIPAddr
+ // and cancel the original one, then block until the canceled call has returned
+ // (ensuring that it has performed any synchronous cleanup).
+ testHookLookupIP = func(
+ ctx context.Context,
+ fn func(context.Context, string, string) ([]IPAddr, error),
+ network string,
+ host string,
+ ) ([]IPAddr, error) {
+ select {
+ case <-unblockLookup:
+ default:
+ // Start a concurrent LookupIPAddr for the same host while the caller is
+ // still blocked, and sleep a little to give it time to be deduplicated
+ // before we cancel (and unblock) the caller.
+ // (If the timing doesn't quite work out, we'll end up testing sequential
+ // calls instead of concurrent ones, but the test should still pass.)
+ t.Logf("starting concurrent LookupIPAddr")
+ dnsWaitGroup.Add(1)
+ go func() {
+ defer dnsWaitGroup.Done()
+ _, err := DefaultResolver.LookupIPAddr(context.Background(), host)
+ if err != nil {
+ t.Error(err)
+ }
+ }()
+ time.Sleep(1 * time.Millisecond)
+ }
+
+ cancelLookup()
+ <-unblockLookup
+ // If the concurrent lookup above is deduplicated to this one
+ // (as we expect to happen most of the time), it is important
+ // that the original call does not cancel the shared Context.
+ // (See https://go.dev/issue/22724.) Explicitly check for
+ // cancellation now, just in case fn itself doesn't notice it.
+ if err := ctx.Err(); err != nil {
+ t.Logf("testHookLookupIP canceled")
+ return nil, err
+ }
+ t.Logf("testHookLookupIP performing lookup")
+ return fn(ctx, network, host)
}
- ctx = context.Background()
- _, err = DefaultResolver.LookupIPAddr(ctx, "google.com")
- if err != nil {
- testenv.SkipFlakyNet(t)
- t.Fatal(err)
+
+ _, err := DefaultResolver.LookupIPAddr(lookupCtx, "google.com")
+ if dnsErr, ok := err.(*DNSError); !ok || dnsErr.Err != errCanceled.Error() {
+ t.Errorf("unexpected error from canceled, blocked LookupIPAddr: %v", err)
}
+ close(unblockLookup)
}
// Issue 24330: treat the nil *Resolver like a zero value. Verify nothing
diff --git a/src/net/net.go b/src/net/net.go
index 77e54a9125..d91e743a01 100644
--- a/src/net/net.go
+++ b/src/net/net.go
@@ -703,6 +703,12 @@ var (
_ io.Reader = (*Buffers)(nil)
)
+// WriteTo writes contents of the buffers to w.
+//
+// WriteTo implements io.WriterTo for Buffers.
+//
+// WriteTo modifies the slice v as well as v[i] for 0 <= i < len(v),
+// but does not modify v[i][j] for any i, j.
func (v *Buffers) WriteTo(w io.Writer) (n int64, err error) {
if wv, ok := w.(buffersWriter); ok {
return wv.writeBuffers(v)
@@ -719,6 +725,12 @@ func (v *Buffers) WriteTo(w io.Writer) (n int64, err error) {
return n, nil
}
+// Read from the buffers.
+//
+// Read implements io.Reader for Buffers.
+//
+// Read modifies the slice v as well as v[i] for 0 <= i < len(v),
+// but does not modify v[i][j] for any i, j.
func (v *Buffers) Read(p []byte) (n int, err error) {
for len(p) > 0 && len(*v) > 0 {
n0 := copy(p, (*v)[0])
diff --git a/src/net/net_test.go b/src/net/net_test.go
index 7b169916f1..76a9c8b151 100644
--- a/src/net/net_test.go
+++ b/src/net/net_test.go
@@ -9,7 +9,6 @@ package net
import (
"errors"
"fmt"
- "internal/testenv"
"io"
"net/internal/socktest"
"os"
@@ -515,35 +514,50 @@ func TestCloseUnblocksRead(t *testing.T) {
// Issue 24808: verify that ECONNRESET is not temporary for read.
func TestNotTemporaryRead(t *testing.T) {
- if runtime.GOOS == "freebsd" {
- testenv.SkipFlaky(t, 25289)
- }
- if runtime.GOOS == "aix" {
- testenv.SkipFlaky(t, 29685)
- }
t.Parallel()
- server := func(cs *TCPConn) error {
- cs.SetLinger(0)
- // Give the client time to get stuck in a Read.
- time.Sleep(50 * time.Millisecond)
+
+ ln := newLocalListener(t, "tcp")
+ serverDone := make(chan struct{})
+ dialed := make(chan struct{})
+ go func() {
+ defer close(serverDone)
+
+ cs, err := ln.Accept()
+ if err != nil {
+ return
+ }
+ <-dialed
+ cs.(*TCPConn).SetLinger(0)
cs.Close()
- return nil
+
+ ln.Close()
+ }()
+ defer func() { <-serverDone }()
+
+ ss, err := Dial("tcp", ln.Addr().String())
+ if err != nil {
+ t.Fatal(err)
}
- client := func(ss *TCPConn) error {
- _, err := ss.Read([]byte{0})
- if err == nil {
- return errors.New("Read succeeded unexpectedly")
- } else if err == io.EOF {
- // This happens on Plan 9.
- return nil
- } else if ne, ok := err.(Error); !ok {
- return fmt.Errorf("unexpected error %v", err)
- } else if ne.Temporary() {
- return fmt.Errorf("unexpected temporary error %v", err)
+ defer ss.Close()
+ close(dialed)
+ _, err = ss.Read([]byte{0})
+ if err == nil {
+ t.Fatal("Read succeeded unexpectedly")
+ } else if err == io.EOF {
+ // This happens on Plan 9, but for some reason (prior to CL 385314) it was
+ // accepted everywhere else too.
+ if runtime.GOOS == "plan9" {
+ return
}
- return nil
+ // TODO: during an open development cycle, try making this a failure
+ // and see whether it causes the test to become flaky anywhere else.
+ return
+ }
+ if ne, ok := err.(Error); !ok {
+ t.Errorf("Read error does not implement net.Error: %v", err)
+ } else if ne.Temporary() {
+ t.Errorf("Read error is unexpectedly temporary: %v", err)
}
- withTCPConnPair(t, client, server)
}
// The various errors should implement the Error interface.
diff --git a/src/net/smtp/auth.go b/src/net/smtp/auth.go
index fd1a472f93..7a32ef6a2e 100644
--- a/src/net/smtp/auth.go
+++ b/src/net/smtp/auth.go
@@ -16,8 +16,7 @@ type Auth interface {
// Start begins an authentication with a server.
// It returns the name of the authentication protocol
// and optionally data to include in the initial AUTH message
- // sent to the server. It can return proto == "" to indicate
- // that the authentication should be skipped.
+ // sent to the server.
// If it returns a non-nil error, the SMTP client aborts
// the authentication attempt and closes the connection.
Start(server *ServerInfo) (proto string, toServer []byte, err error)
diff --git a/src/os/example_test.go b/src/os/example_test.go
index e8554b0b12..53e3c5227b 100644
--- a/src/os/example_test.go
+++ b/src/os/example_test.go
@@ -241,3 +241,25 @@ func ExampleWriteFile() {
log.Fatal(err)
}
}
+
+func ExampleMkdir() {
+ err := os.Mkdir("testdir", 0750)
+ if err != nil && !os.IsExist(err) {
+ log.Fatal(err)
+ }
+ err = os.WriteFile("testdir/testfile.txt", []byte("Hello, Gophers!"), 0660)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func ExampleMkdirAll() {
+ err := os.MkdirAll("test/subdir", 0750)
+ if err != nil && !os.IsExist(err) {
+ log.Fatal(err)
+ }
+ err = os.WriteFile("test/subdir/testfile.txt", []byte("Hello, Gophers!"), 0660)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/src/os/os_test.go b/src/os/os_test.go
index 82ca6f987d..63427deb6e 100644
--- a/src/os/os_test.go
+++ b/src/os/os_test.go
@@ -28,6 +28,16 @@ import (
"time"
)
+func TestMain(m *testing.M) {
+ if Getenv("GO_OS_TEST_DRAIN_STDIN") == "1" {
+ os.Stdout.Close()
+ io.Copy(io.Discard, os.Stdin)
+ Exit(0)
+ }
+
+ Exit(m.Run())
+}
+
var dot = []string{
"dir_unix.go",
"env.go",
@@ -2259,9 +2269,18 @@ func testKillProcess(t *testing.T, processKiller func(p *Process)) {
testenv.MustHaveExec(t)
t.Parallel()
- // Re-exec the test binary itself to emulate "sleep 1".
- cmd := osexec.Command(Args[0], "-test.run", "TestSleep")
- err := cmd.Start()
+ // Re-exec the test binary to start a process that hangs until stdin is closed.
+ cmd := osexec.Command(Args[0])
+ cmd.Env = append(os.Environ(), "GO_OS_TEST_DRAIN_STDIN=1")
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = cmd.Start()
if err != nil {
t.Fatalf("Failed to start test process: %v", err)
}
@@ -2270,19 +2289,14 @@ func testKillProcess(t *testing.T, processKiller func(p *Process)) {
if err := cmd.Wait(); err == nil {
t.Errorf("Test process succeeded, but expected to fail")
}
+ stdin.Close() // Keep stdin alive until the process has finished dying.
}()
- time.Sleep(100 * time.Millisecond)
- processKiller(cmd.Process)
-}
+ // Wait for the process to be started.
+ // (It will close its stdout when it reaches TestMain.)
+ io.Copy(io.Discard, stdout)
-// TestSleep emulates "sleep 1". It is a helper for testKillProcess, so we
-// don't have to rely on an external "sleep" command being available.
-func TestSleep(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping in short mode")
- }
- time.Sleep(time.Second)
+ processKiller(cmd.Process)
}
func TestKillStartProcess(t *testing.T) {
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 866f38e687..5364166eab 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -3682,8 +3682,11 @@ func TestTagGet(t *testing.T) {
}
func TestBytes(t *testing.T) {
- type B []byte
- x := B{1, 2, 3, 4}
+ shouldPanic("on int Value", func() { ValueOf(0).Bytes() })
+ shouldPanic("of non-byte slice", func() { ValueOf([]string{}).Bytes() })
+
+ type S []byte
+ x := S{1, 2, 3, 4}
y := ValueOf(x).Bytes()
if !bytes.Equal(x, y) {
t.Fatalf("ValueOf(%v).Bytes() = %v", x, y)
@@ -3691,6 +3694,28 @@ func TestBytes(t *testing.T) {
if &x[0] != &y[0] {
t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0])
}
+
+ type A [4]byte
+ a := A{1, 2, 3, 4}
+ shouldPanic("unaddressable", func() { ValueOf(a).Bytes() })
+ shouldPanic("on ptr Value", func() { ValueOf(&a).Bytes() })
+ b := ValueOf(&a).Elem().Bytes()
+ if !bytes.Equal(a[:], y) {
+ t.Fatalf("ValueOf(%v).Bytes() = %v", a, b)
+ }
+ if &a[0] != &b[0] {
+ t.Errorf("ValueOf(%p).Bytes() = %p", &a[0], &b[0])
+ }
+
+ // Per issue #24746, it was decided that Bytes can be called on byte slices
+ // that normally cannot be converted from per Go language semantics.
+ type B byte
+ type SB []B
+ type AB [4]B
+ ValueOf([]B{1, 2, 3, 4}).Bytes() // should not panic
+ ValueOf(new([4]B)).Elem().Bytes() // should not panic
+ ValueOf(SB{1, 2, 3, 4}).Bytes() // should not panic
+ ValueOf(new(AB)).Elem().Bytes() // should not panic
}
func TestSetBytes(t *testing.T) {
diff --git a/src/reflect/asm_arm64.s b/src/reflect/asm_arm64.s
index 5b9b3573fa..812b8a02c3 100644
--- a/src/reflect/asm_arm64.s
+++ b/src/reflect/asm_arm64.s
@@ -33,9 +33,14 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432
ADD $LOCAL_REGARGS, RSP, R20
CALL runtime·spillArgs(SB)
MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R26, R0
+ MOVD R20, R1
+#else
MOVD R26, 8(RSP)
MOVD R20, 16(RSP)
- CALL ·moveMakeFuncArgPtrs(SB)
+#endif
+ CALL ·moveMakeFuncArgPtrs<ABIInternal>(SB)
MOVD 32(RSP), R26
MOVD R26, 8(RSP)
MOVD $argframe+0(FP), R3
@@ -61,9 +66,14 @@ TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432
ADD $LOCAL_REGARGS, RSP, R20
CALL runtime·spillArgs(SB)
MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R26, R0
+ MOVD R20, R1
+#else
MOVD R26, 8(RSP)
MOVD R20, 16(RSP)
- CALL ·moveMakeFuncArgPtrs(SB)
+#endif
+ CALL ·moveMakeFuncArgPtrs<ABIInternal>(SB)
MOVD 32(RSP), R26
MOVD R26, 8(RSP)
MOVD $argframe+0(FP), R3
diff --git a/src/reflect/value.go b/src/reflect/value.go
index dcc359dae4..89f0253570 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -286,14 +286,28 @@ func (v Value) Bool() bool {
}
// Bytes returns v's underlying value.
-// It panics if v's underlying value is not a slice of bytes.
+// It panics if v's underlying value is not a slice of bytes or
+// an addressable array of bytes.
func (v Value) Bytes() []byte {
- v.mustBe(Slice)
- if v.typ.Elem().Kind() != Uint8 {
- panic("reflect.Value.Bytes of non-byte slice")
+ switch v.kind() {
+ case Slice:
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.Bytes of non-byte slice")
+ }
+ // Slice is always bigger than a word; assume flagIndir.
+ return *(*[]byte)(v.ptr)
+ case Array:
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.Bytes of non-byte array")
+ }
+ if !v.CanAddr() {
+ panic("reflect.Value.Bytes of unaddressable byte array")
+ }
+ p := (*byte)(v.ptr)
+ n := int((*arrayType)(unsafe.Pointer(v.typ)).len)
+ return unsafe.Slice(p, n)
}
- // Slice is always bigger than a word; assume flagIndir.
- return *(*[]byte)(v.ptr)
+ panic(&ValueError{"reflect.Value.Bytes", v.kind()})
}
// runes returns v's underlying value.
diff --git a/src/regexp/regexp.go b/src/regexp/regexp.go
index f975bb3894..7d56bd6b8e 100644
--- a/src/regexp/regexp.go
+++ b/src/regexp/regexp.go
@@ -46,7 +46,7 @@
// the match of the first parenthesized subexpression, and so on.
//
// If 'Index' is present, matches and submatches are identified by byte index
-// pairs within the input string: result[2*n:2*n+1] identifies the indexes of
+// pairs within the input string: result[2*n:2*n+2] identifies the indexes of
// the nth submatch. The pair for n==0 identifies the match of the entire
// expression. If 'Index' is not present, the match is identified by the text
// of the match/submatch. If an index is negative or text is nil, it means that
diff --git a/src/regexp/syntax/parse.go b/src/regexp/syntax/parse.go
index 06a92fb3d7..0f6587ab27 100644
--- a/src/regexp/syntax/parse.go
+++ b/src/regexp/syntax/parse.go
@@ -76,13 +76,29 @@ const (
opVerticalBar
)
+// maxHeight is the maximum height of a regexp parse tree.
+// It is somewhat arbitrarily chosen, but the idea is to be large enough
+// that no one will actually hit in real use but at the same time small enough
+// that recursion on the Regexp tree will not hit the 1GB Go stack limit.
+// The maximum amount of stack for a single recursive frame is probably
+// closer to 1kB, so this could potentially be raised, but it seems unlikely
+// that people have regexps nested even this deeply.
+// We ran a test on Google's C++ code base and turned up only
+// a single use case with depth > 100; it had depth 128.
+// Using depth 1000 should be plenty of margin.
+// As an optimization, we don't even bother calculating heights
+// until we've allocated at least maxHeight Regexp structures.
+const maxHeight = 1000
+
type parser struct {
flags Flags // parse mode flags
stack []*Regexp // stack of parsed expressions
free *Regexp
numCap int // number of capturing groups seen
wholeRegexp string
- tmpClass []rune // temporary char class work space
+ tmpClass []rune // temporary char class work space
+ numRegexp int // number of regexps allocated
+ height map[*Regexp]int // regexp height for height limit check
}
func (p *parser) newRegexp(op Op) *Regexp {
@@ -92,16 +108,52 @@ func (p *parser) newRegexp(op Op) *Regexp {
*re = Regexp{}
} else {
re = new(Regexp)
+ p.numRegexp++
}
re.Op = op
return re
}
func (p *parser) reuse(re *Regexp) {
+ if p.height != nil {
+ delete(p.height, re)
+ }
re.Sub0[0] = p.free
p.free = re
}
+func (p *parser) checkHeight(re *Regexp) {
+ if p.numRegexp < maxHeight {
+ return
+ }
+ if p.height == nil {
+ p.height = make(map[*Regexp]int)
+ for _, re := range p.stack {
+ p.checkHeight(re)
+ }
+ }
+ if p.calcHeight(re, true) > maxHeight {
+ panic(ErrInternalError)
+ }
+}
+
+func (p *parser) calcHeight(re *Regexp, force bool) int {
+ if !force {
+ if h, ok := p.height[re]; ok {
+ return h
+ }
+ }
+ h := 1
+ for _, sub := range re.Sub {
+ hsub := p.calcHeight(sub, false)
+ if h < 1+hsub {
+ h = 1 + hsub
+ }
+ }
+ p.height[re] = h
+ return h
+}
+
// Parse stack manipulation.
// push pushes the regexp re onto the parse stack and returns the regexp.
@@ -137,6 +189,7 @@ func (p *parser) push(re *Regexp) *Regexp {
}
p.stack = append(p.stack, re)
+ p.checkHeight(re)
return re
}
@@ -246,6 +299,7 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (
re.Sub = re.Sub0[:1]
re.Sub[0] = sub
p.stack[n-1] = re
+ p.checkHeight(re)
if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) {
return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
@@ -693,6 +747,21 @@ func literalRegexp(s string, flags Flags) *Regexp {
// Flags, and returns a regular expression parse tree. The syntax is
// described in the top-level comment.
func Parse(s string, flags Flags) (*Regexp, error) {
+ return parse(s, flags)
+}
+
+func parse(s string, flags Flags) (_ *Regexp, err error) {
+ defer func() {
+ switch r := recover(); r {
+ default:
+ panic(r)
+ case nil:
+ // ok
+ case ErrInternalError:
+ err = &Error{Code: ErrInternalError, Expr: s}
+ }
+ }()
+
if flags&Literal != 0 {
// Trivial parser for literal string.
if err := checkUTF8(s); err != nil {
@@ -704,7 +773,6 @@ func Parse(s string, flags Flags) (*Regexp, error) {
// Otherwise, must do real work.
var (
p parser
- err error
c rune
op Op
lastRepeat string
diff --git a/src/regexp/syntax/parse_test.go b/src/regexp/syntax/parse_test.go
index 5581ba1ca5..1ef6d8a3fe 100644
--- a/src/regexp/syntax/parse_test.go
+++ b/src/regexp/syntax/parse_test.go
@@ -207,6 +207,11 @@ var parseTests = []parseTest{
// Valid repetitions.
{`((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}))`, ``},
{`((((((((((x{1}){2}){2}){2}){2}){2}){2}){2}){2}){2})`, ``},
+
+ // Valid nesting.
+ {strings.Repeat("(", 999) + strings.Repeat(")", 999), ``},
+ {strings.Repeat("(?:", 999) + strings.Repeat(")*", 999), ``},
+ {"(" + strings.Repeat("|", 12345) + ")", ``}, // not nested at all
}
const testFlags = MatchNL | PerlX | UnicodeGroups
@@ -482,6 +487,8 @@ var invalidRegexps = []string{
`a{100000}`,
`a{100000,}`,
"((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})",
+ strings.Repeat("(", 1000) + strings.Repeat(")", 1000),
+ strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000),
`\Q\E*`,
}
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 594cd5ed0d..e16880c950 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -937,8 +937,9 @@ aes0to15:
PAND masks<>(SB)(BX*8), X1
final1:
- AESENC X0, X1 // scramble input, xor in seed
- AESENC X1, X1 // scramble combo 2 times
+ PXOR X0, X1 // xor data with seed
+ AESENC X1, X1 // scramble combo 3 times
+ AESENC X1, X1
AESENC X1, X1
MOVL X1, (DX)
RET
@@ -971,9 +972,13 @@ aes17to32:
MOVOU (AX), X2
MOVOU -16(AX)(BX*1), X3
+ // xor with seed
+ PXOR X0, X2
+ PXOR X1, X3
+
// scramble 3 times
- AESENC X0, X2
- AESENC X1, X3
+ AESENC X2, X2
+ AESENC X3, X3
AESENC X2, X2
AESENC X3, X3
AESENC X2, X2
@@ -1000,10 +1005,15 @@ aes33to64:
MOVOU -32(AX)(BX*1), X6
MOVOU -16(AX)(BX*1), X7
- AESENC X0, X4
- AESENC X1, X5
- AESENC X2, X6
- AESENC X3, X7
+ PXOR X0, X4
+ PXOR X1, X5
+ PXOR X2, X6
+ PXOR X3, X7
+
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
AESENC X4, X4
AESENC X5, X5
@@ -1069,7 +1079,12 @@ aesloop:
DECL BX
JNE aesloop
- // 2 more scrambles to finish
+ // 3 more scrambles to finish
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
AESENC X4, X4
AESENC X5, X5
AESENC X6, X6
diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go
index 8c250f72d6..37509b1292 100644
--- a/src/runtime/crash_cgo_test.go
+++ b/src/runtime/crash_cgo_test.go
@@ -234,6 +234,7 @@ func TestCgoCrashTraceback(t *testing.T) {
switch platform := runtime.GOOS + "/" + runtime.GOARCH; platform {
case "darwin/amd64":
case "linux/amd64":
+ case "linux/arm64":
case "linux/ppc64le":
default:
t.Skipf("not yet supported on %s", platform)
@@ -251,6 +252,7 @@ func TestCgoCrashTracebackGo(t *testing.T) {
switch platform := runtime.GOOS + "/" + runtime.GOARCH; platform {
case "darwin/amd64":
case "linux/amd64":
+ case "linux/arm64":
case "linux/ppc64le":
default:
t.Skipf("not yet supported on %s", platform)
@@ -284,7 +286,7 @@ func TestCgoTracebackContextPreemption(t *testing.T) {
func testCgoPprof(t *testing.T, buildArg, runArg, top, bottom string) {
t.Parallel()
- if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "ppc64le") {
+ if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "ppc64le" && runtime.GOARCH != "arm64") {
t.Skipf("not yet supported on %s/%s", runtime.GOOS, runtime.GOARCH)
}
testenv.MustHaveGoRun(t)
@@ -626,13 +628,11 @@ func TestSegv(t *testing.T) {
// a VDSO call via asmcgocall.
testenv.SkipFlaky(t, 50504)
}
- if testenv.Builder() == "linux-mips64le-mengzhuo" && strings.Contains(got, "runtime: unknown pc") {
- // Runtime sometimes throw "unknown pc" when generating the traceback.
- // Curiously, that doesn't seem to happen on the linux-mips64le-rtrk
- // builder.
- testenv.SkipFlaky(t, 50605)
- }
}
+ if test == "SegvInCgo" && strings.Contains(got, "runtime: unknown pc") {
+ testenv.SkipFlaky(t, 50979)
+ }
+
nowant := "runtime: "
if strings.Contains(got, nowant) {
t.Errorf("unexpectedly saw %q in output", nowant)
diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go
index 1eb10f9b60..a218205af4 100644
--- a/src/runtime/crash_unix_test.go
+++ b/src/runtime/crash_unix_test.go
@@ -132,7 +132,7 @@ func TestCrashDumpsAllThreads(t *testing.T) {
out := outbuf.Bytes()
n := bytes.Count(out, []byte("main.crashDumpsAllThreadsLoop("))
if n != 4 {
- t.Errorf("found %d instances of main.loop; expected 4", n)
+ t.Errorf("found %d instances of main.crashDumpsAllThreadsLoop; expected 4", n)
t.Logf("%s", out)
}
}
diff --git a/src/runtime/debug/mod.go b/src/runtime/debug/mod.go
index 14a496a8eb..688e2581ed 100644
--- a/src/runtime/debug/mod.go
+++ b/src/runtime/debug/mod.go
@@ -5,9 +5,9 @@
package debug
import (
- "bytes"
"fmt"
"runtime"
+ "strconv"
"strings"
)
@@ -23,8 +23,8 @@ func ReadBuildInfo() (info *BuildInfo, ok bool) {
return nil, false
}
data = data[16 : len(data)-16]
- bi := &BuildInfo{}
- if err := bi.UnmarshalText([]byte(data)); err != nil {
+ bi, err := ParseBuildInfo(data)
+ if err != nil {
return nil, false
}
@@ -63,8 +63,18 @@ type BuildSetting struct {
Key, Value string
}
-func (bi *BuildInfo) MarshalText() ([]byte, error) {
- buf := &bytes.Buffer{}
+// quoteKey reports whether key is required to be quoted.
+func quoteKey(key string) bool {
+ return len(key) == 0 || strings.ContainsAny(key, "= \t\r\n\"`")
+}
+
+// quoteValue reports whether value is required to be quoted.
+func quoteValue(value string) bool {
+ return strings.ContainsAny(value, " \t\r\n\"`")
+}
+
+func (bi *BuildInfo) String() string {
+ buf := new(strings.Builder)
if bi.GoVersion != "" {
fmt.Fprintf(buf, "go\t%s\n", bi.GoVersion)
}
@@ -76,12 +86,8 @@ func (bi *BuildInfo) MarshalText() ([]byte, error) {
buf.WriteString(word)
buf.WriteByte('\t')
buf.WriteString(m.Path)
- mv := m.Version
- if mv == "" {
- mv = "(devel)"
- }
buf.WriteByte('\t')
- buf.WriteString(mv)
+ buf.WriteString(m.Version)
if m.Replace == nil {
buf.WriteByte('\t')
buf.WriteString(m.Sum)
@@ -91,27 +97,28 @@ func (bi *BuildInfo) MarshalText() ([]byte, error) {
}
buf.WriteByte('\n')
}
- if bi.Main.Path != "" {
+ if bi.Main != (Module{}) {
formatMod("mod", bi.Main)
}
for _, dep := range bi.Deps {
formatMod("dep", *dep)
}
for _, s := range bi.Settings {
- if strings.ContainsAny(s.Key, "= \t\n") {
- return nil, fmt.Errorf("invalid build setting key %q", s.Key)
+ key := s.Key
+ if quoteKey(key) {
+ key = strconv.Quote(key)
}
- if strings.Contains(s.Value, "\n") {
- return nil, fmt.Errorf("invalid build setting value for key %q: contains newline", s.Value)
+ value := s.Value
+ if quoteValue(value) {
+ value = strconv.Quote(value)
}
- fmt.Fprintf(buf, "build\t%s=%s\n", s.Key, s.Value)
+ fmt.Fprintf(buf, "build\t%s=%s\n", key, value)
}
- return buf.Bytes(), nil
+ return buf.String()
}
-func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
- *bi = BuildInfo{}
+func ParseBuildInfo(data string) (bi *BuildInfo, err error) {
lineNum := 1
defer func() {
if err != nil {
@@ -120,67 +127,69 @@ func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
}()
var (
- pathLine = []byte("path\t")
- modLine = []byte("mod\t")
- depLine = []byte("dep\t")
- repLine = []byte("=>\t")
- buildLine = []byte("build\t")
- newline = []byte("\n")
- tab = []byte("\t")
+ pathLine = "path\t"
+ modLine = "mod\t"
+ depLine = "dep\t"
+ repLine = "=>\t"
+ buildLine = "build\t"
+ newline = "\n"
+ tab = "\t"
)
- readModuleLine := func(elem [][]byte) (Module, error) {
+ readModuleLine := func(elem []string) (Module, error) {
if len(elem) != 2 && len(elem) != 3 {
return Module{}, fmt.Errorf("expected 2 or 3 columns; got %d", len(elem))
}
+ version := elem[1]
sum := ""
if len(elem) == 3 {
- sum = string(elem[2])
+ sum = elem[2]
}
return Module{
- Path: string(elem[0]),
- Version: string(elem[1]),
+ Path: elem[0],
+ Version: version,
Sum: sum,
}, nil
}
+ bi = new(BuildInfo)
var (
last *Module
- line []byte
+ line string
ok bool
)
// Reverse of BuildInfo.String(), except for go version.
for len(data) > 0 {
- line, data, ok = bytes.Cut(data, newline)
+ line, data, ok = strings.Cut(data, newline)
if !ok {
break
}
switch {
- case bytes.HasPrefix(line, pathLine):
+ case strings.HasPrefix(line, pathLine):
elem := line[len(pathLine):]
bi.Path = string(elem)
- case bytes.HasPrefix(line, modLine):
- elem := bytes.Split(line[len(modLine):], tab)
+ case strings.HasPrefix(line, modLine):
+ elem := strings.Split(line[len(modLine):], tab)
last = &bi.Main
*last, err = readModuleLine(elem)
if err != nil {
- return err
+ return nil, err
}
- case bytes.HasPrefix(line, depLine):
- elem := bytes.Split(line[len(depLine):], tab)
+ case strings.HasPrefix(line, depLine):
+ elem := strings.Split(line[len(depLine):], tab)
last = new(Module)
bi.Deps = append(bi.Deps, last)
*last, err = readModuleLine(elem)
if err != nil {
- return err
+ return nil, err
}
- case bytes.HasPrefix(line, repLine):
- elem := bytes.Split(line[len(repLine):], tab)
+ case strings.HasPrefix(line, repLine):
+ elem := strings.Split(line[len(repLine):], tab)
if len(elem) != 3 {
- return fmt.Errorf("expected 3 columns for replacement; got %d", len(elem))
+ return nil, fmt.Errorf("expected 3 columns for replacement; got %d", len(elem))
}
if last == nil {
- return fmt.Errorf("replacement with no module on previous line")
+ return nil, fmt.Errorf("replacement with no module on previous line")
}
last.Replace = &Module{
Path: string(elem[0]),
@@ -188,17 +197,63 @@ func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
Sum: string(elem[2]),
}
last = nil
- case bytes.HasPrefix(line, buildLine):
- key, val, ok := strings.Cut(string(line[len(buildLine):]), "=")
- if !ok {
- return fmt.Errorf("invalid build line")
+ case strings.HasPrefix(line, buildLine):
+ kv := line[len(buildLine):]
+ if len(kv) < 1 {
+ return nil, fmt.Errorf("build line missing '='")
+ }
+
+ var key, rawValue string
+ switch kv[0] {
+ case '=':
+ return nil, fmt.Errorf("build line with missing key")
+
+ case '`', '"':
+ rawKey, err := strconv.QuotedPrefix(kv)
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted key in build line")
+ }
+ if len(kv) == len(rawKey) {
+ return nil, fmt.Errorf("build line missing '=' after quoted key")
+ }
+ if c := kv[len(rawKey)]; c != '=' {
+ return nil, fmt.Errorf("unexpected character after quoted key: %q", c)
+ }
+ key, _ = strconv.Unquote(rawKey)
+ rawValue = kv[len(rawKey)+1:]
+
+ default:
+ var ok bool
+ key, rawValue, ok = strings.Cut(kv, "=")
+ if !ok {
+ return nil, fmt.Errorf("build line missing '=' after key")
+ }
+ if quoteKey(key) {
+ return nil, fmt.Errorf("unquoted key %q must be quoted", key)
+ }
}
- if key == "" {
- return fmt.Errorf("empty key")
+
+ var value string
+ if len(rawValue) > 0 {
+ switch rawValue[0] {
+ case '`', '"':
+ var err error
+ value, err = strconv.Unquote(rawValue)
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted value in build line")
+ }
+
+ default:
+ value = rawValue
+ if quoteValue(value) {
+ return nil, fmt.Errorf("unquoted value %q must be quoted", value)
+ }
+ }
}
- bi.Settings = append(bi.Settings, BuildSetting{Key: key, Value: val})
+
+ bi.Settings = append(bi.Settings, BuildSetting{Key: key, Value: value})
}
lineNum++
}
- return nil
+ return bi, nil
}
diff --git a/src/runtime/debug/mod_test.go b/src/runtime/debug/mod_test.go
new file mode 100644
index 0000000000..b2917692f4
--- /dev/null
+++ b/src/runtime/debug/mod_test.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug_test
+
+import (
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "testing"
+)
+
+// strip removes two leading tabs after each newline of s.
+func strip(s string) string {
+ replaced := strings.ReplaceAll(s, "\n\t\t", "\n")
+ if len(replaced) > 0 && replaced[0] == '\n' {
+ replaced = replaced[1:]
+ }
+ return replaced
+}
+
+func FuzzParseBuildInfoRoundTrip(f *testing.F) {
+ // Package built from outside a module, missing some fields..
+ f.Add(strip(`
+ path rsc.io/fortune
+ mod rsc.io/fortune v1.0.0
+ `))
+
+ // Package built from the standard library, missing some fields..
+ f.Add(`path cmd/test2json`)
+
+ // Package built from inside a module.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ mod example.com/m (devel)
+ build -compiler=gc
+ `))
+
+ // Package built in GOPATH mode.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ build -compiler=gc
+ `))
+
+ // Escaped build info.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ build CRAZY_ENV="requires\nescaping"
+ `))
+
+ f.Fuzz(func(t *testing.T, s string) {
+ bi, err := debug.ParseBuildInfo(s)
+ if err != nil {
+ // Not a round-trippable BuildInfo string.
+ t.Log(err)
+ return
+ }
+
+ // s2 could have different escaping from s.
+ // However, it should parse to exactly the same contents.
+ s2 := bi.String()
+ bi2, err := debug.ParseBuildInfo(s2)
+ if err != nil {
+ t.Fatalf("%v:\n%s", err, s2)
+ }
+
+ if !reflect.DeepEqual(bi2, bi) {
+ t.Fatalf("Parsed representation differs.\ninput:\n%s\noutput:\n%s", s, s2)
+ }
+ })
+}
diff --git a/src/runtime/defs1_netbsd_386.go b/src/runtime/defs1_netbsd_386.go
index a4548e6f06..b6e47a008d 100644
--- a/src/runtime/defs1_netbsd_386.go
+++ b/src/runtime/defs1_netbsd_386.go
@@ -7,7 +7,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x400000
diff --git a/src/runtime/defs1_netbsd_amd64.go b/src/runtime/defs1_netbsd_amd64.go
index 4b0e79ebb6..b8292fa3cc 100644
--- a/src/runtime/defs1_netbsd_amd64.go
+++ b/src/runtime/defs1_netbsd_amd64.go
@@ -7,7 +7,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x400000
diff --git a/src/runtime/defs1_netbsd_arm.go b/src/runtime/defs1_netbsd_arm.go
index 2b5d5990d3..d2cb4865b6 100644
--- a/src/runtime/defs1_netbsd_arm.go
+++ b/src/runtime/defs1_netbsd_arm.go
@@ -7,7 +7,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x400000
diff --git a/src/runtime/defs1_netbsd_arm64.go b/src/runtime/defs1_netbsd_arm64.go
index 740dc77658..7776fe1d99 100644
--- a/src/runtime/defs1_netbsd_arm64.go
+++ b/src/runtime/defs1_netbsd_arm64.go
@@ -7,7 +7,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x400000
diff --git a/src/runtime/defs1_solaris_amd64.go b/src/runtime/defs1_solaris_amd64.go
index 19e8a2512e..3c13f33331 100644
--- a/src/runtime/defs1_solaris_amd64.go
+++ b/src/runtime/defs1_solaris_amd64.go
@@ -13,7 +13,6 @@ const (
_ETIMEDOUT = 0x91
_EWOULDBLOCK = 0xb
_EINPROGRESS = 0x96
- _ENOSYS = 0x59
_PROT_NONE = 0x0
_PROT_READ = 0x1
diff --git a/src/runtime/defs_dragonfly.go b/src/runtime/defs_dragonfly.go
index 47a2e4d123..952163b555 100644
--- a/src/runtime/defs_dragonfly.go
+++ b/src/runtime/defs_dragonfly.go
@@ -31,7 +31,6 @@ const (
EFAULT = C.EFAULT
EBUSY = C.EBUSY
EAGAIN = C.EAGAIN
- ENOSYS = C.ENOSYS
O_NONBLOCK = C.O_NONBLOCK
O_CLOEXEC = C.O_CLOEXEC
diff --git a/src/runtime/defs_dragonfly_amd64.go b/src/runtime/defs_dragonfly_amd64.go
index f3c6ecd04b..4358c1e0c2 100644
--- a/src/runtime/defs_dragonfly_amd64.go
+++ b/src/runtime/defs_dragonfly_amd64.go
@@ -10,7 +10,6 @@ const (
_EFAULT = 0xe
_EBUSY = 0x10
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x20000
diff --git a/src/runtime/defs_freebsd.go b/src/runtime/defs_freebsd.go
index 9ba97c8459..3fbd580ac5 100644
--- a/src/runtime/defs_freebsd.go
+++ b/src/runtime/defs_freebsd.go
@@ -48,7 +48,6 @@ const (
EINTR = C.EINTR
EFAULT = C.EFAULT
EAGAIN = C.EAGAIN
- ENOSYS = C.ENOSYS
O_NONBLOCK = C.O_NONBLOCK
O_CLOEXEC = C.O_CLOEXEC
diff --git a/src/runtime/defs_freebsd_386.go b/src/runtime/defs_freebsd_386.go
index f822934d58..ff4dcfa5fe 100644
--- a/src/runtime/defs_freebsd_386.go
+++ b/src/runtime/defs_freebsd_386.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_freebsd_amd64.go b/src/runtime/defs_freebsd_amd64.go
index 0b696cf227..f537c898e4 100644
--- a/src/runtime/defs_freebsd_amd64.go
+++ b/src/runtime/defs_freebsd_amd64.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_freebsd_arm.go b/src/runtime/defs_freebsd_arm.go
index b6f3e790cf..2e20ae7d78 100644
--- a/src/runtime/defs_freebsd_arm.go
+++ b/src/runtime/defs_freebsd_arm.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_freebsd_arm64.go b/src/runtime/defs_freebsd_arm64.go
index 0759a1238f..1838108fdb 100644
--- a/src/runtime/defs_freebsd_arm64.go
+++ b/src/runtime/defs_freebsd_arm64.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_linux.go b/src/runtime/defs_linux.go
index fa94e388f4..e55bb6bbbc 100644
--- a/src/runtime/defs_linux.go
+++ b/src/runtime/defs_linux.go
@@ -37,7 +37,6 @@ const (
EINTR = C.EINTR
EAGAIN = C.EAGAIN
ENOMEM = C.ENOMEM
- ENOSYS = C.ENOSYS
PROT_NONE = C.PROT_NONE
PROT_READ = C.PROT_READ
@@ -91,6 +90,8 @@ const (
SIGPWR = C.SIGPWR
SIGSYS = C.SIGSYS
+ SIGRTMIN = C.SIGRTMIN
+
FPE_INTDIV = C.FPE_INTDIV
FPE_INTOVF = C.FPE_INTOVF
FPE_FLTDIV = C.FPE_FLTDIV
diff --git a/src/runtime/defs_linux_386.go b/src/runtime/defs_linux_386.go
index 24fb58bbf8..5376bded2b 100644
--- a/src/runtime/defs_linux_386.go
+++ b/src/runtime/defs_linux_386.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -64,6 +63,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_amd64.go b/src/runtime/defs_linux_amd64.go
index 36da22f8ce..da4d357532 100644
--- a/src/runtime/defs_linux_amd64.go
+++ b/src/runtime/defs_linux_amd64.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -64,6 +63,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_arm.go b/src/runtime/defs_linux_arm.go
index 13d06969e3..18aa0931e5 100644
--- a/src/runtime/defs_linux_arm.go
+++ b/src/runtime/defs_linux_arm.go
@@ -11,7 +11,6 @@ const (
_EINTR = 0x4
_ENOMEM = 0xc
_EAGAIN = 0xb
- _ENOSYS = 0x26
_PROT_NONE = 0
_PROT_READ = 0x1
@@ -63,6 +62,7 @@ const (
_SIGIO = 0x1d
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_arm64.go b/src/runtime/defs_linux_arm64.go
index f9ee9cbc35..c5d7d7e3fd 100644
--- a/src/runtime/defs_linux_arm64.go
+++ b/src/runtime/defs_linux_arm64.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -64,6 +63,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_mips64x.go b/src/runtime/defs_linux_mips64x.go
index 2601082ee1..e645248131 100644
--- a/src/runtime/defs_linux_mips64x.go
+++ b/src/runtime/defs_linux_mips64x.go
@@ -12,7 +12,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x59
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -66,6 +65,8 @@ const (
_SIGXCPU = 0x1e
_SIGXFSZ = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_mipsx.go b/src/runtime/defs_linux_mipsx.go
index 37651ef7e4..5afb6f423f 100644
--- a/src/runtime/defs_linux_mipsx.go
+++ b/src/runtime/defs_linux_mipsx.go
@@ -12,7 +12,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x59
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -66,6 +65,8 @@ const (
_SIGXCPU = 0x1e
_SIGXFSZ = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_ppc64.go b/src/runtime/defs_linux_ppc64.go
index c7aa7234c1..f3e305e34e 100644
--- a/src/runtime/defs_linux_ppc64.go
+++ b/src/runtime/defs_linux_ppc64.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -63,6 +62,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_ppc64le.go b/src/runtime/defs_linux_ppc64le.go
index c7aa7234c1..f3e305e34e 100644
--- a/src/runtime/defs_linux_ppc64le.go
+++ b/src/runtime/defs_linux_ppc64le.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -63,6 +62,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_riscv64.go b/src/runtime/defs_linux_riscv64.go
index 747e26bc4b..29496acdcb 100644
--- a/src/runtime/defs_linux_riscv64.go
+++ b/src/runtime/defs_linux_riscv64.go
@@ -10,7 +10,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -65,6 +64,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_s390x.go b/src/runtime/defs_linux_s390x.go
index 740d8100c5..817a29ed30 100644
--- a/src/runtime/defs_linux_s390x.go
+++ b/src/runtime/defs_linux_s390x.go
@@ -10,7 +10,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -64,6 +63,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_netbsd.go b/src/runtime/defs_netbsd.go
index df8bc579f2..6b084c06b5 100644
--- a/src/runtime/defs_netbsd.go
+++ b/src/runtime/defs_netbsd.go
@@ -33,7 +33,6 @@ const (
EINTR = C.EINTR
EFAULT = C.EFAULT
EAGAIN = C.EAGAIN
- ENOSYS = C.ENOSYS
O_NONBLOCK = C.O_NONBLOCK
O_CLOEXEC = C.O_CLOEXEC
diff --git a/src/runtime/defs_openbsd.go b/src/runtime/defs_openbsd.go
index ec7d82a33c..cbf53eb9ef 100644
--- a/src/runtime/defs_openbsd.go
+++ b/src/runtime/defs_openbsd.go
@@ -34,7 +34,6 @@ const (
EINTR = C.EINTR
EFAULT = C.EFAULT
EAGAIN = C.EAGAIN
- ENOSYS = C.ENOSYS
O_NONBLOCK = C.O_NONBLOCK
O_CLOEXEC = C.O_CLOEXEC
diff --git a/src/runtime/defs_openbsd_386.go b/src/runtime/defs_openbsd_386.go
index a866ec880a..35c559bb45 100644
--- a/src/runtime/defs_openbsd_386.go
+++ b/src/runtime/defs_openbsd_386.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_openbsd_amd64.go b/src/runtime/defs_openbsd_amd64.go
index 46f1245201..d7432daedd 100644
--- a/src/runtime/defs_openbsd_amd64.go
+++ b/src/runtime/defs_openbsd_amd64.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_openbsd_arm.go b/src/runtime/defs_openbsd_arm.go
index 6f128c4284..471b3063fb 100644
--- a/src/runtime/defs_openbsd_arm.go
+++ b/src/runtime/defs_openbsd_arm.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_openbsd_arm64.go b/src/runtime/defs_openbsd_arm64.go
index d2b947feb2..5300ab087c 100644
--- a/src/runtime/defs_openbsd_arm64.go
+++ b/src/runtime/defs_openbsd_arm64.go
@@ -10,7 +10,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_openbsd_mips64.go b/src/runtime/defs_openbsd_mips64.go
index 28d70b7a01..a8789ef451 100644
--- a/src/runtime/defs_openbsd_mips64.go
+++ b/src/runtime/defs_openbsd_mips64.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_solaris.go b/src/runtime/defs_solaris.go
index ec16c9dcce..f626498525 100644
--- a/src/runtime/defs_solaris.go
+++ b/src/runtime/defs_solaris.go
@@ -43,7 +43,6 @@ const (
ETIMEDOUT = C.ETIMEDOUT
EWOULDBLOCK = C.EWOULDBLOCK
EINPROGRESS = C.EINPROGRESS
- ENOSYS = C.ENOSYS
PROT_NONE = C.PROT_NONE
PROT_READ = C.PROT_READ
diff --git a/src/runtime/export_aix_test.go b/src/runtime/export_aix_test.go
index 162552d04c..51df951738 100644
--- a/src/runtime/export_aix_test.go
+++ b/src/runtime/export_aix_test.go
@@ -5,3 +5,4 @@
package runtime
var Fcntl = syscall_fcntl1
+var SetNonblock = setNonblock
diff --git a/src/runtime/export_darwin_test.go b/src/runtime/export_darwin_test.go
index e9b6eb36da..66e2c02c4f 100644
--- a/src/runtime/export_darwin_test.go
+++ b/src/runtime/export_darwin_test.go
@@ -11,3 +11,5 @@ func Fcntl(fd, cmd, arg uintptr) (uintptr, uintptr) {
}
return uintptr(r), 0
}
+
+var SetNonblock = setNonblock
diff --git a/src/runtime/export_pipe2_test.go b/src/runtime/export_pipe2_test.go
index bdf39c60df..8d49009b43 100644
--- a/src/runtime/export_pipe2_test.go
+++ b/src/runtime/export_pipe2_test.go
@@ -7,9 +7,5 @@
package runtime
func Pipe() (r, w int32, errno int32) {
- r, w, errno = pipe2(0)
- if errno == _ENOSYS {
- return pipe()
- }
- return r, w, errno
+ return pipe2(0)
}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 0f21838721..0ac15ce82c 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -1199,6 +1199,8 @@ func (th *TimeHistogram) Record(duration int64) {
(*timeHistogram)(th).record(duration)
}
+var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
+
func SetIntArgRegs(a int) int {
lock(&finlock)
old := intArgRegs
@@ -1330,3 +1332,21 @@ func Releasem() {
}
var Timediv = timediv
+
+type PIController struct {
+ piController
+}
+
+func NewPIController(kp, ti, tt, min, max float64) *PIController {
+ return &PIController{piController{
+ kp: kp,
+ ti: ti,
+ tt: tt,
+ min: min,
+ max: max,
+ }}
+}
+
+func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
+ return c.piController.next(input, setpoint, period)
+}
diff --git a/src/runtime/export_unix_test.go b/src/runtime/export_unix_test.go
index 9f046b95e0..4a587cb780 100644
--- a/src/runtime/export_unix_test.go
+++ b/src/runtime/export_unix_test.go
@@ -9,7 +9,6 @@ package runtime
import "unsafe"
var NonblockingPipe = nonblockingPipe
-var SetNonblock = setNonblock
var Closeonexec = closeonexec
func sigismember(mask *sigset, i int) bool {
diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h
index a454dcaa69..2e2bb30446 100644
--- a/src/runtime/funcdata.h
+++ b/src/runtime/funcdata.h
@@ -20,6 +20,7 @@
#define FUNCDATA_OpenCodedDeferInfo 4 /* info for func with open-coded defers */
#define FUNCDATA_ArgInfo 5
#define FUNCDATA_ArgLiveInfo 6
+#define FUNCDATA_WrapInfo 7
// Pseudo-assembly statements.
diff --git a/src/runtime/histogram.go b/src/runtime/histogram.go
index 0cccbcca16..cd7e29a8c8 100644
--- a/src/runtime/histogram.go
+++ b/src/runtime/histogram.go
@@ -47,7 +47,7 @@ const (
// │ └---- Next 4 bits -> sub-bucket 1
// └------- Bit 5 set -> super-bucket 2
//
- // Following this pattern, bucket 45 will have the bit 48 set. We don't
+ // Following this pattern, super-bucket 44 will have the bit 47 set. We don't
// have any buckets for higher values, so the highest sub-bucket will
// contain values of 2^48-1 nanoseconds or approx. 3 days. This range is
// more than enough to handle durations produced by the runtime.
@@ -139,36 +139,30 @@ func float64NegInf() float64 {
func timeHistogramMetricsBuckets() []float64 {
b := make([]float64, timeHistTotalBuckets+1)
b[0] = float64NegInf()
- for i := 0; i < timeHistNumSuperBuckets; i++ {
- superBucketMin := uint64(0)
- // The (inclusive) minimum for the first non-negative bucket is 0.
- if i > 0 {
- // The minimum for the second bucket will be
- // 1 << timeHistSubBucketBits, indicating that all
- // sub-buckets are represented by the next timeHistSubBucketBits
- // bits.
- // Thereafter, we shift up by 1 each time, so we can represent
- // this pattern as (i-1)+timeHistSubBucketBits.
- superBucketMin = uint64(1) << uint(i-1+timeHistSubBucketBits)
- }
- // subBucketShift is the amount that we need to shift the sub-bucket
- // index to combine it with the bucketMin.
- subBucketShift := uint(0)
- if i > 1 {
- // The first two super buckets are exact with respect to integers,
- // so we'll never have to shift the sub-bucket index. Thereafter,
- // we shift up by 1 with each subsequent bucket.
- subBucketShift = uint(i - 2)
- }
+ // Super-bucket 0 has no bits above timeHistSubBucketBits
+ // set, so just iterate over each bucket and assign the
+ // incrementing bucket.
+ for i := 0; i < timeHistNumSubBuckets; i++ {
+ bucketNanos := uint64(i)
+ b[i+1] = float64(bucketNanos) / 1e9
+ }
+ // Generate the rest of the super-buckets. It's easier to reason
+ // about if we cut out the 0'th bucket, so subtract one since
+ // we just handled that bucket.
+ for i := 0; i < timeHistNumSuperBuckets-1; i++ {
for j := 0; j < timeHistNumSubBuckets; j++ {
- // j is the sub-bucket index. By shifting the index into position to
- // combine with the bucket minimum, we obtain the minimum value for that
- // sub-bucket.
- subBucketMin := superBucketMin + (uint64(j) << subBucketShift)
-
- // Convert the subBucketMin which is in nanoseconds to a float64 seconds value.
+ // Set the super-bucket bit.
+ bucketNanos := uint64(1) << (i + timeHistSubBucketBits)
+ // Set the sub-bucket bits.
+ bucketNanos |= uint64(j) << i
+ // The index for this bucket is going to be the (i+1)'th super bucket
+ // (note that we're starting from zero, but handled the first super-bucket
+ // earlier, so we need to compensate), and the j'th sub bucket.
+ // Add 1 because we left space for -Inf.
+ bucketIndex := (i+1)*timeHistNumSubBuckets + j + 1
+ // Convert nanoseconds to seconds via a division.
// These values will all be exactly representable by a float64.
- b[i*timeHistNumSubBuckets+j+1] = float64(subBucketMin) / 1e9
+ b[bucketIndex] = float64(bucketNanos) / 1e9
}
}
b[len(b)-1] = float64Inf()
diff --git a/src/runtime/histogram_test.go b/src/runtime/histogram_test.go
index dbc64fa559..b12b65a41e 100644
--- a/src/runtime/histogram_test.go
+++ b/src/runtime/histogram_test.go
@@ -68,3 +68,43 @@ func TestTimeHistogram(t *testing.T) {
dummyTimeHistogram = TimeHistogram{}
}
+
+func TestTimeHistogramMetricsBuckets(t *testing.T) {
+ buckets := TimeHistogramMetricsBuckets()
+
+ nonInfBucketsLen := TimeHistNumSubBuckets * TimeHistNumSuperBuckets
+ expBucketsLen := nonInfBucketsLen + 2 // Count -Inf and +Inf.
+ if len(buckets) != expBucketsLen {
+ t.Fatalf("unexpected length of buckets: got %d, want %d", len(buckets), expBucketsLen)
+ }
+ // Check the first non-Inf 2*TimeHistNumSubBuckets buckets in order, skipping the
+ // first bucket which should be -Inf (checked later).
+ //
+ // Because of the way this scheme works, the bottom TimeHistNumSubBuckets
+ // buckets are fully populated, and then the next TimeHistNumSubBuckets
+ // have the TimeHistSubBucketBits'th bit set, while the bottom are once
+ // again fully populated.
+ for i := 1; i <= 2*TimeHistNumSubBuckets+1; i++ {
+ if got, want := buckets[i], float64(i-1)/1e9; got != want {
+ t.Errorf("expected bucket %d to have value %e, got %e", i, want, got)
+ }
+ }
+ // Check some values.
+ idxToBucket := map[int]float64{
+ 0: math.Inf(-1),
+ 33: float64(0x10<<1) / 1e9,
+ 34: float64(0x11<<1) / 1e9,
+ 49: float64(0x10<<2) / 1e9,
+ 58: float64(0x19<<2) / 1e9,
+ 65: float64(0x10<<3) / 1e9,
+ 513: float64(0x10<<31) / 1e9,
+ 519: float64(0x16<<31) / 1e9,
+ expBucketsLen - 2: float64(0x1f<<43) / 1e9,
+ expBucketsLen - 1: math.Inf(1),
+ }
+ for idx, bucket := range idxToBucket {
+ if got, want := buckets[idx], bucket; got != want {
+ t.Errorf("expected bucket %d to have value %e, got %e", idx, want, got)
+ }
+ }
+}
diff --git a/src/runtime/internal/atomic/atomic_arm.s b/src/runtime/internal/atomic/atomic_arm.s
index be3fd3a395..92cbe8a34f 100644
--- a/src/runtime/internal/atomic/atomic_arm.s
+++ b/src/runtime/internal/atomic/atomic_arm.s
@@ -229,16 +229,22 @@ store64loop:
// functions tail-call into the appropriate implementation, which
// means they must not open a frame. Hence, when they go down the
// panic path, at that point they push the LR to create a real frame
-// (they don't need to pop it because panic won't return).
+// (they don't need to pop it because panic won't return; however, we
+// do need to set the SP delta back).
+
+// Check if R1 is 8-byte aligned, panic if not.
+// Clobbers R2.
+#define CHECK_ALIGN \
+ AND.S $7, R1, R2 \
+ BEQ 4(PC) \
+ MOVW.W R14, -4(R13) /* prepare a real frame */ \
+ BL ·panicUnaligned(SB) \
+ ADD $4, R13 /* compensate SP delta */
TEXT ·Cas64(SB),NOSPLIT,$-4-21
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
@@ -249,11 +255,7 @@ TEXT ·Cas64(SB),NOSPLIT,$-4-21
TEXT ·Xadd64(SB),NOSPLIT,$-4-20
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
@@ -264,11 +266,7 @@ TEXT ·Xadd64(SB),NOSPLIT,$-4-20
TEXT ·Xchg64(SB),NOSPLIT,$-4-20
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
@@ -279,11 +277,7 @@ TEXT ·Xchg64(SB),NOSPLIT,$-4-20
TEXT ·Load64(SB),NOSPLIT,$-4-12
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
@@ -294,11 +288,7 @@ TEXT ·Load64(SB),NOSPLIT,$-4-12
TEXT ·Store64(SB),NOSPLIT,$-4-12
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
diff --git a/src/runtime/internal/syscall/asm_linux_386.s b/src/runtime/internal/syscall/asm_linux_386.s
new file mode 100644
index 0000000000..15aae4d8bd
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_386.s
@@ -0,0 +1,34 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// See ../sys_linux_386.s for the reason why we always use int 0x80
+// instead of the glibc-specific "CALL 0x10(GS)".
+#define INVOKE_SYSCALL INT $0x80
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+//
+// Syscall # in AX, args in BX CX DX SI DI BP, return in AX
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ MOVL num+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL a4+16(FP), SI
+ MOVL a5+20(FP), DI
+ MOVL a6+24(FP), BP
+ INVOKE_SYSCALL
+ CMPL AX, $0xfffff001
+ JLS ok
+ MOVL $-1, r1+28(FP)
+ MOVL $0, r2+32(FP)
+ NEGL AX
+ MOVL AX, errno+36(FP)
+ RET
+ok:
+ MOVL AX, r1+28(FP)
+ MOVL DX, r2+32(FP)
+ MOVL $0, errno+36(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_amd64.s b/src/runtime/internal/syscall/asm_linux_amd64.s
new file mode 100644
index 0000000000..961d9bd640
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_amd64.s
@@ -0,0 +1,33 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+//
+// Syscall # in AX, args in DI SI DX R10 R8 R9, return in AX DX.
+//
+// Note that this differs from "standard" ABI convention, which would pass 4th
+// arg in CX, not R10.
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVQ num+0(FP), AX // syscall entry
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ a4+32(FP), R10
+ MOVQ a5+40(FP), R8
+ MOVQ a6+48(FP), R9
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS ok
+ MOVQ $-1, r1+56(FP)
+ MOVQ $0, r2+64(FP)
+ NEGQ AX
+ MOVQ AX, errno+72(FP)
+ RET
+ok:
+ MOVQ AX, r1+56(FP)
+ MOVQ DX, r2+64(FP)
+ MOVQ $0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_arm.s b/src/runtime/internal/syscall/asm_linux_arm.s
new file mode 100644
index 0000000000..dbf1826d94
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_arm.s
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ MOVW num+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ MOVW a4+16(FP), R3
+ MOVW a5+20(FP), R4
+ MOVW a6+24(FP), R5
+ SWI $0
+ MOVW $0xfffff001, R6
+ CMP R6, R0
+ BLS ok
+ MOVW $-1, R1
+ MOVW R1, r1+28(FP)
+ MOVW $0, R2
+ MOVW R2, r2+32(FP)
+ RSB $0, R0, R0
+ MOVW R0, errno+36(FP)
+ RET
+ok:
+ MOVW R0, r1+28(FP)
+ MOVW R1, r2+32(FP)
+ MOVW $0, R0
+ MOVW R0, errno+36(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_arm64.s b/src/runtime/internal/syscall/asm_linux_arm64.s
new file mode 100644
index 0000000000..83e862ff72
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVD num+0(FP), R8 // syscall entry
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD a4+32(FP), R3
+ MOVD a5+40(FP), R4
+ MOVD a6+48(FP), R5
+ SVC
+ CMN $4095, R0
+ BCC ok
+ MOVD $-1, R4
+ MOVD R4, r1+56(FP)
+ MOVD ZR, r2+64(FP)
+ NEG R0, R0
+ MOVD R0, errno+72(FP)
+ RET
+ok:
+ MOVD R0, r1+56(FP)
+ MOVD R1, r2+64(FP)
+ MOVD ZR, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_mips64x.s b/src/runtime/internal/syscall/asm_linux_mips64x.s
new file mode 100644
index 0000000000..0e88a2d8ac
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_mips64x.s
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips64 || mips64le)
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVV num+0(FP), R2 // syscall entry
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV a4+32(FP), R7
+ MOVV a5+40(FP), R8
+ MOVV a6+48(FP), R9
+ SYSCALL
+ BEQ R7, ok
+ MOVV $-1, R1
+ MOVV R1, r1+56(FP)
+ MOVV R0, r2+64(FP)
+ MOVV R2, errno+72(FP)
+ RET
+ok:
+ MOVV R2, r1+56(FP)
+ MOVV R3, r2+64(FP)
+ MOVV R0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_mipsx.s b/src/runtime/internal/syscall/asm_linux_mipsx.s
new file mode 100644
index 0000000000..050029eaa1
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_mipsx.s
@@ -0,0 +1,34 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips || mipsle)
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+//
+// The 5th and 6th arg go at sp+16, sp+20.
+// Note that frame size of 20 means that 24 bytes gets reserved on stack.
+TEXT ·Syscall6(SB),NOSPLIT,$20-40
+ MOVW num+0(FP), R2 // syscall entry
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW a4+16(FP), R7
+ MOVW a5+20(FP), R8
+ MOVW a6+24(FP), R9
+ MOVW R8, 16(R29)
+ MOVW R9, 20(R29)
+ SYSCALL
+ BEQ R7, ok
+ MOVW $-1, R1
+ MOVW R1, r1+28(FP)
+ MOVW R0, r2+32(FP)
+ MOVW R2, errno+36(FP)
+ RET
+ok:
+ MOVW R2, r1+28(FP)
+ MOVW R3, r2+32(FP)
+ MOVW R0, errno+36(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_ppc64x.s b/src/runtime/internal/syscall/asm_linux_ppc64x.s
new file mode 100644
index 0000000000..8cf8737df8
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_ppc64x.s
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (ppc64 || ppc64le)
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVD num+0(FP), R9 // syscall entry
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD a4+32(FP), R6
+ MOVD a5+40(FP), R7
+ MOVD a6+48(FP), R8
+ SYSCALL R9
+ MOVD R0, r2+64(FP) // r2 is not used. Always set to 0.
+ BVC ok
+ MOVD $-1, R4
+ MOVD R4, r1+56(FP)
+ MOVD R3, errno+72(FP)
+ RET
+ok:
+ MOVD R3, r1+56(FP)
+ MOVD R0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_riscv64.s b/src/runtime/internal/syscall/asm_linux_riscv64.s
new file mode 100644
index 0000000000..a8652fdd6b
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_riscv64.s
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOV num+0(FP), A7 // syscall entry
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV a4+32(FP), A3
+ MOV a5+40(FP), A4
+ MOV a6+48(FP), A5
+ ECALL
+ MOV $-4096, T0
+ BLTU T0, A0, err
+ MOV A0, r1+56(FP)
+ MOV A1, r2+64(FP)
+ MOV ZERO, errno+72(FP)
+ RET
+err:
+ MOV $-1, T0
+ MOV T0, r1+56(FP)
+ MOV ZERO, r2+64(FP)
+ SUB A0, ZERO, A0
+ MOV A0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_s390x.s b/src/runtime/internal/syscall/asm_linux_s390x.s
new file mode 100644
index 0000000000..1b27f29390
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_s390x.s
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVD num+0(FP), R1 // syscall entry
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD a4+32(FP), R5
+ MOVD a5+40(FP), R6
+ MOVD a6+48(FP), R7
+ SYSCALL
+ MOVD $0xfffffffffffff001, R8
+ CMPUBLT R2, R8, ok
+ MOVD $-1, r1+56(FP)
+ MOVD $0, r2+64(FP)
+ NEG R2, R2
+ MOVD R2, errno+72(FP)
+ RET
+ok:
+ MOVD R2, r1+56(FP)
+ MOVD R3, r2+64(FP)
+ MOVD $0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/syscall_linux.go b/src/runtime/internal/syscall/syscall_linux.go
new file mode 100644
index 0000000000..06d5f21e7c
--- /dev/null
+++ b/src/runtime/internal/syscall/syscall_linux.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syscall provides the syscall primitives required for the runtime.
+package syscall
+
+// TODO(https://go.dev/issue/51087): This package is incomplete and currently
+// only contains very minimal support for Linux.
+
+// Syscall6 calls system call number 'num' with arguments a1-6.
+func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
diff --git a/src/runtime/memmove_ppc64x.s b/src/runtime/memmove_ppc64x.s
index e69e71a4a1..2152fb4f69 100644
--- a/src/runtime/memmove_ppc64x.s
+++ b/src/runtime/memmove_ppc64x.s
@@ -139,36 +139,38 @@ backwardtailloop:
BC 16, 0, backwardtailloop // bndz
nobackwardtail:
- BC 4, 5, LR // ble CR1 lr
+ BC 4, 5, LR // blelr cr1, return if DWORDS == 0
+ SRDCC $2,DWORDS,QWORDS // Compute number of 32B blocks and compare to 0
+ BNE backward32setup // If QWORDS != 0, start the 32B copy loop.
-backwardlarge:
- MOVD DWORDS, CTR
- SUB TGT, SRC, TMP // Use vsx if moving
- CMP TMP, $32 // at least 32 byte chunks
- BLT backwardlargeloop // and distance >= 32
- SRDCC $2,DWORDS,QWORDS // 32 byte chunks
- BNE backward32setup
+backward24:
+ // DWORDS is a value between 1-3.
+ CMP DWORDS, $2
-backwardlargeloop:
MOVD -8(SRC), TMP
- SUB $8,SRC
MOVD TMP, -8(TGT)
- SUB $8,TGT
- BC 16, 0, backwardlargeloop // bndz
+ BC 12, 0, LR // bltlr, return if DWORDS == 1
+
+ MOVD -16(SRC), TMP
+ MOVD TMP, -16(TGT)
+ BC 12, 2, LR // beqlr, return if DWORDS == 2
+
+ MOVD -24(SRC), TMP
+ MOVD TMP, -24(TGT)
RET
backward32setup:
- MOVD QWORDS, CTR // set up loop ctr
- MOVD $16, IDX16 // 32 bytes at a time
+ ANDCC $3,DWORDS // Compute remaining DWORDS and compare to 0
+ MOVD QWORDS, CTR // set up loop ctr
+ MOVD $16, IDX16 // 32 bytes at a time
backward32loop:
SUB $32, TGT
SUB $32, SRC
- LXVD2X (R0)(TGT), VS32 // load 16 bytes
- LXVD2X (IDX16)(TGT), VS33
- STXVD2X VS32, (R0)(SRC) // store 16 bytes
- STXVD2X VS33, (IDX16)(SRC)
- BC 16, 0, backward32loop // bndz
- BC 4, 5, LR // ble CR1 lr
- MOVD DWORDS, CTR
- BR backwardlargeloop
+ LXVD2X (R0)(SRC), VS32 // load 16x2 bytes
+ LXVD2X (IDX16)(SRC), VS33
+ STXVD2X VS32, (R0)(TGT) // store 16x2 bytes
+ STXVD2X VS33, (IDX16)(TGT)
+ BC 16, 0, backward32loop // bndz
+ BC 12, 2, LR // beqlr, return if DWORDS == 0
+ BR backward24
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index e2ac5d4993..10623e4d67 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -187,21 +187,15 @@ func runfinq() {
f := &fb.fin[i-1]
var regs abi.RegArgs
- var framesz uintptr
- if argRegs > 0 {
- // The args can always be passed in registers if they're
- // available, because platforms we support always have no
- // argument registers available, or more than 2.
- //
- // But unfortunately because we can have an arbitrary
- // amount of returns and it would be complex to try and
- // figure out how many of those can get passed in registers,
- // just conservatively assume none of them do.
- framesz = f.nret
- } else {
- // Need to pass arguments on the stack too.
- framesz = unsafe.Sizeof((any)(nil)) + f.nret
- }
+ // The args may be passed in registers or on stack. Even for
+ // the register case, we still need the spill slots.
+ // TODO: revisit if we remove spill slots.
+ //
+ // Unfortunately because we can have an arbitrary
+ // amount of returns and it would be complex to try and
+ // figure out how many of those can get passed in registers,
+ // just conservatively assume none of them do.
+ framesz := unsafe.Sizeof((any)(nil)) + f.nret
if framecap < framesz {
// The frame does not contain pointers interesting for GC,
// all not yet finalized objects are stored in finq.
diff --git a/src/runtime/mfinal_test.go b/src/runtime/mfinal_test.go
index 04ba7a6830..902ccc57f8 100644
--- a/src/runtime/mfinal_test.go
+++ b/src/runtime/mfinal_test.go
@@ -42,6 +42,15 @@ func TestFinalizerType(t *testing.T) {
{func(x *int) any { return Tintptr(x) }, func(v *int) { finalize(v) }},
{func(x *int) any { return (*Tint)(x) }, func(v *Tint) { finalize((*int)(v)) }},
{func(x *int) any { return (*Tint)(x) }, func(v Tinter) { finalize((*int)(v.(*Tint))) }},
+ // Test case for argument spill slot.
+ // If the spill slot was not counted for the frame size, it will (incorrectly) choose
+ // call32 as the result has (exactly) 32 bytes. When the argument actually spills,
+ // it clobbers the caller's frame (likely the return PC).
+ {func(x *int) any { return x }, func(v any) [4]int64 {
+ print() // force spill
+ finalize(v.(*int))
+ return [4]int64{}
+ }},
}
for i, tt := range finalizerTests {
diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go
index f06560201a..d54dbc26c2 100644
--- a/src/runtime/mgcpacer.go
+++ b/src/runtime/mgcpacer.go
@@ -154,6 +154,8 @@ type gcControllerState struct {
// For goexperiment.PacerRedesign.
consMarkController piController
+ _ uint32 // Padding for atomics on 32-bit platforms.
+
// heapGoal is the goal heapLive for when next GC ends.
// Set to ^uint64(0) if disabled.
//
@@ -670,10 +672,31 @@ func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) floa
currentConsMark := (float64(c.heapLive-c.trigger) * (utilization + idleUtilization)) /
(float64(scanWork) * (1 - utilization))
- // Update cons/mark controller.
- // Period for this is 1 GC cycle.
+ // Update cons/mark controller. The time period for this is 1 GC cycle.
+ //
+ // This use of a PI controller might seem strange. So, here's an explanation:
+ //
+ // currentConsMark represents the consMark we *should've* had to be perfectly
+ // on-target for this cycle. Given that we assume the next GC will be like this
+ // one in the steady-state, it stands to reason that we should just pick that
+ // as our next consMark. In practice, however, currentConsMark is too noisy:
+ // we're going to be wildly off-target in each GC cycle if we do that.
+ //
+ // What we do instead is make a long-term assumption: there is some steady-state
+ // consMark value, but it's obscured by noise. By constantly shooting for this
+ // noisy-but-perfect consMark value, the controller will bounce around a bit,
+ // but its average behavior, in aggregate, should be less noisy and closer to
+ // the true long-term consMark value, provided it's tuned to be slightly overdamped.
+ var ok bool
oldConsMark := c.consMark
- c.consMark = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
+ c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
+ if !ok {
+ // The error spiraled out of control. This is incredibly unlikely seeing
+ // as this controller is essentially just a smoothing function, but it might
+ // mean that something went very wrong with how currentConsMark was calculated.
+ // Just reset consMark and keep going.
+ c.consMark = 0
+ }
if debug.gcpacertrace > 0 {
printlock()
@@ -681,6 +704,9 @@ func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) floa
print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.stackScan+c.globalsScan, " B exp.) ")
print("in ", c.trigger, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(c.heapGoal), ", cons/mark ", oldConsMark, ")")
+ if !ok {
+ print("[controller reset]")
+ }
println()
printunlock()
}
@@ -1263,15 +1289,38 @@ type piController struct {
// PI controller state.
errIntegral float64 // Integral of the error from t=0 to now.
+
+ // Error flags.
+ errOverflow bool // Set if errIntegral ever overflowed.
+ inputOverflow bool // Set if an operation with the input overflowed.
}
-func (c *piController) next(input, setpoint, period float64) float64 {
+// next provides a new sample to the controller.
+//
+// input is the sample, setpoint is the desired point, and period is how much
+// time (in whatever unit makes the most sense) has passed since the last sample.
+//
+// Returns a new value for the variable it's controlling, and whether the operation
+// completed successfully. One reason this might fail is if error has been growing
+// in an unbounded manner, to the point of overflow.
+//
+// In the specific case that an error overflow occurs, the errOverflow field will be
+// set and the rest of the controller's internal state will be fully reset.
+func (c *piController) next(input, setpoint, period float64) (float64, bool) {
// Compute the raw output value.
prop := c.kp * (setpoint - input)
rawOutput := prop + c.errIntegral
// Clamp rawOutput into output.
output := rawOutput
+ if isInf(output) || isNaN(output) {
+ // The input had a large enough magnitude that either it was already
+ // overflowed, or some operation with it overflowed.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.inputOverflow = true
+ return c.min, false
+ }
if output < c.min {
output = c.min
} else if output > c.max {
@@ -1281,6 +1330,19 @@ func (c *piController) next(input, setpoint, period float64) float64 {
// Update the controller's state.
if c.ti != 0 && c.tt != 0 {
c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
+ if isInf(c.errIntegral) || isNaN(c.errIntegral) {
+ // So much error has accumulated that we managed to overflow.
+ // The assumptions around the controller have likely broken down.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.errOverflow = true
+ return c.min, false
+ }
}
- return output
+ return output, true
+}
+
+// reset resets the controller state, except for controller error flags.
+func (c *piController) reset() {
+ c.errIntegral = 0
}
diff --git a/src/runtime/mgcpacer_test.go b/src/runtime/mgcpacer_test.go
index 9ec0e5172b..10a8ca2520 100644
--- a/src/runtime/mgcpacer_test.go
+++ b/src/runtime/mgcpacer_test.go
@@ -715,3 +715,48 @@ func (f float64Stream) limit(min, max float64) float64Stream {
return v
}
}
+
+func FuzzPIController(f *testing.F) {
+ isNormal := func(x float64) bool {
+ return !math.IsInf(x, 0) && !math.IsNaN(x)
+ }
+ isPositive := func(x float64) bool {
+ return isNormal(x) && x > 0
+ }
+ // Seed with constants from controllers in the runtime.
+ // It's not critical that we keep these in sync, they're just
+ // reasonable seed inputs.
+ f.Add(0.3375, 3.2e6, 1e9, 0.001, 1000.0, 0.01)
+ f.Add(0.9, 4.0, 1000.0, -1000.0, 1000.0, 0.84)
+ f.Fuzz(func(t *testing.T, kp, ti, tt, min, max, setPoint float64) {
+ // Ignore uninteresting invalid parameters. These parameters
+ // are constant, so in practice surprising values will be documented
+ // or will be otherwise immediately visible.
+ //
+ // We just want to make sure that given a non-Inf, non-NaN input,
+ // we always get a non-Inf, non-NaN output.
+ if !isPositive(kp) || !isPositive(ti) || !isPositive(tt) {
+ return
+ }
+ if !isNormal(min) || !isNormal(max) || min > max {
+ return
+ }
+ // Use a random source, but make it deterministic.
+ rs := rand.New(rand.NewSource(800))
+ randFloat64 := func() float64 {
+ return math.Float64frombits(rs.Uint64())
+ }
+ p := NewPIController(kp, ti, tt, min, max)
+ state := float64(0)
+ for i := 0; i < 100; i++ {
+ input := randFloat64()
+ // Ignore the "ok" parameter. We're just trying to break it.
+ // state is intentionally completely uncorrelated with the input.
+ var ok bool
+ state, ok = p.Next(input, setPoint, 1.0)
+ if !isNormal(state) {
+ t.Fatalf("got NaN or Inf result from controller: %f %v", state, ok)
+ }
+ }
+ })
+}
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index c27e189af9..5f50378adf 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -165,11 +165,12 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
// Sleep/wait state of the background scavenger.
var scavenge struct {
- lock mutex
- g *g
- parked bool
- timer *timer
- sysmonWake uint32 // Set atomically.
+ lock mutex
+ g *g
+ parked bool
+ timer *timer
+ sysmonWake uint32 // Set atomically.
+ printControllerReset bool // Whether the scavenger is in cooldown.
}
// readyForScavenger signals sysmon to wake the scavenger because
@@ -295,8 +296,14 @@ func bgscavenge(c chan int) {
max: 1000.0, // 1000:1
}
// It doesn't really matter what value we start at, but we can't be zero, because
- // that'll cause divide-by-zero issues.
- critSleepRatio := 0.001
+ // that'll cause divide-by-zero issues. Pick something conservative which we'll
+ // also use as a fallback.
+ const startingCritSleepRatio = 0.001
+ critSleepRatio := startingCritSleepRatio
+ // Duration left in nanoseconds during which we avoid using the controller and
+ // we hold critSleepRatio at a conservative value. Used if the controller's
+ // assumptions fail to hold.
+ controllerCooldown := int64(0)
for {
released := uintptr(0)
crit := float64(0)
@@ -383,9 +390,22 @@ func bgscavenge(c chan int) {
// because of the additional overheads of using scavenged memory.
crit *= 1 + scavengeCostRatio
- // Go to sleep for our current sleepNS.
+ // Go to sleep based on how much time we spent doing work.
slept := scavengeSleep(int64(crit / critSleepRatio))
+ // Stop here if we're cooling down from the controller.
+ if controllerCooldown > 0 {
+ // crit and slept aren't exact measures of time, but it's OK to be a bit
+ // sloppy here. We're just hoping we're avoiding some transient bad behavior.
+ t := slept + int64(crit)
+ if t > controllerCooldown {
+ controllerCooldown = 0
+ } else {
+ controllerCooldown -= t
+ }
+ continue
+ }
+
// Calculate the CPU time spent.
//
// This may be slightly inaccurate with respect to GOMAXPROCS, but we're
@@ -395,7 +415,20 @@ func bgscavenge(c chan int) {
cpuFraction := float64(crit) / ((float64(slept) + crit) * float64(gomaxprocs))
// Update the critSleepRatio, adjusting until we reach our ideal fraction.
- critSleepRatio = critSleepController.next(cpuFraction, idealFraction, float64(slept)+crit)
+ var ok bool
+ critSleepRatio, ok = critSleepController.next(cpuFraction, idealFraction, float64(slept)+crit)
+ if !ok {
+ // The core assumption of the controller, that we can get a proportional
+ // response, broke down. This may be transient, so temporarily switch to
+ // sleeping a fixed, conservative amount.
+ critSleepRatio = startingCritSleepRatio
+ controllerCooldown = 5e9 // 5 seconds.
+
+ // Signal the scav trace printer to output this.
+ lock(&scavenge.lock)
+ scavenge.printControllerReset = true
+ unlock(&scavenge.lock)
+ }
}
}
@@ -434,7 +467,11 @@ func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
// released should be the amount of memory released since the last time this
// was called, and forced indicates whether the scavenge was forced by the
// application.
+//
+// scavenge.lock must be held.
func printScavTrace(gen uint32, released uintptr, forced bool) {
+ assertLockHeld(&scavenge.lock)
+
printlock()
print("scav ", gen, " ",
released>>10, " KiB work, ",
@@ -443,6 +480,9 @@ func printScavTrace(gen uint32, released uintptr, forced bool) {
)
if forced {
print(" (forced)")
+ } else if scavenge.printControllerReset {
+ print(" [controller reset]")
+ scavenge.printControllerReset = false
}
println()
printunlock()
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 17c9b75d69..37a8cf8a5d 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -122,7 +122,7 @@ func header(arch string) {
fmt.Fprintf(out, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n")
if beLe[arch] {
base := arch[:len(arch)-1]
- fmt.Fprintf(out, "//go:build %s || %sle\n", base, base)
+ fmt.Fprintf(out, "//go:build %s || %sle\n\n", base, base)
}
fmt.Fprintf(out, "#include \"go_asm.h\"\n")
fmt.Fprintf(out, "#include \"textflag.h\"\n\n")
@@ -147,8 +147,9 @@ type layout struct {
type regPos struct {
pos int
- op string
- reg string
+ saveOp string
+ restoreOp string
+ reg string
// If this register requires special save and restore, these
// give those operations with a %d placeholder for the stack
@@ -157,7 +158,12 @@ type regPos struct {
}
func (l *layout) add(op, reg string, size int) {
- l.regs = append(l.regs, regPos{op: op, reg: reg, pos: l.stack})
+ l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack})
+ l.stack += size
+}
+
+func (l *layout) add2(sop, rop, reg string, size int) {
+ l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack})
l.stack += size
}
@@ -171,7 +177,7 @@ func (l *layout) save() {
if reg.save != "" {
p(reg.save, reg.pos)
} else {
- p("%s %s, %d(%s)", reg.op, reg.reg, reg.pos, l.sp)
+ p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp)
}
}
}
@@ -182,7 +188,7 @@ func (l *layout) restore() {
if reg.restore != "" {
p(reg.restore, reg.pos)
} else {
- p("%s %d(%s), %s", reg.op, reg.pos, l.sp, reg.reg)
+ p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg)
}
}
}
@@ -324,12 +330,13 @@ func genARM64() {
// R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special
// and not saved here.
var l = layout{sp: "RSP", stack: 8} // add slot to save PC of interrupted instruction
- for i := 0; i <= 26; i++ {
+ for i := 0; i < 26; i += 2 {
if i == 18 {
+ i--
continue // R18 is not used, skip
}
- reg := fmt.Sprintf("R%d", i)
- l.add("MOVD", reg, 8)
+ reg := fmt.Sprintf("(R%d, R%d)", i, i+1)
+ l.add2("STP", "LDP", reg, 16)
}
// Add flag registers.
l.addSpecial(
@@ -342,9 +349,9 @@ func genARM64() {
8)
// TODO: FPCR? I don't think we'll change it, so no need to save.
// Add floating point registers F0-F31.
- for i := 0; i <= 31; i++ {
- reg := fmt.Sprintf("F%d", i)
- l.add("FMOVD", reg, 8)
+ for i := 0; i < 31; i += 2 {
+ reg := fmt.Sprintf("(F%d, F%d)", i, i+1)
+ l.add2("FSTPD", "FLDPD", reg, 16)
}
if l.stack%16 != 0 {
l.stack += 8 // SP needs 16-byte alignment
@@ -353,10 +360,8 @@ func genARM64() {
// allocate frame, save PC of interrupted instruction (in LR)
p("MOVD R30, %d(RSP)", -l.stack)
p("SUB $%d, RSP", l.stack)
- p("#ifdef GOOS_linux")
p("MOVD R29, -8(RSP)") // save frame pointer (only used on Linux)
p("SUB $8, RSP, R29") // set up new frame pointer
- p("#endif")
// On iOS, save the LR again after decrementing SP. We run the
// signal handler on the G stack (as it doesn't support sigaltstack),
// so any writes below SP may be clobbered.
@@ -369,11 +374,9 @@ func genARM64() {
l.restore()
p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
- p("#ifdef GOOS_linux")
- p("MOVD -8(RSP), R29") // restore frame pointer
- p("#endif")
- p("MOVD (RSP), R27") // load PC to REGTMP
- p("ADD $%d, RSP", l.stack+16) // pop frame (including the space pushed by sigctxt.pushCall)
+ p("MOVD -8(RSP), R29") // restore frame pointer
+ p("MOVD (RSP), R27") // load PC to REGTMP
+ p("ADD $%d, RSP", l.stack+16) // pop frame (including the space pushed by sigctxt.pushCall)
p("JMP (R27)")
}
diff --git a/src/runtime/nbpipe_pipe2.go b/src/runtime/nbpipe_pipe2.go
index 6a555bcd99..22d60b4a63 100644
--- a/src/runtime/nbpipe_pipe2.go
+++ b/src/runtime/nbpipe_pipe2.go
@@ -7,16 +7,5 @@
package runtime
func nonblockingPipe() (r, w int32, errno int32) {
- r, w, errno = pipe2(_O_NONBLOCK | _O_CLOEXEC)
- if errno == -_ENOSYS {
- r, w, errno = pipe()
- if errno != 0 {
- return -1, -1, errno
- }
- closeonexec(r)
- setNonblock(r)
- closeonexec(w)
- setNonblock(w)
- }
- return r, w, errno
+ return pipe2(_O_NONBLOCK | _O_CLOEXEC)
}
diff --git a/src/runtime/nbpipe_pipe_test.go b/src/runtime/nbpipe_pipe_test.go
new file mode 100644
index 0000000000..c8cb3cf691
--- /dev/null
+++ b/src/runtime/nbpipe_pipe_test.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin
+
+package runtime_test
+
+import (
+ "runtime"
+ "syscall"
+ "testing"
+)
+
+func TestSetNonblock(t *testing.T) {
+ t.Parallel()
+
+ r, w, errno := runtime.Pipe()
+ if errno != 0 {
+ t.Fatal(syscall.Errno(errno))
+ }
+ defer func() {
+ runtime.Close(r)
+ runtime.Close(w)
+ }()
+
+ checkIsPipe(t, r, w)
+
+ runtime.SetNonblock(r)
+ runtime.SetNonblock(w)
+ checkNonblocking(t, r, "reader")
+ checkNonblocking(t, w, "writer")
+
+ runtime.Closeonexec(r)
+ runtime.Closeonexec(w)
+ checkCloseonexec(t, r, "reader")
+ checkCloseonexec(t, w, "writer")
+}
diff --git a/src/runtime/nbpipe_test.go b/src/runtime/nbpipe_test.go
index 36342cfde8..b6869e7974 100644
--- a/src/runtime/nbpipe_test.go
+++ b/src/runtime/nbpipe_test.go
@@ -66,28 +66,3 @@ func checkCloseonexec(t *testing.T, fd int32, name string) {
t.Errorf("FD_CLOEXEC not set in %s flags %#x", name, flags)
}
}
-
-func TestSetNonblock(t *testing.T) {
- t.Parallel()
-
- r, w, errno := runtime.Pipe()
- if errno != 0 {
- t.Fatal(syscall.Errno(errno))
- }
- defer func() {
- runtime.Close(r)
- runtime.Close(w)
- }()
-
- checkIsPipe(t, r, w)
-
- runtime.SetNonblock(r)
- runtime.SetNonblock(w)
- checkNonblocking(t, r, "reader")
- checkNonblocking(t, w, "writer")
-
- runtime.Closeonexec(r)
- runtime.Closeonexec(w)
- checkCloseonexec(t, r, "reader")
- checkCloseonexec(t, w, "writer")
-}
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 2e946656d0..5aee04d5a8 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -562,13 +562,6 @@ func write1(fd uintptr, buf unsafe.Pointer, nbyte int32) int32 {
}
//go:nosplit
-func pipe() (r, w int32, errno int32) {
- var p [2]int32
- _, e := sysvicall1Err(&libc_pipe, uintptr(noescape(unsafe.Pointer(&p))))
- return p[0], p[1], int32(e)
-}
-
-//go:nosplit
func pipe2(flags int32) (r, w int32, errno int32) {
var p [2]int32
_, e := sysvicall2Err(&libc_pipe2, uintptr(noescape(unsafe.Pointer(&p))), uintptr(flags))
@@ -580,12 +573,6 @@ func closeonexec(fd int32) {
fcntl(fd, _F_SETFD, _FD_CLOEXEC)
}
-//go:nosplit
-func setNonblock(fd int32) {
- flags := fcntl(fd, _F_GETFL, 0)
- fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
-}
-
func osyield1()
//go:nosplit
@@ -634,3 +621,12 @@ func sysauxv(auxv []uintptr) {
}
}
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
index aeff593d50..292ff94795 100644
--- a/src/runtime/os_aix.go
+++ b/src/runtime/os_aix.go
@@ -373,3 +373,12 @@ func setNonblock(fd int32) {
flags := fcntl(fd, _F_GETFL, 0)
fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index 0f0eb6c6fd..9065b76375 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -459,3 +459,12 @@ func sysargs(argc int32, argv **byte) {
func signalM(mp *m, sig int) {
pthread_kill(pthread(mp.procid), uint32(sig))
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index cba2e42ab0..a56706b415 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -62,10 +62,8 @@ func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
-func setNonblock(fd int32)
// From DragonFly's <sys/sysctl.h>
const (
@@ -324,3 +322,12 @@ func raise(sig uint32) {
func signalM(mp *m, sig int) {
lwp_kill(-1, int32(mp.procid), sig)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go
index c63b0e3d69..e4d15474d8 100644
--- a/src/runtime/os_freebsd.go
+++ b/src/runtime/os_freebsd.go
@@ -47,10 +47,8 @@ func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
-func setNonblock(fd int32)
// From FreeBSD's <sys/sysctl.h>
const (
@@ -460,3 +458,12 @@ func raise(sig uint32) {
func signalM(mp *m, sig int) {
thr_kill(thread(mp.procid), sig)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 32a1e1b4f7..efb54ff20e 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -8,9 +8,15 @@ import (
"internal/abi"
"internal/goarch"
"runtime/internal/atomic"
+ "runtime/internal/syscall"
"unsafe"
)
+// sigPerThreadSyscall is the same signal (SIGSETXID) used by glibc for
+// per-thread syscalls on Linux. We use it for the same purpose in non-cgo
+// binaries.
+const sigPerThreadSyscall = _SIGRTMIN + 1
+
type mOS struct {
// profileTimer holds the ID of the POSIX interval timer for profiling CPU
// usage on this thread.
@@ -21,6 +27,10 @@ type mOS struct {
// are in signal handling code, access to that field uses atomic operations.
profileTimer int32
profileTimerValid uint32
+
+ // needPerThreadSyscall indicates that a per-thread syscall is required
+ // for doAllThreadsSyscall.
+ needPerThreadSyscall atomic.Uint8
}
//go:noescape
@@ -436,9 +446,7 @@ func osyield_no_g() {
osyield()
}
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
-func setNonblock(fd int32)
const (
_si_max_size = 128
@@ -664,3 +672,205 @@ func setThreadCPUProfiler(hz int32) {
mp.profileTimer = timerid
atomic.Store(&mp.profileTimerValid, 1)
}
+
+// perThreadSyscallArgs contains the system call number, arguments, and
+// expected return values for a system call to be executed on all threads.
+type perThreadSyscallArgs struct {
+ trap uintptr
+ a1 uintptr
+ a2 uintptr
+ a3 uintptr
+ a4 uintptr
+ a5 uintptr
+ a6 uintptr
+ r1 uintptr
+ r2 uintptr
+}
+
+// perThreadSyscall is the system call to execute for the ongoing
+// doAllThreadsSyscall.
+//
+// perThreadSyscall may only be written while mp.needPerThreadSyscall == 0 on
+// all Ms.
+var perThreadSyscall perThreadSyscallArgs
+
+// syscall_runtime_doAllThreadsSyscall executes a specified system call on
+// all Ms.
+//
+// The system call is expected to succeed and return the same value on every
+// thread. If any threads do not match, the runtime throws.
+//
+//go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
+//go:uintptrescapes
+func syscall_runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ if iscgo {
+ // In cgo, we are not aware of threads created in C, so this approach will not work.
+ panic("doAllThreadsSyscall not supported with cgo enabled")
+ }
+
+ // STW to guarantee that user goroutines see an atomic change to thread
+ // state. Without STW, goroutines could migrate Ms while change is in
+ // progress and e.g., see state old -> new -> old -> new.
+ //
+ // N.B. Internally, this function does not depend on STW to
+ // successfully change every thread. It is only needed for user
+ // expectations, per above.
+ stopTheWorld("doAllThreadsSyscall")
+
+ // This function depends on several properties:
+ //
+ // 1. All OS threads that already exist are associated with an M in
+ // allm. i.e., we won't miss any pre-existing threads.
+ // 2. All Ms listed in allm will eventually have an OS thread exist.
+ // i.e., they will set procid and be able to receive signals.
+ // 3. OS threads created after we read allm will clone from a thread
+ // that has executed the system call. i.e., they inherit the
+ // modified state.
+ //
+ // We achieve these through different mechanisms:
+ //
+ // 1. Addition of new Ms to allm in allocm happens before clone of its
+ // OS thread later in newm.
+ // 2. newm does acquirem to avoid being preempted, ensuring that new Ms
+ // created in allocm will eventually reach OS thread clone later in
+ // newm.
+ // 3. We take allocmLock for write here to prevent allocation of new Ms
+ // while this function runs. Per (1), this prevents clone of OS
+ // threads that are not yet in allm.
+ allocmLock.lock()
+
+ // Disable preemption, preventing us from changing Ms, as we handle
+ // this M specially.
+ //
+ // N.B. STW and lock() above do this as well, this is added for extra
+ // clarity.
+ acquirem()
+
+ // N.B. allocmLock also prevents concurrent execution of this function,
+ // serializing use of perThreadSyscall, mp.needPerThreadSyscall, and
+ // ensuring all threads execute system calls from multiple calls in the
+ // same order.
+
+ r1, r2, errno := syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6)
+ if GOARCH == "ppc64" || GOARCH == "ppc64le" {
+ // TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
+ r2 = 0
+ }
+ if errno != 0 {
+ releasem(getg().m)
+ allocmLock.unlock()
+ startTheWorld()
+ return r1, r2, errno
+ }
+
+ perThreadSyscall = perThreadSyscallArgs{
+ trap: trap,
+ a1: a1,
+ a2: a2,
+ a3: a3,
+ a4: a4,
+ a5: a5,
+ a6: a6,
+ r1: r1,
+ r2: r2,
+ }
+
+ // Wait for all threads to start.
+ //
+ // As described above, some Ms have been added to allm prior to
+ // allocmLock, but not yet completed OS clone and set procid.
+ //
+ // At minimum we must wait for a thread to set procid before we can
+ // send it a signal.
+ //
+ // We take this one step further and wait for all threads to start
+ // before sending any signals. This prevents system calls from getting
+ // applied twice: once in the parent and once in the child, like so:
+ //
+ // A B C
+ // add C to allm
+ // doAllThreadsSyscall
+ // allocmLock.lock()
+ // signal B
+ // <receive signal>
+ // execute syscall
+ // <signal return>
+ // clone C
+ // <thread start>
+ // set procid
+ // signal C
+ // <receive signal>
+ // execute syscall
+ // <signal return>
+ //
+ // In this case, thread C inherited the syscall-modified state from
+ // thread B and did not need to execute the syscall, but did anyway
+ // because doAllThreadsSyscall could not be sure whether it was
+ // required.
+ //
+ // Some system calls may not be idempotent, so we ensure each thread
+ // executes the system call exactly once.
+ for mp := allm; mp != nil; mp = mp.alllink {
+ for atomic.Load64(&mp.procid) == 0 {
+ // Thread is starting.
+ osyield()
+ }
+ }
+
+ // Signal every other thread, where they will execute perThreadSyscall
+ // from the signal handler.
+ gp := getg()
+ tid := gp.m.procid
+ for mp := allm; mp != nil; mp = mp.alllink {
+ if atomic.Load64(&mp.procid) == tid {
+ // Our thread already performed the syscall.
+ continue
+ }
+ mp.needPerThreadSyscall.Store(1)
+ signalM(mp, sigPerThreadSyscall)
+ }
+
+ // Wait for all threads to complete.
+ for mp := allm; mp != nil; mp = mp.alllink {
+ if mp.procid == tid {
+ continue
+ }
+ for mp.needPerThreadSyscall.Load() != 0 {
+ osyield()
+ }
+ }
+
+ perThreadSyscall = perThreadSyscallArgs{}
+
+ releasem(getg().m)
+ allocmLock.unlock()
+ startTheWorld()
+
+ return r1, r2, errno
+}
+
+// runPerThreadSyscall runs perThreadSyscall for this M if required.
+//
+// This function throws if the system call returns with anything other than the
+// expected values.
+//go:nosplit
+func runPerThreadSyscall() {
+ gp := getg()
+ if gp.m.needPerThreadSyscall.Load() == 0 {
+ return
+ }
+
+ args := perThreadSyscall
+ r1, r2, errno := syscall.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6)
+ if GOARCH == "ppc64" || GOARCH == "ppc64le" {
+ // TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
+ r2 = 0
+ }
+ if errno != 0 || r1 != args.r1 || r2 != args.r2 {
+ print("trap:", args.trap, ", a123456=[", args.a1, ",", args.a2, ",", args.a3, ",", args.a4, ",", args.a5, ",", args.a6, "]\n")
+ print("results: got {r1=", r1, ",r2=", r2, ",errno=", errno, "}, want {r1=", args.r1, ",r2=", args.r2, ",errno=0\n")
+ throw("AllThreadsSyscall6 results differ between threads; runtime corrupted")
+ }
+
+ gp.m.needPerThreadSyscall.Store(0)
+}
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index cd9508c706..88a4a8b90e 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -78,10 +78,8 @@ func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
-func setNonblock(fd int32)
const (
_ESRCH = 3
@@ -428,3 +426,12 @@ func raise(sig uint32) {
func signalM(mp *m, sig int) {
lwp_kill(int32(mp.procid), sig)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index 2d0e71de53..1a00b890db 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -286,3 +286,12 @@ func raise(sig uint32) {
func signalM(mp *m, sig int) {
thrkill(int32(mp.procid), sig)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_openbsd_syscall2.go b/src/runtime/os_openbsd_syscall2.go
index 99542fb2de..a48f5fa88a 100644
--- a/src/runtime/os_openbsd_syscall2.go
+++ b/src/runtime/os_openbsd_syscall2.go
@@ -70,7 +70,6 @@ func sigprocmask(how int32, new, old *sigset) {
}
}
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
//go:noescape
@@ -95,6 +94,5 @@ func nanotime1() int64
func sigaltstack(new, old *stackt)
func closeonexec(fd int32)
-func setNonblock(fd int32)
func walltime() (sec int64, nsec int32)
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 1a44ab7ad7..322579cdc4 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -794,7 +794,7 @@ func use(x [8 << 18]byte) {}
func TestBlockProfile(t *testing.T) {
type TestCase struct {
name string
- f func()
+ f func(*testing.T)
stk []string
re string
}
@@ -903,7 +903,7 @@ func TestBlockProfile(t *testing.T) {
runtime.SetBlockProfileRate(1)
defer runtime.SetBlockProfileRate(0)
for _, test := range tests {
- test.f()
+ test.f(t)
}
t.Run("debug=1", func(t *testing.T) {
@@ -979,42 +979,73 @@ func containsStack(got [][]string, want []string) bool {
return false
}
-const blockDelay = 10 * time.Millisecond
+// awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
+// shows a goroutine in the given state with a stack frame in
+// runtime/pprof.<fName>.
+func awaitBlockedGoroutine(t *testing.T, state, fName string) {
+ re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
+ r := regexp.MustCompile(re)
-func blockChanRecv() {
+ if deadline, ok := t.Deadline(); ok {
+ if d := time.Until(deadline); d > 1*time.Second {
+ timer := time.AfterFunc(d-1*time.Second, func() {
+ debug.SetTraceback("all")
+ panic(fmt.Sprintf("timed out waiting for %#q", re))
+ })
+ defer timer.Stop()
+ }
+ }
+
+ buf := make([]byte, 64<<10)
+ for {
+ runtime.Gosched()
+ n := runtime.Stack(buf, true)
+ if n == len(buf) {
+ // Buffer wasn't large enough for a full goroutine dump.
+ // Resize it and try again.
+ buf = make([]byte, 2*len(buf))
+ continue
+ }
+ if r.Match(buf[:n]) {
+ return
+ }
+ }
+}
+
+func blockChanRecv(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan receive", "blockChanRecv")
c <- true
}()
<-c
}
-func blockChanSend() {
+func blockChanSend(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan send", "blockChanSend")
<-c
}()
c <- true
}
-func blockChanClose() {
+func blockChanClose(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan receive", "blockChanClose")
close(c)
}()
<-c
}
-func blockSelectRecvAsync() {
+func blockSelectRecvAsync(t *testing.T) {
const numTries = 3
c := make(chan bool, 1)
c2 := make(chan bool, 1)
go func() {
for i := 0; i < numTries; i++ {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync")
c <- true
}
}()
@@ -1026,11 +1057,11 @@ func blockSelectRecvAsync() {
}
}
-func blockSelectSendSync() {
+func blockSelectSendSync(t *testing.T) {
c := make(chan bool)
c2 := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "select", "blockSelectSendSync")
<-c
}()
select {
@@ -1039,11 +1070,11 @@ func blockSelectSendSync() {
}
}
-func blockMutex() {
+func blockMutex(t *testing.T) {
var mu sync.Mutex
mu.Lock()
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "semacquire", "blockMutex")
mu.Unlock()
}()
// Note: Unlock releases mu before recording the mutex event,
@@ -1053,12 +1084,12 @@ func blockMutex() {
mu.Lock()
}
-func blockCond() {
+func blockCond(t *testing.T) {
var mu sync.Mutex
c := sync.NewCond(&mu)
mu.Lock()
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond")
mu.Lock()
c.Signal()
mu.Unlock()
@@ -1144,7 +1175,7 @@ func TestMutexProfile(t *testing.T) {
t.Fatalf("need MutexProfileRate 0, got %d", old)
}
- blockMutex()
+ blockMutex(t)
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s
index 36ee13282c..c27d475dee 100644
--- a/src/runtime/preempt_arm64.s
+++ b/src/runtime/preempt_arm64.s
@@ -6,142 +6,80 @@
TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVD R30, -496(RSP)
SUB $496, RSP
- #ifdef GOOS_linux
MOVD R29, -8(RSP)
SUB $8, RSP, R29
- #endif
#ifdef GOOS_ios
MOVD R30, (RSP)
#endif
- MOVD R0, 8(RSP)
- MOVD R1, 16(RSP)
- MOVD R2, 24(RSP)
- MOVD R3, 32(RSP)
- MOVD R4, 40(RSP)
- MOVD R5, 48(RSP)
- MOVD R6, 56(RSP)
- MOVD R7, 64(RSP)
- MOVD R8, 72(RSP)
- MOVD R9, 80(RSP)
- MOVD R10, 88(RSP)
- MOVD R11, 96(RSP)
- MOVD R12, 104(RSP)
- MOVD R13, 112(RSP)
- MOVD R14, 120(RSP)
- MOVD R15, 128(RSP)
- MOVD R16, 136(RSP)
- MOVD R17, 144(RSP)
- MOVD R19, 152(RSP)
- MOVD R20, 160(RSP)
- MOVD R21, 168(RSP)
- MOVD R22, 176(RSP)
- MOVD R23, 184(RSP)
- MOVD R24, 192(RSP)
- MOVD R25, 200(RSP)
- MOVD R26, 208(RSP)
+ STP (R0, R1), 8(RSP)
+ STP (R2, R3), 24(RSP)
+ STP (R4, R5), 40(RSP)
+ STP (R6, R7), 56(RSP)
+ STP (R8, R9), 72(RSP)
+ STP (R10, R11), 88(RSP)
+ STP (R12, R13), 104(RSP)
+ STP (R14, R15), 120(RSP)
+ STP (R16, R17), 136(RSP)
+ STP (R19, R20), 152(RSP)
+ STP (R21, R22), 168(RSP)
+ STP (R23, R24), 184(RSP)
+ STP (R25, R26), 200(RSP)
MOVD NZCV, R0
MOVD R0, 216(RSP)
MOVD FPSR, R0
MOVD R0, 224(RSP)
- FMOVD F0, 232(RSP)
- FMOVD F1, 240(RSP)
- FMOVD F2, 248(RSP)
- FMOVD F3, 256(RSP)
- FMOVD F4, 264(RSP)
- FMOVD F5, 272(RSP)
- FMOVD F6, 280(RSP)
- FMOVD F7, 288(RSP)
- FMOVD F8, 296(RSP)
- FMOVD F9, 304(RSP)
- FMOVD F10, 312(RSP)
- FMOVD F11, 320(RSP)
- FMOVD F12, 328(RSP)
- FMOVD F13, 336(RSP)
- FMOVD F14, 344(RSP)
- FMOVD F15, 352(RSP)
- FMOVD F16, 360(RSP)
- FMOVD F17, 368(RSP)
- FMOVD F18, 376(RSP)
- FMOVD F19, 384(RSP)
- FMOVD F20, 392(RSP)
- FMOVD F21, 400(RSP)
- FMOVD F22, 408(RSP)
- FMOVD F23, 416(RSP)
- FMOVD F24, 424(RSP)
- FMOVD F25, 432(RSP)
- FMOVD F26, 440(RSP)
- FMOVD F27, 448(RSP)
- FMOVD F28, 456(RSP)
- FMOVD F29, 464(RSP)
- FMOVD F30, 472(RSP)
- FMOVD F31, 480(RSP)
+ FSTPD (F0, F1), 232(RSP)
+ FSTPD (F2, F3), 248(RSP)
+ FSTPD (F4, F5), 264(RSP)
+ FSTPD (F6, F7), 280(RSP)
+ FSTPD (F8, F9), 296(RSP)
+ FSTPD (F10, F11), 312(RSP)
+ FSTPD (F12, F13), 328(RSP)
+ FSTPD (F14, F15), 344(RSP)
+ FSTPD (F16, F17), 360(RSP)
+ FSTPD (F18, F19), 376(RSP)
+ FSTPD (F20, F21), 392(RSP)
+ FSTPD (F22, F23), 408(RSP)
+ FSTPD (F24, F25), 424(RSP)
+ FSTPD (F26, F27), 440(RSP)
+ FSTPD (F28, F29), 456(RSP)
+ FSTPD (F30, F31), 472(RSP)
CALL ·asyncPreempt2(SB)
- FMOVD 480(RSP), F31
- FMOVD 472(RSP), F30
- FMOVD 464(RSP), F29
- FMOVD 456(RSP), F28
- FMOVD 448(RSP), F27
- FMOVD 440(RSP), F26
- FMOVD 432(RSP), F25
- FMOVD 424(RSP), F24
- FMOVD 416(RSP), F23
- FMOVD 408(RSP), F22
- FMOVD 400(RSP), F21
- FMOVD 392(RSP), F20
- FMOVD 384(RSP), F19
- FMOVD 376(RSP), F18
- FMOVD 368(RSP), F17
- FMOVD 360(RSP), F16
- FMOVD 352(RSP), F15
- FMOVD 344(RSP), F14
- FMOVD 336(RSP), F13
- FMOVD 328(RSP), F12
- FMOVD 320(RSP), F11
- FMOVD 312(RSP), F10
- FMOVD 304(RSP), F9
- FMOVD 296(RSP), F8
- FMOVD 288(RSP), F7
- FMOVD 280(RSP), F6
- FMOVD 272(RSP), F5
- FMOVD 264(RSP), F4
- FMOVD 256(RSP), F3
- FMOVD 248(RSP), F2
- FMOVD 240(RSP), F1
- FMOVD 232(RSP), F0
+ FLDPD 472(RSP), (F30, F31)
+ FLDPD 456(RSP), (F28, F29)
+ FLDPD 440(RSP), (F26, F27)
+ FLDPD 424(RSP), (F24, F25)
+ FLDPD 408(RSP), (F22, F23)
+ FLDPD 392(RSP), (F20, F21)
+ FLDPD 376(RSP), (F18, F19)
+ FLDPD 360(RSP), (F16, F17)
+ FLDPD 344(RSP), (F14, F15)
+ FLDPD 328(RSP), (F12, F13)
+ FLDPD 312(RSP), (F10, F11)
+ FLDPD 296(RSP), (F8, F9)
+ FLDPD 280(RSP), (F6, F7)
+ FLDPD 264(RSP), (F4, F5)
+ FLDPD 248(RSP), (F2, F3)
+ FLDPD 232(RSP), (F0, F1)
MOVD 224(RSP), R0
MOVD R0, FPSR
MOVD 216(RSP), R0
MOVD R0, NZCV
- MOVD 208(RSP), R26
- MOVD 200(RSP), R25
- MOVD 192(RSP), R24
- MOVD 184(RSP), R23
- MOVD 176(RSP), R22
- MOVD 168(RSP), R21
- MOVD 160(RSP), R20
- MOVD 152(RSP), R19
- MOVD 144(RSP), R17
- MOVD 136(RSP), R16
- MOVD 128(RSP), R15
- MOVD 120(RSP), R14
- MOVD 112(RSP), R13
- MOVD 104(RSP), R12
- MOVD 96(RSP), R11
- MOVD 88(RSP), R10
- MOVD 80(RSP), R9
- MOVD 72(RSP), R8
- MOVD 64(RSP), R7
- MOVD 56(RSP), R6
- MOVD 48(RSP), R5
- MOVD 40(RSP), R4
- MOVD 32(RSP), R3
- MOVD 24(RSP), R2
- MOVD 16(RSP), R1
- MOVD 8(RSP), R0
+ LDP 200(RSP), (R25, R26)
+ LDP 184(RSP), (R23, R24)
+ LDP 168(RSP), (R21, R22)
+ LDP 152(RSP), (R19, R20)
+ LDP 136(RSP), (R16, R17)
+ LDP 120(RSP), (R14, R15)
+ LDP 104(RSP), (R12, R13)
+ LDP 88(RSP), (R10, R11)
+ LDP 72(RSP), (R8, R9)
+ LDP 56(RSP), (R6, R7)
+ LDP 40(RSP), (R4, R5)
+ LDP 24(RSP), (R2, R3)
+ LDP 8(RSP), (R0, R1)
MOVD 496(RSP), R30
- #ifdef GOOS_linux
MOVD -8(RSP), R29
- #endif
MOVD (RSP), R27
ADD $512, RSP
JMP (R27)
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 1be7a60830..df16e0f9b6 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -167,10 +167,6 @@ func main() {
mainStarted = true
if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
- // For runtime_syscall_doAllThreadsSyscall, we
- // register sysmon is not ready for the world to be
- // stopped.
- atomic.Store(&sched.sysmonStarting, 1)
systemstack(func() {
newm(sysmon, nil, -1)
})
@@ -187,7 +183,6 @@ func main() {
if g.m != &m0 {
throw("runtime.main not on m0")
}
- m0.doesPark = true
// Record when the world started.
// Must be before doInit for tracing init.
@@ -802,8 +797,18 @@ func mcommoninit(mp *m, id int64) {
mp.id = mReserveID()
}
- // cputicks is not very random in startup virtual machine
- mp.fastrand = uint64(int64Hash(uint64(mp.id), fastrandseed^uintptr(cputicks())))
+ lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
+ hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
+ if lo|hi == 0 {
+ hi = 1
+ }
+ // Same behavior as for 1.17.
+	// TODO: Simplify this.
+ if goarch.BigEndian {
+ mp.fastrand = uint64(lo)<<32 | uint64(hi)
+ } else {
+ mp.fastrand = uint64(hi)<<32 | uint64(lo)
+ }
mpreinit(mp)
if mp.gsignal != nil {
@@ -1437,22 +1442,12 @@ func mstartm0() {
initsig(false)
}
-// mPark causes a thread to park itself - temporarily waking for
-// fixups but otherwise waiting to be fully woken. This is the
-// only way that m's should park themselves.
+// mPark causes a thread to park itself, returning once woken.
//go:nosplit
func mPark() {
- g := getg()
- for {
- notesleep(&g.m.park)
- // Note, because of signal handling by this parked m,
- // a preemptive mDoFixup() may actually occur via
- // mDoFixupAndOSYield(). (See golang.org/issue/44193)
- noteclear(&g.m.park)
- if !mDoFixup() {
- return
- }
- }
+ gp := getg()
+ notesleep(&gp.m.park)
+ noteclear(&gp.m.park)
}
// mexit tears down and exits the current thread.
@@ -1659,145 +1654,6 @@ func forEachP(fn func(*p)) {
releasem(mp)
}
-// syscall_runtime_doAllThreadsSyscall serializes Go execution and
-// executes a specified fn() call on all m's.
-//
-// The boolean argument to fn() indicates whether the function's
-// return value will be consulted or not. That is, fn(true) should
-// return true if fn() succeeds, and fn(true) should return false if
-// it failed. When fn(false) is called, its return status will be
-// ignored.
-//
-// syscall_runtime_doAllThreadsSyscall first invokes fn(true) on a
-// single, coordinating, m, and only if it returns true does it go on
-// to invoke fn(false) on all of the other m's known to the process.
-//
-//go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
-func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
- if iscgo {
- panic("doAllThreadsSyscall not supported with cgo enabled")
- }
- if fn == nil {
- return
- }
- for atomic.Load(&sched.sysmonStarting) != 0 {
- osyield()
- }
-
- // We don't want this thread to handle signals for the
- // duration of this critical section. The underlying issue
- // being that this locked coordinating m is the one monitoring
- // for fn() execution by all the other m's of the runtime,
- // while no regular go code execution is permitted (the world
- // is stopped). If this present m were to get distracted to
- // run signal handling code, and find itself waiting for a
- // second thread to execute go code before being able to
- // return from that signal handling, a deadlock will result.
- // (See golang.org/issue/44193.)
- lockOSThread()
- var sigmask sigset
- sigsave(&sigmask)
- sigblock(false)
-
- stopTheWorldGC("doAllThreadsSyscall")
- if atomic.Load(&newmHandoff.haveTemplateThread) != 0 {
- // Ensure that there are no in-flight thread
- // creations: don't want to race with allm.
- lock(&newmHandoff.lock)
- for !newmHandoff.waiting {
- unlock(&newmHandoff.lock)
- osyield()
- lock(&newmHandoff.lock)
- }
- unlock(&newmHandoff.lock)
- }
- if netpollinited() {
- netpollBreak()
- }
- sigRecvPrepareForFixup()
- _g_ := getg()
- if raceenabled {
- // For m's running without racectx, we loan out the
- // racectx of this call.
- lock(&mFixupRace.lock)
- mFixupRace.ctx = _g_.racectx
- unlock(&mFixupRace.lock)
- }
- if ok := fn(true); ok {
- tid := _g_.m.procid
- for mp := allm; mp != nil; mp = mp.alllink {
- if mp.procid == tid {
- // This m has already completed fn()
- // call.
- continue
- }
- // Be wary of mp's without procid values if
- // they are known not to park. If they are
- // marked as parking with a zero procid, then
- // they will be racing with this code to be
- // allocated a procid and we will annotate
- // them with the need to execute the fn when
- // they acquire a procid to run it.
- if mp.procid == 0 && !mp.doesPark {
- // Reaching here, we are either
- // running Windows, or cgo linked
- // code. Neither of which are
- // currently supported by this API.
- throw("unsupported runtime environment")
- }
- // stopTheWorldGC() doesn't guarantee stopping
- // all the threads, so we lock here to avoid
- // the possibility of racing with mp.
- lock(&mp.mFixup.lock)
- mp.mFixup.fn = fn
- atomic.Store(&mp.mFixup.used, 1)
- if mp.doesPark {
- // For non-service threads this will
- // cause the wakeup to be short lived
- // (once the mutex is unlocked). The
- // next real wakeup will occur after
- // startTheWorldGC() is called.
- notewakeup(&mp.park)
- }
- unlock(&mp.mFixup.lock)
- }
- for {
- done := true
- for mp := allm; done && mp != nil; mp = mp.alllink {
- if mp.procid == tid {
- continue
- }
- done = atomic.Load(&mp.mFixup.used) == 0
- }
- if done {
- break
- }
- // if needed force sysmon and/or newmHandoff to wakeup.
- lock(&sched.lock)
- if atomic.Load(&sched.sysmonwait) != 0 {
- atomic.Store(&sched.sysmonwait, 0)
- notewakeup(&sched.sysmonnote)
- }
- unlock(&sched.lock)
- lock(&newmHandoff.lock)
- if newmHandoff.waiting {
- newmHandoff.waiting = false
- notewakeup(&newmHandoff.wake)
- }
- unlock(&newmHandoff.lock)
- osyield()
- }
- }
- if raceenabled {
- lock(&mFixupRace.lock)
- mFixupRace.ctx = 0
- unlock(&mFixupRace.lock)
- }
- startTheWorldGC()
- msigrestore(sigmask)
- unlockOSThread()
-}
-
// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
@@ -1847,8 +1703,14 @@ type cgothreadstart struct {
//
//go:yeswritebarrierrec
func allocm(_p_ *p, fn func(), id int64) *m {
+ allocmLock.rlock()
+
+ // The caller owns _p_, but we may borrow (i.e., acquirep) it. We must
+ // disable preemption to ensure it is not stolen, which would make the
+ // caller lose ownership.
+ acquirem()
+
_g_ := getg()
- acquirem() // disable GC because it can be called from sysmon
if _g_.m.p == 0 {
acquirep(_p_) // temporarily borrow p for mallocs in this function
}
@@ -1894,8 +1756,9 @@ func allocm(_p_ *p, fn func(), id int64) *m {
if _p_ == _g_.m.p.ptr() {
releasep()
}
- releasem(_g_.m)
+ releasem(_g_.m)
+ allocmLock.runlock()
return mp
}
@@ -2172,9 +2035,17 @@ func unlockextra(mp *m) {
atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
-// execLock serializes exec and clone to avoid bugs or unspecified behaviour
-// around exec'ing while creating/destroying threads. See issue #19546.
-var execLock rwmutex
+var (
+ // allocmLock is locked for read when creating new Ms in allocm and their
+ // addition to allm. Thus acquiring this lock for write blocks the
+ // creation of new Ms.
+ allocmLock rwmutex
+
+ // execLock serializes exec and clone to avoid bugs or unspecified
+ // behaviour around exec'ing while creating/destroying threads. See
+ // issue #19546.
+ execLock rwmutex
+)
// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
@@ -2204,8 +2075,19 @@ var newmHandoff struct {
// id is optional pre-allocated m ID. Omit by passing -1.
//go:nowritebarrierrec
func newm(fn func(), _p_ *p, id int64) {
+ // allocm adds a new M to allm, but they do not start until created by
+ // the OS in newm1 or the template thread.
+ //
+ // doAllThreadsSyscall requires that every M in allm will eventually
+ // start and be signal-able, even with a STW.
+ //
+ // Disable preemption here until we start the thread to ensure that
+ // newm is not preempted between allocm and starting the new thread,
+ // ensuring that anything added to allm is guaranteed to eventually
+ // start.
+ acquirem()
+
mp := allocm(_p_, fn, id)
- mp.doesPark = (_p_ != nil)
mp.nextp.set(_p_)
mp.sigmask = initSigmask
if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
@@ -2231,9 +2113,14 @@ func newm(fn func(), _p_ *p, id int64) {
notewakeup(&newmHandoff.wake)
}
unlock(&newmHandoff.lock)
+ // The M has not started yet, but the template thread does not
+ // participate in STW, so it will always process queued Ms and
+ // it is safe to releasem.
+ releasem(getg().m)
return
}
newm1(mp)
+ releasem(getg().m)
}
func newm1(mp *m) {
@@ -2281,81 +2168,6 @@ func startTemplateThread() {
releasem(mp)
}
-// mFixupRace is used to temporarily borrow the race context from the
-// coordinating m during a syscall_runtime_doAllThreadsSyscall and
-// loan it out to each of the m's of the runtime so they can execute a
-// mFixup.fn in that context.
-var mFixupRace struct {
- lock mutex
- ctx uintptr
-}
-
-// mDoFixup runs any outstanding fixup function for the running m.
-// Returns true if a fixup was outstanding and actually executed.
-//
-// Note: to avoid deadlocks, and the need for the fixup function
-// itself to be async safe, signals are blocked for the working m
-// while it holds the mFixup lock. (See golang.org/issue/44193)
-//
-//go:nosplit
-func mDoFixup() bool {
- _g_ := getg()
- if used := atomic.Load(&_g_.m.mFixup.used); used == 0 {
- return false
- }
-
- // slow path - if fixup fn is used, block signals and lock.
- var sigmask sigset
- sigsave(&sigmask)
- sigblock(false)
- lock(&_g_.m.mFixup.lock)
- fn := _g_.m.mFixup.fn
- if fn != nil {
- if gcphase != _GCoff {
- // We can't have a write barrier in this
- // context since we may not have a P, but we
- // clear fn to signal that we've executed the
- // fixup. As long as fn is kept alive
- // elsewhere, technically we should have no
- // issues with the GC, but fn is likely
- // generated in a different package altogether
- // that may change independently. Just assert
- // the GC is off so this lack of write barrier
- // is more obviously safe.
- throw("GC must be disabled to protect validity of fn value")
- }
- if _g_.racectx != 0 || !raceenabled {
- fn(false)
- } else {
- // temporarily acquire the context of the
- // originator of the
- // syscall_runtime_doAllThreadsSyscall and
- // block others from using it for the duration
- // of the fixup call.
- lock(&mFixupRace.lock)
- _g_.racectx = mFixupRace.ctx
- fn(false)
- _g_.racectx = 0
- unlock(&mFixupRace.lock)
- }
- *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0
- atomic.Store(&_g_.m.mFixup.used, 0)
- }
- unlock(&_g_.m.mFixup.lock)
- msigrestore(sigmask)
- return fn != nil
-}
-
-// mDoFixupAndOSYield is called when an m is unable to send a signal
-// because the allThreadsSyscall mechanism is in progress. That is, an
-// mPark() has been interrupted with this signal handler so we need to
-// ensure the fixup is executed from this context.
-//go:nosplit
-func mDoFixupAndOSYield() {
- mDoFixup()
- osyield()
-}
-
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
@@ -2392,7 +2204,6 @@ func templateThread() {
noteclear(&newmHandoff.wake)
unlock(&newmHandoff.lock)
notesleep(&newmHandoff.wake)
- mDoFixup()
}
}
@@ -5239,10 +5050,6 @@ func sysmon() {
checkdead()
unlock(&sched.lock)
- // For syscall_runtime_doAllThreadsSyscall, sysmon is
- // sufficiently up to participate in fixups.
- atomic.Store(&sched.sysmonStarting, 0)
-
lasttrace := int64(0)
idle := 0 // how many cycles in succession we had not wokeup somebody
delay := uint32(0)
@@ -5257,7 +5064,6 @@ func sysmon() {
delay = 10 * 1000
}
usleep(delay)
- mDoFixup()
// sysmon should not enter deep sleep if schedtrace is enabled so that
// it can print that information at the right time.
@@ -5294,7 +5100,6 @@ func sysmon() {
osRelax(true)
}
syscallWake = notetsleep(&sched.sysmonnote, sleep)
- mDoFixup()
if shouldRelax {
osRelax(false)
}
@@ -5337,7 +5142,6 @@ func sysmon() {
incidlelocked(1)
}
}
- mDoFixup()
if GOOS == "netbsd" && needSysmonWorkaround {
// netpoll is responsible for waiting for timer
// expiration, so we typically don't have to worry
@@ -6334,7 +6138,7 @@ func (ord *randomOrder) start(i uint32) randomEnum {
return randomEnum{
count: ord.count,
pos: i % ord.count,
- inc: ord.coprimes[i%uint32(len(ord.coprimes))],
+ inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
}
}
diff --git a/src/runtime/proc_runtime_test.go b/src/runtime/proc_runtime_test.go
index a7bde2c6df..90aed83d46 100644
--- a/src/runtime/proc_runtime_test.go
+++ b/src/runtime/proc_runtime_test.go
@@ -30,4 +30,21 @@ func RunStealOrderTest() {
}
}
}
+ // Make sure that different arguments to ord.start don't generate the
+ // same pos+inc twice.
+ for procs := 2; procs <= 64; procs++ {
+ ord.reset(uint32(procs))
+ checked := make([]bool, procs*procs)
+ // We want at least procs*len(ord.coprimes) different pos+inc values
+ // before we start repeating.
+ for i := 0; i < procs*len(ord.coprimes); i++ {
+ enum := ord.start(uint32(i))
+ j := enum.pos*uint32(procs) + enum.inc
+ if checked[j] {
+ println("procs:", procs, "pos:", enum.pos, "inc:", enum.inc)
+ panic("duplicate pos+inc during enumeration")
+ }
+ checked[j] = true
+ }
+ }
}
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s
index 798e23294a..95fec0b9c6 100644
--- a/src/runtime/race_arm64.s
+++ b/src/runtime/race_arm64.s
@@ -188,8 +188,12 @@ ret:
// func runtime·racefuncenter(pc uintptr)
// Called from instrumented code.
-TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
+TEXT runtime·racefuncenter<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R9 // callpc
+#else
MOVD callpc+0(FP), R9
+#endif
JMP racefuncenter<>(SB)
// Common code for racefuncenter
@@ -205,7 +209,7 @@ TEXT racefuncenter<>(SB), NOSPLIT, $0-0
// func runtime·racefuncexit()
// Called from instrumented code.
-TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
+TEXT runtime·racefuncexit<ABIInternal>(SB), NOSPLIT, $0-0
load_g
MOVD g_racectx(g), R0 // race context
// void __tsan_func_exit(ThreadState *thr);
@@ -392,12 +396,12 @@ racecallatomic_ignore:
// Addr is outside the good range.
// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
// An attempt to synchronize on the address would cause crash.
- MOVD R9, R20 // remember the original function
+ MOVD R9, R21 // remember the original function
MOVD $__tsan_go_ignore_sync_begin(SB), R9
load_g
MOVD g_racectx(g), R0 // goroutine context
BL racecall<>(SB)
- MOVD R20, R9 // restore the original function
+ MOVD R21, R9 // restore the original function
// Call the atomic function.
// racecall will call LLVM race code which might clobber R28 (g)
load_g
@@ -424,10 +428,12 @@ TEXT runtime·racecall(SB), NOSPLIT, $0-0
JMP racecall<>(SB)
// Switches SP to g0 stack and calls (R9). Arguments already set.
-TEXT racecall<>(SB), NOSPLIT, $0-0
+// Clobbers R19, R20.
+TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0
MOVD g_m(g), R10
// Switch to g0 stack.
MOVD RSP, R19 // callee-saved, preserved across the CALL
+ MOVD R30, R20 // callee-saved, preserved across the CALL
MOVD m_g0(R10), R11
CMP R11, g
BEQ call // already on g0
@@ -436,7 +442,7 @@ TEXT racecall<>(SB), NOSPLIT, $0-0
call:
BL R9
MOVD R19, RSP
- RET
+ JMP (R20)
// C->Go callback thunk that allows to call runtime·racesymbolize from C code.
// Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g.
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index 7e8723e15f..ee8c6c210f 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -427,6 +427,14 @@ func TestGdbBacktrace(t *testing.T) {
got, err := testenv.RunWithTimeout(t, exec.Command("gdb", args...))
t.Logf("gdb output:\n%s", got)
if err != nil {
+ if bytes.Contains(got, []byte("internal-error: wait returned unexpected status 0x0")) {
+ // GDB bug: https://sourceware.org/bugzilla/show_bug.cgi?id=28551
+ testenv.SkipFlaky(t, 43068)
+ }
+ if bytes.Contains(got, []byte("Couldn't get registers: No such process.")) {
+ // GDB bug: https://sourceware.org/bugzilla/show_bug.cgi?id=9086
+ testenv.SkipFlaky(t, 50838)
+ }
t.Fatalf("gdb exited with error: %v", err)
}
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 3eada37840..3d01ac5171 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -547,7 +547,6 @@ type m struct {
ncgo int32 // number of cgo calls currently in progress
cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
- doesPark bool // non-P running threads: sysmon and newmHandoff never use .park
park note
alllink *m // on allm
schedlink muintptr
@@ -564,16 +563,6 @@ type m struct {
syscalltick uint32
freelink *m // on sched.freem
- // mFixup is used to synchronize OS related m state
- // (credentials etc) use mutex to access. To avoid deadlocks
- // an atomic.Load() of used being zero in mDoFixupFn()
- // guarantees fn is nil.
- mFixup struct {
- lock mutex
- used uint32
- fn func(bool) bool
- }
-
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
libcall libcall
@@ -817,10 +806,6 @@ type schedt struct {
sysmonwait uint32
sysmonnote note
- // While true, sysmon not ready for mFixup calls.
- // Accessed atomically.
- sysmonStarting uint32
-
// safepointFn should be called on each P at the next GC
// safepoint if p.runSafePointFn is set.
safePointFn func(*p)
@@ -838,8 +823,6 @@ type schedt struct {
// with the rest of the runtime.
sysmonlock mutex
- _ uint32 // ensure timeToRun has 8-byte alignment
-
// timeToRun is a distribution of scheduling latencies, defined
// as the sum of time a G spends in the _Grunnable state before
// it transitions to _Grunning.
@@ -856,7 +839,7 @@ const (
_SigPanic // if the signal is from the kernel, panic
_SigDefault // if the signal isn't explicitly requested, don't monitor it
_SigGoExit // cause all runtime procs to exit (only used on Plan 9).
- _SigSetStack // add SA_ONSTACK to libc handler
+ _SigSetStack // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
_SigUnblock // always unblock; see blockableSig
_SigIgn // _SIG_DFL action is to ignore the signal
)
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index 08f266cc67..2dd4cc51a3 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -161,6 +161,13 @@ func sigInstallGoHandler(sig uint32) bool {
}
}
+ if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
+ // sigPerThreadSyscall is the same signal used by glibc for
+ // per-thread syscalls on Linux. We use it for the same purpose
+ // in non-cgo binaries.
+ return true
+ }
+
t := &sigtable[sig]
if t.flags&_SigSetStack != 0 {
return false
@@ -616,6 +623,15 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
return
}
+ if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
+ // sigPerThreadSyscall is the same signal used by glibc for
+ // per-thread syscalls on Linux. We use it for the same purpose
+ // in non-cgo binaries. Since this signal is not _SigNotify,
+ // there is nothing more to do once we run the syscall.
+ runPerThreadSyscall()
+ return
+ }
+
if sig == sigPreempt && debug.asyncpreemptoff == 0 {
// Might be a preemption signal.
doSigPreempt(gp, c)
diff --git a/src/runtime/sigqueue.go b/src/runtime/sigqueue.go
index 7b84a0ef65..fdf99d94a2 100644
--- a/src/runtime/sigqueue.go
+++ b/src/runtime/sigqueue.go
@@ -11,18 +11,18 @@
//
// sigsend is called by the signal handler to queue a new signal.
// signal_recv is called by the Go program to receive a newly queued signal.
+//
// Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in 4 states: sigIdle, sigReceiving, sigSending and sigFixup.
-// sigReceiving means that signal_recv is blocked on sig.Note and there are no
-// new pending signals.
-// sigSending means that sig.mask *may* contain new pending signals,
-// signal_recv can't be blocked in this state.
-// sigIdle means that there are no new pending signals and signal_recv is not blocked.
-// sigFixup is a transient state that can only exist as a short
-// transition from sigReceiving and then on to sigIdle: it is
-// used to ensure the AllThreadsSyscall()'s mDoFixup() operation
-// occurs on the sleeping m, waiting to receive a signal.
+// variable. It can be in three states:
+// * sigReceiving means that signal_recv is blocked on sig.Note and there are
+// no new pending signals.
+// * sigSending means that sig.mask *may* contain new pending signals,
+// signal_recv can't be blocked in this state.
+// * sigIdle means that there are no new pending signals and signal_recv is not
+// blocked.
+//
// Transitions between states are done atomically with CAS.
+//
// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
// If several sigsends and signal_recv execute concurrently, it can lead to
// unnecessary rechecks of sig.mask, but it cannot lead to missed signals
@@ -63,7 +63,6 @@ const (
sigIdle = iota
sigReceiving
sigSending
- sigFixup
)
// sigsend delivers a signal from sighandler to the internal signal delivery queue.
@@ -117,9 +116,6 @@ Send:
notewakeup(&sig.note)
break Send
}
- case sigFixup:
- // nothing to do - we need to wait for sigIdle.
- mDoFixupAndOSYield()
}
}
@@ -127,19 +123,6 @@ Send:
return true
}
-// sigRecvPrepareForFixup is used to temporarily wake up the
-// signal_recv() running thread while it is blocked waiting for the
-// arrival of a signal. If it causes the thread to wake up, the
-// sig.state travels through this sequence: sigReceiving -> sigFixup
-// -> sigIdle -> sigReceiving and resumes. (This is only called while
-// GC is disabled.)
-//go:nosplit
-func sigRecvPrepareForFixup() {
- if atomic.Cas(&sig.state, sigReceiving, sigFixup) {
- notewakeup(&sig.note)
- }
-}
-
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os/signal.signal_recv
@@ -167,16 +150,7 @@ func signal_recv() uint32 {
}
notetsleepg(&sig.note, -1)
noteclear(&sig.note)
- if !atomic.Cas(&sig.state, sigFixup, sigIdle) {
- break Receive
- }
- // Getting here, the code will
- // loop around again to sleep
- // in state sigReceiving. This
- // path is taken when
- // sigRecvPrepareForFixup()
- // has been called by another
- // thread.
+ break Receive
}
case sigSending:
if atomic.Cas(&sig.state, sigSending, sigIdle) {
diff --git a/src/runtime/sigqueue_plan9.go b/src/runtime/sigqueue_plan9.go
index aebd2060e7..d5fe8f8b35 100644
--- a/src/runtime/sigqueue_plan9.go
+++ b/src/runtime/sigqueue_plan9.go
@@ -92,13 +92,6 @@ func sendNote(s *byte) bool {
return true
}
-// sigRecvPrepareForFixup is a no-op on plan9. (This would only be
-// called while GC is disabled.)
-//
-//go:nosplit
-func sigRecvPrepareForFixup() {
-}
-
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os/signal.signal_recv
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index 017b0a0749..ee4db47314 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -310,6 +310,7 @@ const (
_FUNCDATA_OpenCodedDeferInfo = 4
_FUNCDATA_ArgInfo = 5
_FUNCDATA_ArgLiveInfo = 6
+ _FUNCDATA_WrapInfo = 7
_ArgsSizeUnknown = -0x80000000
)
diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go
index 80dd1a0378..58b3a9171c 100644
--- a/src/runtime/sys_darwin.go
+++ b/src/runtime/sys_darwin.go
@@ -17,87 +17,91 @@ import (
//go:linkname syscall_syscall syscall.syscall
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscall()
//go:linkname syscall_syscallX syscall.syscallX
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscallX()
//go:linkname syscall_syscall6 syscall.syscall6
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscall6()
//go:linkname syscall_syscall6X syscall.syscall6X
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscall6X()
//go:linkname syscall_syscallPtr syscall.syscallPtr
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallPtr)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallPtr)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscallPtr()
//go:linkname syscall_rawSyscall syscall.rawSyscall
//go:nosplit
-//go:cgo_unsafe_args
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
- return
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
+ return args.r1, args.r2, args.err
}
//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
//go:nosplit
-//go:cgo_unsafe_args
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
- return
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
+ return args.r1, args.r2, args.err
}
// syscallNoErr is used in crypto/x509 to call into Security.framework and CF.
//go:linkname crypto_x509_syscall crypto/x509/internal/macos.syscall
//go:nosplit
-//go:cgo_unsafe_args
-func crypto_x509_syscall(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1 uintptr) {
+func crypto_x509_syscall(fn, a1, a2, a3, a4, a5 uintptr, f1 float64) (r1 uintptr) {
+ args := struct {
+ fn, a1, a2, a3, a4, a5 uintptr
+ f1 float64
+ r1 uintptr
+ }{fn, a1, a2, a3, a4, a5, f1, r1}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallNoErr)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall_x509)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1
}
-func syscallNoErr()
+func syscall_x509()
// The *_trampoline functions convert from the Go calling convention to the C calling convention
// and then call the underlying libc function. They are defined in sys_darwin_$ARCH.s.
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index 5d89cda8e6..db4715d2b7 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -831,9 +831,10 @@ ok:
POPQ BP
RET
-// syscallNoErr is like syscall6 but does not check for errors, and
-// only returns one value, for use with standard C ABI library functions.
-TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
+// syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors,
+// takes 5 uintptrs and 1 float64, and only returns one value,
+// for use with standard C ABI functions.
+TEXT runtime·syscall_x509(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
@@ -842,7 +843,7 @@ TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
MOVQ (3*8)(DI), DX // a3
MOVQ (4*8)(DI), CX // a4
MOVQ (5*8)(DI), R8 // a5
- MOVQ (6*8)(DI), R9 // a6
+ MOVQ (6*8)(DI), X0 // f1
MOVQ DI, (SP)
MOVQ (1*8)(DI), DI // a1
XORL AX, AX // vararg: say "no float args"
diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s
index 96d2ed1076..e57ac53e10 100644
--- a/src/runtime/sys_darwin_arm64.s
+++ b/src/runtime/sys_darwin_arm64.s
@@ -736,9 +736,10 @@ TEXT runtime·syscall6X(SB),NOSPLIT,$0
ok:
RET
-// syscallNoErr is like syscall6 but does not check for errors, and
-// only returns one value, for use with standard C ABI library functions.
-TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
+// syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors,
+// takes 5 uintptrs and 1 float64, and only returns one value,
+// for use with standard C ABI functions.
+TEXT runtime·syscall_x509(SB),NOSPLIT,$0
SUB $16, RSP // push structure pointer
MOVD R0, (RSP)
@@ -747,7 +748,7 @@ TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
MOVD 24(R0), R2 // a3
MOVD 32(R0), R3 // a4
MOVD 40(R0), R4 // a5
- MOVD 48(R0), R5 // a6
+ FMOVD 48(R0), F0 // f1
MOVD 8(R0), R0 // a1
BL (R12)
diff --git a/src/runtime/sys_dragonfly_amd64.s b/src/runtime/sys_dragonfly_amd64.s
index d57bc2a7a4..684c9ab7f0 100644
--- a/src/runtime/sys_dragonfly_amd64.s
+++ b/src/runtime/sys_dragonfly_amd64.s
@@ -109,21 +109,6 @@ TEXT runtime·read(SB),NOSPLIT,$-8
MOVL AX, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $42, AX
- SYSCALL
- JCC pipeok
- MOVL $-1,r+0(FP)
- MOVL $-1,w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-pipeok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
MOVL $0, DI
@@ -402,18 +387,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVL $92, AX // fcntl
SYSCALL
RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $92, AX // fcntl
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $4, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $92, AX // fcntl
- SYSCALL
- RET
diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s
index 97e6d9ab36..aceb6fe1bf 100644
--- a/src/runtime/sys_freebsd_386.s
+++ b/src/runtime/sys_freebsd_386.s
@@ -101,21 +101,6 @@ TEXT runtime·read(SB),NOSPLIT,$-4
MOVL AX, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$8-12
- MOVL $42, AX
- INT $0x80
- JAE ok
- MOVL $0, r+0(FP)
- MOVL $0, w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-ok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$12-16
MOVL $542, AX
@@ -443,23 +428,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$32
NEGL AX
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$16-4
- MOVL $92, AX // fcntl
- MOVL fd+0(FP), BX // fd
- MOVL BX, 4(SP)
- MOVL $3, 8(SP) // F_GETFL
- MOVL $0, 12(SP)
- INT $0x80
- MOVL fd+0(FP), BX // fd
- MOVL BX, 4(SP)
- MOVL $4, 8(SP) // F_SETFL
- ORL $4, AX // O_NONBLOCK
- MOVL AX, 12(SP)
- MOVL $92, AX // fcntl
- INT $0x80
- RET
-
// func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32
TEXT runtime·cpuset_getaffinity(SB), NOSPLIT, $0-28
MOVL $487, AX
diff --git a/src/runtime/sys_freebsd_amd64.s b/src/runtime/sys_freebsd_amd64.s
index 165e97c60d..cc95da7e64 100644
--- a/src/runtime/sys_freebsd_amd64.s
+++ b/src/runtime/sys_freebsd_amd64.s
@@ -102,21 +102,6 @@ TEXT runtime·read(SB),NOSPLIT,$-8
MOVL AX, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $42, AX
- SYSCALL
- JCC ok
- MOVL $0, r+0(FP)
- MOVL $0, w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-ok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
LEAQ r+8(FP), DI
@@ -491,21 +476,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $92, AX // fcntl
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $4, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $92, AX // fcntl
- SYSCALL
- RET
-
// func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32
TEXT runtime·cpuset_getaffinity(SB), NOSPLIT, $0-44
MOVQ level+0(FP), DI
diff --git a/src/runtime/sys_freebsd_arm.s b/src/runtime/sys_freebsd_arm.s
index b12e47c576..88ab0fc795 100644
--- a/src/runtime/sys_freebsd_arm.s
+++ b/src/runtime/sys_freebsd_arm.s
@@ -20,7 +20,6 @@
#define SYS_close (SYS_BASE + 6)
#define SYS_getpid (SYS_BASE + 20)
#define SYS_kill (SYS_BASE + 37)
-#define SYS_pipe (SYS_BASE + 42)
#define SYS_sigaltstack (SYS_BASE + 53)
#define SYS_munmap (SYS_BASE + 73)
#define SYS_madvise (SYS_BASE + 75)
@@ -123,23 +122,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
MOVW R0, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVW $SYS_pipe, R7
- SWI $0
- BCC ok
- MOVW $0, R1
- MOVW R1, r+0(FP)
- MOVW R1, w+4(FP)
- MOVW R0, errno+8(FP)
- RET
-ok:
- MOVW R0, r+0(FP)
- MOVW R1, w+4(FP)
- MOVW $0, R1
- MOVW R1, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW $r+4(FP), R0
@@ -414,20 +396,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SWI $0
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R0 // fd
- MOVW $3, R1 // F_GETFL
- MOVW $0, R2
- MOVW $SYS_fcntl, R7
- SWI $0
- ORR $0x4, R0, R2 // O_NONBLOCK
- MOVW fd+0(FP), R0 // fd
- MOVW $4, R1 // F_SETFL
- MOVW $SYS_fcntl, R7
- SWI $0
- RET
-
// TODO: this is only valid for ARMv7+
TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
B runtime·armPublicationBarrier(SB)
diff --git a/src/runtime/sys_freebsd_arm64.s b/src/runtime/sys_freebsd_arm64.s
index 1aa09e87ca..59adf4e5f3 100644
--- a/src/runtime/sys_freebsd_arm64.s
+++ b/src/runtime/sys_freebsd_arm64.s
@@ -133,18 +133,6 @@ ok:
MOVW R0, ret+8(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVD $r+0(FP), R0
- MOVW $0, R1
- MOVD $SYS_pipe2, R8
- SVC
- BCC ok
- NEG R0, R0
-ok:
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVD $r+8(FP), R0
@@ -492,20 +480,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SVC
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R0
- MOVD $F_GETFL, R1
- MOVD $0, R2
- MOVD $SYS_fcntl, R8
- SVC
- ORR $O_NONBLOCK, R0, R2
- MOVW fd+0(FP), R0
- MOVW $F_SETFL, R1
- MOVW $SYS_fcntl, R7
- SVC
- RET
-
// func getCntxct(physical bool) uint32
TEXT runtime·getCntxct(SB),NOSPLIT,$0
MOVB physical+0(FP), R0
diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s
index 6df812234c..fef68d51dc 100644
--- a/src/runtime/sys_linux_386.s
+++ b/src/runtime/sys_linux_386.s
@@ -32,7 +32,6 @@
#define SYS_getpid 20
#define SYS_access 33
#define SYS_kill 37
-#define SYS_pipe 42
#define SYS_brk 45
#define SYS_fcntl 55
#define SYS_munmap 91
@@ -130,14 +129,6 @@ TEXT runtime·read(SB),NOSPLIT,$0
MOVL AX, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $SYS_pipe, AX
- LEAL r+0(FP), BX
- INVOKE_SYSCALL
- MOVL AX, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVL $SYS_pipe2, AX
@@ -782,21 +773,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
INVOKE_SYSCALL
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL $SYS_fcntl, AX
- MOVL fd+0(FP), BX // fd
- MOVL $3, CX // F_GETFL
- MOVL $0, DX
- INVOKE_SYSCALL
- MOVL fd+0(FP), BX // fd
- MOVL $4, CX // F_SETFL
- MOVL $0x800, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $SYS_fcntl, AX
- INVOKE_SYSCALL
- RET
-
// int access(const char *name, int mode)
TEXT runtime·access(SB),NOSPLIT,$0
MOVL $SYS_access, AX
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index f0e58e11db..4be0801114 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -22,7 +22,6 @@
#define SYS_rt_sigaction 13
#define SYS_rt_sigprocmask 14
#define SYS_rt_sigreturn 15
-#define SYS_pipe 22
#define SYS_sched_yield 24
#define SYS_mincore 27
#define SYS_madvise 28
@@ -114,14 +113,6 @@ TEXT runtime·read(SB),NOSPLIT,$0-28
MOVL AX, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- LEAQ r+0(FP), DI
- MOVL $SYS_pipe, AX
- SYSCALL
- MOVL AX, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
LEAQ r+8(FP), DI
@@ -708,21 +699,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $SYS_fcntl, AX
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $0x800, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $SYS_fcntl, AX
- SYSCALL
- RET
-
// int access(const char *name, int mode)
TEXT runtime·access(SB),NOSPLIT,$0
// This uses faccessat instead of access, because Android O blocks access.
diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s
index ca443b699f..201940b4e6 100644
--- a/src/runtime/sys_linux_arm.s
+++ b/src/runtime/sys_linux_arm.s
@@ -23,7 +23,6 @@
#define SYS_close (SYS_BASE + 6)
#define SYS_getpid (SYS_BASE + 20)
#define SYS_kill (SYS_BASE + 37)
-#define SYS_pipe (SYS_BASE + 42)
#define SYS_clone (SYS_BASE + 120)
#define SYS_rt_sigreturn (SYS_BASE + 173)
#define SYS_rt_sigaction (SYS_BASE + 174)
@@ -98,14 +97,6 @@ TEXT runtime·read(SB),NOSPLIT,$0
MOVW R0, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVW $r+0(FP), R0
- MOVW $SYS_pipe, R7
- SWI $0
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW $r+4(FP), R0
@@ -717,20 +708,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SWI $0
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R0 // fd
- MOVW $3, R1 // F_GETFL
- MOVW $0, R2
- MOVW $SYS_fcntl, R7
- SWI $0
- ORR $0x800, R0, R2 // O_NONBLOCK
- MOVW fd+0(FP), R0 // fd
- MOVW $4, R1 // F_SETFL
- MOVW $SYS_fcntl, R7
- SWI $0
- RET
-
// b __kuser_get_tls @ 0xffff0fe0
TEXT runtime·read_tls_fallback(SB),NOSPLIT|NOFRAME,$0
MOVW $0xffff0fe0, R0
diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s
index 1276c077d7..ca362ed552 100644
--- a/src/runtime/sys_linux_arm64.s
+++ b/src/runtime/sys_linux_arm64.s
@@ -113,15 +113,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW R0, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVD $r+0(FP), R0
- MOVW $0, R1
- MOVW $SYS_pipe2, R8
- SVC
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVD $r+8(FP), R0
@@ -452,6 +443,7 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
BL (R11)
RET
+// Called from c-abi, R0: sig, R1: info, R2: cxt
TEXT runtime·sigtramp(SB),NOSPLIT,$192
// Save callee-save registers in the case of signal forwarding.
// Please refer to https://golang.org/issue/31827 .
@@ -511,9 +503,146 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$192
RET
-TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
- MOVD $runtime·sigtramp(SB), R3
- B (R3)
+// Called from c-abi, R0: sig, R1: info, R2: cxt
+TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT,$192
+ // TODO(eric): In multiple places we need to save and restore the
+ // callee-saved registers, we can define a macro for this.
+ // Save callee-save registers because it's a callback from c code.
+ MOVD R19, 8*4(RSP)
+ MOVD R20, 8*5(RSP)
+ MOVD R21, 8*6(RSP)
+ MOVD R22, 8*7(RSP)
+ MOVD R23, 8*8(RSP)
+ MOVD R24, 8*9(RSP)
+ MOVD R25, 8*10(RSP)
+ MOVD R26, 8*11(RSP)
+ MOVD R27, 8*12(RSP)
+ MOVD g, 8*13(RSP)
+ MOVD R29, 8*14(RSP)
+ FMOVD F8, 8*15(RSP)
+ FMOVD F9, 8*16(RSP)
+ FMOVD F10, 8*17(RSP)
+ FMOVD F11, 8*18(RSP)
+ FMOVD F12, 8*19(RSP)
+ FMOVD F13, 8*20(RSP)
+ FMOVD F14, 8*21(RSP)
+ FMOVD F15, 8*22(RSP)
+
+ MOVW R0, 8(RSP) // sig
+ MOVD R1, 16(RSP) // info
+ MOVD R2, 24(RSP) // ctx
+ CALL runtime·sigprofNonGo(SB)
+
+ // Restore callee-save registers.
+ MOVD 8*4(RSP), R19
+ MOVD 8*5(RSP), R20
+ MOVD 8*6(RSP), R21
+ MOVD 8*7(RSP), R22
+ MOVD 8*8(RSP), R23
+ MOVD 8*9(RSP), R24
+ MOVD 8*10(RSP), R25
+ MOVD 8*11(RSP), R26
+ MOVD 8*12(RSP), R27
+ MOVD 8*13(RSP), g
+ MOVD 8*14(RSP), R29
+ FMOVD 8*15(RSP), F8
+ FMOVD 8*16(RSP), F9
+ FMOVD 8*17(RSP), F10
+ FMOVD 8*18(RSP), F11
+ FMOVD 8*19(RSP), F12
+ FMOVD 8*20(RSP), F13
+ FMOVD 8*21(RSP), F14
+ FMOVD 8*22(RSP), F15
+ RET
+
+// Called from c-abi, R0: sig, R1: info, R2: cxt
+TEXT runtime·cgoSigtramp(SB),NOSPLIT|NOFRAME,$0
+ // The stack unwinder, presumably written in C, may not be able to
+ // handle Go frame correctly. So, this function is NOFRAME, and we
+ // save/restore LR manually.
+ MOVD LR, R10
+ // Save R27, g because they will be clobbered,
+ // we need to restore them before jump to sigtramp.
+ MOVD R27, R11
+ MOVD g, R12
+
+ // If no traceback function, do usual sigtramp.
+ MOVD runtime·cgoTraceback(SB), R6
+ CBZ R6, sigtramp
+
+ // If no traceback support function, which means that
+ // runtime/cgo was not linked in, do usual sigtramp.
+ MOVD _cgo_callers(SB), R7
+ CBZ R7, sigtramp
+
+ // Figure out if we are currently in a cgo call.
+ // If not, just do usual sigtramp.
+ // first save R0, because runtime·load_g will clobber it.
+ MOVD R0, R8
+ // Set up g register.
+ CALL runtime·load_g(SB)
+ MOVD R8, R0
+
+ CBZ g, sigtrampnog // g == nil
+ MOVD g_m(g), R6
+ CBZ R6, sigtramp // g.m == nil
+ MOVW m_ncgo(R6), R7
+ CBZW R7, sigtramp // g.m.ncgo = 0
+ MOVD m_curg(R6), R8
+ CBZ R8, sigtramp // g.m.curg == nil
+ MOVD g_syscallsp(R8), R7
+ CBZ R7, sigtramp // g.m.curg.syscallsp == 0
+ MOVD m_cgoCallers(R6), R4 // R4 is the fifth arg in C calling convention.
+ CBZ R4, sigtramp // g.m.cgoCallers == nil
+ MOVW m_cgoCallersUse(R6), R8
+ CBNZW R8, sigtramp // g.m.cgoCallersUse != 0
+
+ // Jump to a function in runtime/cgo.
+ // That function, written in C, will call the user's traceback
+ // function with proper unwind info, and will then call back here.
+ // The first three arguments, and the fifth, are already in registers.
+ // Set the two remaining arguments now.
+ MOVD runtime·cgoTraceback(SB), R3
+ MOVD $runtime·sigtramp(SB), R5
+ MOVD _cgo_callers(SB), R13
+ MOVD R10, LR // restore
+ MOVD R11, R27
+ MOVD R12, g
+ B (R13)
+
+sigtramp:
+ MOVD R10, LR // restore
+ MOVD R11, R27
+ MOVD R12, g
+ B runtime·sigtramp(SB)
+
+sigtrampnog:
+ // Signal arrived on a non-Go thread. If this is SIGPROF, get a
+ // stack trace.
+ CMPW $27, R0 // 27 == SIGPROF
+ BNE sigtramp
+
+ // Lock sigprofCallersUse (cas from 0 to 1).
+ MOVW $1, R7
+ MOVD $runtime·sigprofCallersUse(SB), R8
+load_store_loop:
+ LDAXRW (R8), R9
+ CBNZW R9, sigtramp // Skip stack trace if already locked.
+ STLXRW R7, (R8), R9
+ CBNZ R9, load_store_loop
+
+ // Jump to the traceback function in runtime/cgo.
+ // It will call back to sigprofNonGo, which will ignore the
+ // arguments passed in registers.
+ // First three arguments to traceback function are in registers already.
+ MOVD runtime·cgoTraceback(SB), R3
+ MOVD $runtime·sigprofCallers(SB), R4
+ MOVD $runtime·sigprofNonGoWrapper<>(SB), R5
+ MOVD _cgo_callers(SB), R13
+ MOVD R10, LR // restore
+ MOVD R11, R27
+ MOVD R12, g
+ B (R13)
TEXT runtime·sysMmap(SB),NOSPLIT|NOFRAME,$0
MOVD addr+0(FP), R0
@@ -740,21 +869,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SVC
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R0 // fd
- MOVD $3, R1 // F_GETFL
- MOVD $0, R2
- MOVD $SYS_fcntl, R8
- SVC
- MOVD $0x800, R2 // O_NONBLOCK
- ORR R0, R2
- MOVW fd+0(FP), R0 // fd
- MOVD $4, R1 // F_SETFL
- MOVD $SYS_fcntl, R8
- SVC
- RET
-
// int access(const char *name, int mode)
TEXT runtime·access(SB),NOSPLIT,$0-20
MOVD $AT_FDCWD, R0
diff --git a/src/runtime/sys_linux_mips64x.s b/src/runtime/sys_linux_mips64x.s
index 0df2597993..3c7f0e7307 100644
--- a/src/runtime/sys_linux_mips64x.s
+++ b/src/runtime/sys_linux_mips64x.s
@@ -113,17 +113,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW R2, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVV $r+0(FP), R4
- MOVV R0, R5
- MOVV $SYS_pipe2, R2
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVV $r+8(FP), R4
@@ -635,21 +624,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R4 // fd
- MOVV $3, R5 // F_GETFL
- MOVV $0, R6
- MOVV $SYS_fcntl, R2
- SYSCALL
- MOVW $0x80, R6 // O_NONBLOCK
- OR R2, R6
- MOVW fd+0(FP), R4 // fd
- MOVV $4, R5 // F_SETFL
- MOVV $SYS_fcntl, R2
- SYSCALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0-8
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_linux_mipsx.s b/src/runtime/sys_linux_mipsx.s
index 2207e9ab98..ab4e976ee4 100644
--- a/src/runtime/sys_linux_mipsx.s
+++ b/src/runtime/sys_linux_mipsx.s
@@ -19,7 +19,6 @@
#define SYS_close 4006
#define SYS_getpid 4020
#define SYS_kill 4037
-#define SYS_pipe 4042
#define SYS_brk 4045
#define SYS_fcntl 4055
#define SYS_mmap 4090
@@ -112,23 +111,6 @@ TEXT runtime·read(SB),NOSPLIT,$0-16
MOVW R2, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVW $SYS_pipe, R2
- SYSCALL
- BEQ R7, pipeok
- MOVW $-1, R1
- MOVW R1, r+0(FP)
- MOVW R1, w+4(FP)
- SUBU R2, R0, R2 // caller expects negative errno
- MOVW R2, errno+8(FP)
- RET
-pipeok:
- MOVW R2, r+0(FP)
- MOVW R3, w+4(FP)
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW $r+4(FP), R4
@@ -559,21 +541,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0-4
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R4 // fd
- MOVW $3, R5 // F_GETFL
- MOVW $0, R6
- MOVW $SYS_fcntl, R2
- SYSCALL
- MOVW $0x80, R6 // O_NONBLOCK
- OR R2, R6
- MOVW fd+0(FP), R4 // fd
- MOVW $4, R5 // F_SETFL
- MOVW $SYS_fcntl, R2
- SYSCALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT,$0-4
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_linux_ppc64x.s b/src/runtime/sys_linux_ppc64x.s
index dc3d89fae7..48f9334795 100644
--- a/src/runtime/sys_linux_ppc64x.s
+++ b/src/runtime/sys_linux_ppc64x.s
@@ -20,7 +20,6 @@
#define SYS_close 6
#define SYS_getpid 20
#define SYS_kill 37
-#define SYS_pipe 42
#define SYS_brk 45
#define SYS_fcntl 55
#define SYS_mmap 90
@@ -104,13 +103,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW R3, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- ADD $FIXED_FRAME, R1, R3
- SYSCALL $SYS_pipe
- MOVW R3, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
ADD $FIXED_FRAME+8, R1, R3
@@ -933,18 +925,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SYSCALL $SYS_fcntl
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R3 // fd
- MOVD $3, R4 // F_GETFL
- MOVD $0, R5
- SYSCALL $SYS_fcntl
- OR $0x800, R3, R5 // O_NONBLOCK
- MOVW fd+0(FP), R3 // fd
- MOVD $4, R4 // F_SETFL
- SYSCALL $SYS_fcntl
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_linux_riscv64.s b/src/runtime/sys_linux_riscv64.s
index a3da46d136..8dde29eb92 100644
--- a/src/runtime/sys_linux_riscv64.s
+++ b/src/runtime/sys_linux_riscv64.s
@@ -118,15 +118,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW A0, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOV $r+0(FP), A0
- MOV ZERO, A1
- MOV $SYS_pipe2, A7
- ECALL
- MOVW A0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOV $r+8(FP), A0
@@ -635,21 +626,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
ECALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), A0 // fd
- MOV $3, A1 // F_GETFL
- MOV $0, A2
- MOV $SYS_fcntl, A7
- ECALL
- MOV $0x800, A2 // O_NONBLOCK
- OR A0, A2
- MOVW fd+0(FP), A0 // fd
- MOV $4, A1 // F_SETFL
- MOV $SYS_fcntl, A7
- ECALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT,$0-8
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_linux_s390x.s b/src/runtime/sys_linux_s390x.s
index 886add8b54..03ec7f03fd 100644
--- a/src/runtime/sys_linux_s390x.s
+++ b/src/runtime/sys_linux_s390x.s
@@ -16,7 +16,6 @@
#define SYS_close 6
#define SYS_getpid 20
#define SYS_kill 37
-#define SYS_pipe 42
#define SYS_brk 45
#define SYS_fcntl 55
#define SYS_mmap 90
@@ -103,14 +102,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW R2, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVD $r+0(FP), R2
- MOVW $SYS_pipe, R1
- SYSCALL
- MOVW R2, errno+8(FP)
- RET
-
// func pipe2() (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVD $r+8(FP), R2
@@ -497,21 +488,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R2 // fd
- MOVD $3, R3 // F_GETFL
- XOR R4, R4
- MOVW $SYS_fcntl, R1
- SYSCALL
- MOVD $0x800, R4 // O_NONBLOCK
- OR R2, R4
- MOVW fd+0(FP), R2 // fd
- MOVD $4, R3 // F_SETFL
- MOVW $SYS_fcntl, R1
- SYSCALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0-8
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_netbsd_386.s b/src/runtime/sys_netbsd_386.s
index 8a33894892..b7d4645af1 100644
--- a/src/runtime/sys_netbsd_386.s
+++ b/src/runtime/sys_netbsd_386.s
@@ -87,21 +87,6 @@ TEXT runtime·read(SB),NOSPLIT,$-4
MOVL AX, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $42, AX
- INT $0x80
- JCC pipeok
- MOVL $-1, r+0(FP)
- MOVL $-1, w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-pipeok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$12-16
MOVL $453, AX
@@ -484,20 +469,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$32
JAE 2(PC)
NEGL AX
RET
-
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$16-4
- MOVL $92, AX // fcntl
- MOVL fd+0(FP), BX // fd
- MOVL BX, 4(SP)
- MOVL $3, 8(SP) // F_GETFL
- MOVL $0, 12(SP)
- INT $0x80
- MOVL fd+0(FP), BX // fd
- MOVL BX, 4(SP)
- MOVL $4, 8(SP) // F_SETFL
- ORL $4, AX // O_NONBLOCK
- MOVL AX, 12(SP)
- MOVL $92, AX // fcntl
- INT $0x80
- RET
diff --git a/src/runtime/sys_netbsd_amd64.s b/src/runtime/sys_netbsd_amd64.s
index 02f5b4ba3b..41eddf3735 100644
--- a/src/runtime/sys_netbsd_amd64.s
+++ b/src/runtime/sys_netbsd_amd64.s
@@ -163,21 +163,6 @@ TEXT runtime·read(SB),NOSPLIT,$-8
MOVL AX, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $42, AX
- SYSCALL
- JCC pipeok
- MOVL $-1, r+0(FP)
- MOVL $-1, w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-pipeok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
LEAQ r+8(FP), DI
@@ -449,18 +434,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVL $SYS_fcntl, AX
SYSCALL
RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $92, AX // fcntl
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $4, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $92, AX // fcntl
- SYSCALL
- RET
diff --git a/src/runtime/sys_netbsd_arm.s b/src/runtime/sys_netbsd_arm.s
index 3a763b2a6a..bbca040994 100644
--- a/src/runtime/sys_netbsd_arm.s
+++ b/src/runtime/sys_netbsd_arm.s
@@ -96,22 +96,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
MOVW R0, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- SWI $0xa0002a
- BCC pipeok
- MOVW $-1,R2
- MOVW R2, r+0(FP)
- MOVW R2, w+4(FP)
- MOVW R0, errno+8(FP)
- RET
-pipeok:
- MOVW $0, R2
- MOVW R0, r+0(FP)
- MOVW R1, w+4(FP)
- MOVW R2, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW $r+4(FP), R0
@@ -422,18 +406,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SWI $SYS_fcntl
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R0 // fd
- MOVW $3, R1 // F_GETFL
- MOVW $0, R2
- SWI $0xa0005c // sys_fcntl
- ORR $0x4, R0, R2 // O_NONBLOCK
- MOVW fd+0(FP), R0 // fd
- MOVW $4, R1 // F_SETFL
- SWI $0xa0005c // sys_fcntl
- RET
-
// TODO: this is only valid for ARMv7+
TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
B runtime·armPublicationBarrier(SB)
diff --git a/src/runtime/sys_netbsd_arm64.s b/src/runtime/sys_netbsd_arm64.s
index 8a0496e807..f7cce57c2d 100644
--- a/src/runtime/sys_netbsd_arm64.s
+++ b/src/runtime/sys_netbsd_arm64.s
@@ -154,17 +154,6 @@ ok:
MOVW R0, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- ADD $8, RSP, R0
- MOVW $0, R1
- SVC $SYS_pipe2
- BCC pipeok
- NEG R0, R0
-pipeok:
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
ADD $16, RSP, R0
@@ -466,16 +455,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVW $FD_CLOEXEC, R2
SVC $SYS_fcntl
RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD $F_GETFL, R1 // arg 2 - cmd
- MOVD $0, R2 // arg 3
- SVC $SYS_fcntl
- MOVD $O_NONBLOCK, R2
- EOR R0, R2 // arg 3 - flags
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD $F_SETFL, R1 // arg 2 - cmd
- SVC $SYS_fcntl
- RET
diff --git a/src/runtime/sys_openbsd2.go b/src/runtime/sys_openbsd2.go
index 4d50b4f6b1..d174d87a49 100644
--- a/src/runtime/sys_openbsd2.go
+++ b/src/runtime/sys_openbsd2.go
@@ -111,10 +111,6 @@ func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
}
func write_trampoline()
-func pipe() (r, w int32, errno int32) {
- return pipe2(0)
-}
-
func pipe2(flags int32) (r, w int32, errno int32) {
var p [2]int32
args := struct {
@@ -258,12 +254,6 @@ func closeonexec(fd int32) {
fcntl(fd, _F_SETFD, _FD_CLOEXEC)
}
-//go:nosplit
-func setNonblock(fd int32) {
- flags := fcntl(fd, _F_GETFL, 0)
- fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
-}
-
// Tell the linker that the libc_* functions are to be found
// in a system library, with the libc_ prefix missing.
diff --git a/src/runtime/sys_openbsd_mips64.s b/src/runtime/sys_openbsd_mips64.s
index f8ae8e7c30..3b18bdda7a 100644
--- a/src/runtime/sys_openbsd_mips64.s
+++ b/src/runtime/sys_openbsd_mips64.s
@@ -64,17 +64,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
MOVW R2, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVV $r+0(FP), R4
- MOVW $0, R5
- MOVV $101, R2 // sys_pipe2
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVV $r+8(FP), R4
@@ -383,18 +372,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVV $92, R2 // sys_fcntl
SYSCALL
RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R4 // arg 1 - fd
- MOVV $3, R5 // arg 2 - cmd (F_GETFL)
- MOVV $0, R6 // arg 3
- MOVV $92, R2 // sys_fcntl
- SYSCALL
- MOVV $4, R6 // O_NONBLOCK
- OR R2, R6 // arg 3 - flags
- MOVW fd+0(FP), R4 // arg 1 - fd
- MOVV $4, R5 // arg 2 - cmd (F_SETFL)
- MOVV $92, R2 // sys_fcntl
- SYSCALL
- RET
diff --git a/src/runtime/testdata/testprogcgo/aprof.go b/src/runtime/testdata/testprogcgo/aprof.go
index c70d6333bb..16870144dd 100644
--- a/src/runtime/testdata/testprogcgo/aprof.go
+++ b/src/runtime/testdata/testprogcgo/aprof.go
@@ -10,7 +10,7 @@ package main
// This is a regression test for issue 14599, where profiling fails when the
// function is the first C function. Exported functions are the first C
// functions, so we use an exported function. Exported functions are created in
-// lexigraphical order of source files, so this file is named aprof.go to
+// lexicographical order of source files, so this file is named aprof.go to
// ensure its function is first.
// extern void CallGoNop();
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 71a29d4316..8f60de2b05 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -229,7 +229,7 @@ func StartTrace() error {
gp.traceseq = 0
gp.tracelastp = getg().m.p
// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
- id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
+ id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
}
if status == _Gwaiting {
@@ -1071,7 +1071,7 @@ func traceGoCreate(newg *g, pc uintptr) {
newg.traceseq = 0
newg.tracelastp = getg().m.p
// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
- id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
+ id := trace.stackTab.put([]uintptr{startPCforTrace(pc) + sys.PCQuantum})
traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
@@ -1244,3 +1244,17 @@ func trace_userLog(id uint64, category, message string) {
traceReleaseBuffer(pid)
}
+
+// startPCforTrace returns the start PC of a goroutine for tracing purposes. If pc is a wrapper,
+// it returns the PC of the wrapped function. Otherwise it returns pc.
+func startPCforTrace(pc uintptr) uintptr {
+ f := findfunc(pc)
+ if !f.valid() {
+ return pc // should not happen, but don't care
+ }
+ w := funcdata(f, _FUNCDATA_WrapInfo)
+ if w == nil {
+ return pc // not a wrapper
+ }
+ return f.datap.textAddr(*(*uint32)(w))
+}
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 73bd0e11a9..0cdd53cc93 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -1229,9 +1229,9 @@ func isSystemGoroutine(gp *g, fixed bool) bool {
//
// On all platforms, the traceback function is invoked when a call from
// Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
-// and freebsd/amd64, the traceback function is also invoked when a
-// signal is received by a thread that is executing a cgo call. The
-// traceback function should not make assumptions about when it is
+// linux/arm64, and freebsd/amd64, the traceback function is also invoked
+// when a signal is received by a thread that is executing a cgo call.
+// The traceback function should not make assumptions about when it is
// called, as future versions of Go may make additional calls.
//
// The symbolizer function will be called with a single argument, a
diff --git a/src/sort/gen_sort_variants.go b/src/sort/gen_sort_variants.go
new file mode 100644
index 0000000000..5f817221e1
--- /dev/null
+++ b/src/sort/gen_sort_variants.go
@@ -0,0 +1,526 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+// This program is run via "go generate" (via a directive in sort.go)
+// to generate implementation variants of the underlying sorting algorithm.
+// When passed the -generic flag it generates generic variants of sorting;
+// otherwise it generates the non-generic variants used by the sort package.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "log"
+ "os"
+ "text/template"
+)
+
+type Variant struct {
+ // Name is the variant name: should be unique among variants.
+ Name string
+
+ // Path is the file path into which the generator will emit the code for this
+ // variant.
+ Path string
+
+ // Package is the package this code will be emitted into.
+ Package string
+
+ // Imports is the imports needed for this package.
+ Imports string
+
+ // FuncSuffix is appended to all function names in this variant's code. All
+ // suffixes should be unique within a package.
+ FuncSuffix string
+
+ // DataType is the type of the data parameter of functions in this variant's
+ // code.
+ DataType string
+
+ // TypeParam is the optional type parameter for the function.
+ TypeParam string
+
+ // ExtraParam is an extra parameter to pass to the function. Should begin with
+ // ", " to separate from other params.
+ ExtraParam string
+
+ // ExtraArg is an extra argument to pass to calls between functions; typically
+ // it invokes ExtraParam. Should begin with ", " to separate from other args.
+ ExtraArg string
+
+ // Funcs is a map of functions used from within the template. The following
+ // functions are expected to exist:
+ //
+ // Less (name, i, j):
+ // emits a comparison expression that checks if the value `name` at
+ // index `i` is smaller than at index `j`.
+ //
+ // Swap (name, i, j):
+ // emits a statement that performs a data swap between elements `i` and
+ // `j` of the value `name`.
+ Funcs template.FuncMap
+}
+
+func main() {
+ genGeneric := flag.Bool("generic", false, "generate generic versions")
+ flag.Parse()
+
+ if *genGeneric {
+ generate(&Variant{
+ Name: "generic_ordered",
+ Path: "zsortordered.go",
+ Package: "slices",
+ Imports: "import \"constraints\"\n",
+ FuncSuffix: "Ordered",
+ TypeParam: "[Elem constraints.Ordered]",
+ ExtraParam: "",
+ ExtraArg: "",
+ DataType: "[]Elem",
+ Funcs: template.FuncMap{
+ "Less": func(name, i, j string) string {
+ return fmt.Sprintf("(%s[%s] < %s[%s])", name, i, name, j)
+ },
+ "Swap": func(name, i, j string) string {
+ return fmt.Sprintf("%s[%s], %s[%s] = %s[%s], %s[%s]", name, i, name, j, name, j, name, i)
+ },
+ },
+ })
+
+ generate(&Variant{
+ Name: "generic_func",
+ Path: "zsortanyfunc.go",
+ Package: "slices",
+ FuncSuffix: "LessFunc",
+ TypeParam: "[Elem any]",
+ ExtraParam: ", less func(a, b Elem) bool",
+ ExtraArg: ", less",
+ DataType: "[]Elem",
+ Funcs: template.FuncMap{
+ "Less": func(name, i, j string) string {
+ return fmt.Sprintf("less(%s[%s], %s[%s])", name, i, name, j)
+ },
+ "Swap": func(name, i, j string) string {
+ return fmt.Sprintf("%s[%s], %s[%s] = %s[%s], %s[%s]", name, i, name, j, name, j, name, i)
+ },
+ },
+ })
+ } else {
+ generate(&Variant{
+ Name: "interface",
+ Path: "zsortinterface.go",
+ Package: "sort",
+ Imports: "",
+ FuncSuffix: "",
+ TypeParam: "",
+ ExtraParam: "",
+ ExtraArg: "",
+ DataType: "Interface",
+ Funcs: template.FuncMap{
+ "Less": func(name, i, j string) string {
+ return fmt.Sprintf("%s.Less(%s, %s)", name, i, j)
+ },
+ "Swap": func(name, i, j string) string {
+ return fmt.Sprintf("%s.Swap(%s, %s)", name, i, j)
+ },
+ },
+ })
+
+ generate(&Variant{
+ Name: "func",
+ Path: "zsortfunc.go",
+ Package: "sort",
+ Imports: "",
+ FuncSuffix: "_func",
+ TypeParam: "",
+ ExtraParam: "",
+ ExtraArg: "",
+ DataType: "lessSwap",
+ Funcs: template.FuncMap{
+ "Less": func(name, i, j string) string {
+ return fmt.Sprintf("%s.Less(%s, %s)", name, i, j)
+ },
+ "Swap": func(name, i, j string) string {
+ return fmt.Sprintf("%s.Swap(%s, %s)", name, i, j)
+ },
+ },
+ })
+ }
+}
+
+// generate generates the code for variant `v` into a file named by `v.Path`.
+func generate(v *Variant) {
+ // Parse templateCode anew for each variant because Parse requires Funcs to be
+ // registered, and it helps type-check the funcs.
+ tmpl, err := template.New("gen").Funcs(v.Funcs).Parse(templateCode)
+ if err != nil {
+ log.Fatal("template Parse:", err)
+ }
+
+ var out bytes.Buffer
+ err = tmpl.Execute(&out, v)
+ if err != nil {
+ log.Fatal("template Execute:", err)
+ }
+
+ formatted, err := format.Source(out.Bytes())
+ if err != nil {
+ log.Fatal("format:", err)
+ }
+
+ if err := os.WriteFile(v.Path, formatted, 0644); err != nil {
+ log.Fatal("WriteFile:", err)
+ }
+}
+
+var templateCode = `// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package {{.Package}}
+
+{{.Imports}}
+
+// insertionSort{{.FuncSuffix}} sorts data[a:b] using insertion sort.
+func insertionSort{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, a, b int {{.ExtraParam}}) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && {{Less "data" "j" "j-1"}}; j-- {
+ {{Swap "data" "j" "j-1"}}
+ }
+ }
+}
+
+// siftDown{{.FuncSuffix}} implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDown{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, lo, hi, first int {{.ExtraParam}}) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && {{Less "data" "first+child" "first+child+1"}} {
+ child++
+ }
+ if !{{Less "data" "first+root" "first+child"}} {
+ return
+ }
+ {{Swap "data" "first+root" "first+child"}}
+ root = child
+ }
+}
+
+func heapSort{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, a, b int {{.ExtraParam}}) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown{{.FuncSuffix}}(data, i, hi, first {{.ExtraArg}})
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ {{Swap "data" "first" "first+i"}}
+ siftDown{{.FuncSuffix}}(data, lo, i, first {{.ExtraArg}})
+ }
+}
+
+// Quicksort, loosely following Bentley and McIlroy,
+// "Engineering a Sort Function" SP&E November 1993.
+
+// medianOfThree{{.FuncSuffix}} moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, m1, m0, m2 int {{.ExtraParam}}) {
+ // sort 3 elements
+ if {{Less "data" "m1" "m0"}} {
+ {{Swap "data" "m1" "m0"}}
+ }
+ // data[m0] <= data[m1]
+ if {{Less "data" "m2" "m1"}} {
+ {{Swap "data" "m2" "m1"}}
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if {{Less "data" "m1" "m0"}} {
+ {{Swap "data" "m1" "m0"}}
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
+
+func swapRange{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, a, b, n int {{.ExtraParam}}) {
+ for i := 0; i < n; i++ {
+ {{Swap "data" "a+i" "b+i"}}
+ }
+}
+
+func doPivot{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, lo, hi int {{.ExtraParam}}) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's "Ninther" median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThree{{.FuncSuffix}}(data, lo, lo+s, lo+2*s {{.ExtraArg}})
+ medianOfThree{{.FuncSuffix}}(data, m, m-s, m+s {{.ExtraArg}})
+ medianOfThree{{.FuncSuffix}}(data, hi-1, hi-1-s, hi-1-2*s {{.ExtraArg}})
+ }
+ medianOfThree{{.FuncSuffix}}(data, lo, m, hi-1 {{.ExtraArg}})
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && {{Less "data" "a" "pivot"}}; a++ {
+ }
+ b := a
+ for {
+ for ; b < c && !{{Less "data" "pivot" "b"}}; b++ { // data[b] <= pivot
+ }
+ for ; b < c && {{Less "data" "pivot" "c-1"}}; c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ {{Swap "data" "b" "c-1"}}
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Lets test some points for equality to pivot
+ dups := 0
+ if !{{Less "data" "pivot" "hi-1"}} { // data[hi-1] = pivot
+ {{Swap "data" "c" "hi-1"}}
+ c++
+ dups++
+ }
+ if !{{Less "data" "b-1" "pivot"}} { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if !{{Less "data" "m" "pivot"}} { // data[m] = pivot
+ {{Swap "data" "m" "b-1"}}
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && !{{Less "data" "b-1" "pivot"}}; b-- { // data[b] == pivot
+ }
+ for ; a < b && {{Less "data" "a" "pivot"}}; a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ {{Swap "data" "a" "b-1"}}
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ {{Swap "data" "pivot" "b-1"}}
+ return b - 1, c
+}
+
+func quickSort{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, a, b, maxDepth int {{.ExtraParam}}) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort{{.FuncSuffix}}(data, a, b {{.ExtraArg}})
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivot{{.FuncSuffix}}(data, a, b {{.ExtraArg}})
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSort{{.FuncSuffix}}(data, a, mlo, maxDepth {{.ExtraArg}})
+ a = mhi // i.e., quickSort{{.FuncSuffix}}(data, mhi, b)
+ } else {
+ quickSort{{.FuncSuffix}}(data, mhi, b, maxDepth {{.ExtraArg}})
+ b = mlo // i.e., quickSort{{.FuncSuffix}}(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form cause b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if {{Less "data" "i" "i-6"}} {
+ {{Swap "data" "i" "i-6"}}
+ }
+ }
+ insertionSort{{.FuncSuffix}}(data, a, b {{.ExtraArg}})
+ }
+}
+
+func stable{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, n int {{.ExtraParam}}) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSort{{.FuncSuffix}}(data, a, b {{.ExtraArg}})
+ a = b
+ b += blockSize
+ }
+ insertionSort{{.FuncSuffix}}(data, a, n {{.ExtraArg}})
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMerge{{.FuncSuffix}}(data, a, a+blockSize, b {{.ExtraArg}})
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMerge{{.FuncSuffix}}(data, a, m, n {{.ExtraArg}})
+ }
+ blockSize *= 2
+ }
+}
+
+// symMerge{{.FuncSuffix}} merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMerge{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, a, m, b int {{.ExtraParam}}) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if {{Less "data" "h" "a"}} {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ {{Swap "data" "k" "k+1"}}
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !{{Less "data" "m" "h"}} {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ {{Swap "data" "k" "k-1"}}
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !{{Less "data" "p-c" "c"}} {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotate{{.FuncSuffix}}(data, start, m, end {{.ExtraArg}})
+ }
+ if a < start && start < mid {
+ symMerge{{.FuncSuffix}}(data, a, start, mid {{.ExtraArg}})
+ }
+ if mid < end && end < b {
+ symMerge{{.FuncSuffix}}(data, mid, end, b {{.ExtraArg}})
+ }
+}
+
+// rotate{{.FuncSuffix}} rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotate{{.FuncSuffix}}{{.TypeParam}}(data {{.DataType}}, a, m, b int {{.ExtraParam}}) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRange{{.FuncSuffix}}(data, m-i, m, j {{.ExtraArg}})
+ i -= j
+ } else {
+ swapRange{{.FuncSuffix}}(data, m-i, m+j-i, i {{.ExtraArg}})
+ j -= i
+ }
+ }
+ // i == j
+ swapRange{{.FuncSuffix}}(data, m-i, m, i {{.ExtraArg}})
+}
+`
diff --git a/src/sort/genzfunc.go b/src/sort/genzfunc.go
deleted file mode 100644
index ed04e33568..0000000000
--- a/src/sort/genzfunc.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build ignore
-// +build ignore
-
-// This program is run via "go generate" (via a directive in sort.go)
-// to generate zfuncversion.go.
-//
-// It copies sort.go to zfuncversion.go, only retaining funcs which
-// take a "data Interface" parameter, and renaming each to have a
-// "_func" suffix and taking a "data lessSwap" instead. It then rewrites
-// each internal function call to the appropriate _func variants.
-
-package main
-
-import (
- "bytes"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "log"
- "os"
- "regexp"
-)
-
-var fset = token.NewFileSet()
-
-func main() {
- af, err := parser.ParseFile(fset, "sort.go", nil, 0)
- if err != nil {
- log.Fatal(err)
- }
- af.Doc = nil
- af.Imports = nil
- af.Comments = nil
-
- var newDecl []ast.Decl
- for _, d := range af.Decls {
- fd, ok := d.(*ast.FuncDecl)
- if !ok {
- continue
- }
- if fd.Recv != nil || fd.Name.IsExported() {
- continue
- }
- typ := fd.Type
- if len(typ.Params.List) < 1 {
- continue
- }
- arg0 := typ.Params.List[0]
- arg0Name := arg0.Names[0].Name
- arg0Type := arg0.Type.(*ast.Ident)
- if arg0Name != "data" || arg0Type.Name != "Interface" {
- continue
- }
- arg0Type.Name = "lessSwap"
-
- newDecl = append(newDecl, fd)
- }
- af.Decls = newDecl
- ast.Walk(visitFunc(rewriteCalls), af)
-
- var out bytes.Buffer
- if err := format.Node(&out, fset, af); err != nil {
- log.Fatalf("format.Node: %v", err)
- }
-
- // Get rid of blank lines after removal of comments.
- src := regexp.MustCompile(`\n{2,}`).ReplaceAll(out.Bytes(), []byte("\n"))
-
- // Add comments to each func, for the lost reader.
- // This is so much easier than adding comments via the AST
- // and trying to get position info correct.
- src = regexp.MustCompile(`(?m)^func (\w+)`).ReplaceAll(src, []byte("\n// Auto-generated variant of sort.go:$1\nfunc ${1}_func"))
-
- // Final gofmt.
- src, err = format.Source(src)
- if err != nil {
- log.Fatalf("format.Source: %v on\n%s", err, src)
- }
-
- out.Reset()
- out.WriteString(`// Code generated from sort.go using genzfunc.go; DO NOT EDIT.
-
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-`)
- out.Write(src)
-
- const target = "zfuncversion.go"
- if err := os.WriteFile(target, out.Bytes(), 0644); err != nil {
- log.Fatal(err)
- }
-}
-
-type visitFunc func(ast.Node) ast.Visitor
-
-func (f visitFunc) Visit(n ast.Node) ast.Visitor { return f(n) }
-
-func rewriteCalls(n ast.Node) ast.Visitor {
- ce, ok := n.(*ast.CallExpr)
- if ok {
- rewriteCall(ce)
- }
- return visitFunc(rewriteCalls)
-}
-
-func rewriteCall(ce *ast.CallExpr) {
- ident, ok := ce.Fun.(*ast.Ident)
- if !ok {
- // e.g. skip SelectorExpr (data.Less(..) calls)
- return
- }
- // skip casts
- if ident.Name == "int" || ident.Name == "uint" {
- return
- }
- if len(ce.Args) < 1 {
- return
- }
- ident.Name += "_func"
-}
diff --git a/src/sort/sort.go b/src/sort/sort.go
index 749310764a..2c197afc03 100644
--- a/src/sort/sort.go
+++ b/src/sort/sort.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:generate go run genzfunc.go
+//go:generate go run gen_sort_variants.go
// Package sort provides primitives for sorting slices and user-defined collections.
package sort
@@ -34,195 +34,6 @@ type Interface interface {
Swap(i, j int)
}
-// insertionSort sorts data[a:b] using insertion sort.
-func insertionSort(data Interface, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && data.Less(j, j-1); j-- {
- data.Swap(j, j-1)
- }
- }
-}
-
-// siftDown implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDown(data Interface, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && data.Less(first+child, first+child+1) {
- child++
- }
- if !data.Less(first+root, first+child) {
- return
- }
- data.Swap(first+root, first+child)
- root = child
- }
-}
-
-func heapSort(data Interface, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDown(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data.Swap(first, first+i)
- siftDown(data, lo, i, first)
- }
-}
-
-// Quicksort, loosely following Bentley and McIlroy,
-// ``Engineering a Sort Function,'' SP&E November 1993.
-
-// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
-func medianOfThree(data Interface, m1, m0, m2 int) {
- // sort 3 elements
- if data.Less(m1, m0) {
- data.Swap(m1, m0)
- }
- // data[m0] <= data[m1]
- if data.Less(m2, m1) {
- data.Swap(m2, m1)
- // data[m0] <= data[m2] && data[m1] < data[m2]
- if data.Less(m1, m0) {
- data.Swap(m1, m0)
- }
- }
- // now data[m0] <= data[m1] <= data[m2]
-}
-
-func swapRange(data Interface, a, b, n int) {
- for i := 0; i < n; i++ {
- data.Swap(a+i, b+i)
- }
-}
-
-func doPivot(data Interface, lo, hi int) (midlo, midhi int) {
- m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
- if hi-lo > 40 {
- // Tukey's ``Ninther,'' median of three medians of three.
- s := (hi - lo) / 8
- medianOfThree(data, lo, lo+s, lo+2*s)
- medianOfThree(data, m, m-s, m+s)
- medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
- }
- medianOfThree(data, lo, m, hi-1)
-
- // Invariants are:
- // data[lo] = pivot (set up by ChoosePivot)
- // data[lo < i < a] < pivot
- // data[a <= i < b] <= pivot
- // data[b <= i < c] unexamined
- // data[c <= i < hi-1] > pivot
- // data[hi-1] >= pivot
- pivot := lo
- a, c := lo+1, hi-1
-
- for ; a < c && data.Less(a, pivot); a++ {
- }
- b := a
- for {
- for ; b < c && !data.Less(pivot, b); b++ { // data[b] <= pivot
- }
- for ; b < c && data.Less(pivot, c-1); c-- { // data[c-1] > pivot
- }
- if b >= c {
- break
- }
- // data[b] > pivot; data[c-1] <= pivot
- data.Swap(b, c-1)
- b++
- c--
- }
- // If hi-c<3 then there are duplicates (by property of median of nine).
- // Let's be a bit more conservative, and set border to 5.
- protect := hi-c < 5
- if !protect && hi-c < (hi-lo)/4 {
- // Lets test some points for equality to pivot
- dups := 0
- if !data.Less(pivot, hi-1) { // data[hi-1] = pivot
- data.Swap(c, hi-1)
- c++
- dups++
- }
- if !data.Less(b-1, pivot) { // data[b-1] = pivot
- b--
- dups++
- }
- // m-lo = (hi-lo)/2 > 6
- // b-lo > (hi-lo)*3/4-1 > 8
- // ==> m < b ==> data[m] <= pivot
- if !data.Less(m, pivot) { // data[m] = pivot
- data.Swap(m, b-1)
- b--
- dups++
- }
- // if at least 2 points are equal to pivot, assume skewed distribution
- protect = dups > 1
- }
- if protect {
- // Protect against a lot of duplicates
- // Add invariant:
- // data[a <= i < b] unexamined
- // data[b <= i < c] = pivot
- for {
- for ; a < b && !data.Less(b-1, pivot); b-- { // data[b] == pivot
- }
- for ; a < b && data.Less(a, pivot); a++ { // data[a] < pivot
- }
- if a >= b {
- break
- }
- // data[a] == pivot; data[b-1] < pivot
- data.Swap(a, b-1)
- a++
- b--
- }
- }
- // Swap pivot into middle
- data.Swap(pivot, b-1)
- return b - 1, c
-}
-
-func quickSort(data Interface, a, b, maxDepth int) {
- for b-a > 12 { // Use ShellSort for slices <= 12 elements
- if maxDepth == 0 {
- heapSort(data, a, b)
- return
- }
- maxDepth--
- mlo, mhi := doPivot(data, a, b)
- // Avoiding recursion on the larger subproblem guarantees
- // a stack depth of at most lg(b-a).
- if mlo-a < b-mhi {
- quickSort(data, a, mlo, maxDepth)
- a = mhi // i.e., quickSort(data, mhi, b)
- } else {
- quickSort(data, mhi, b, maxDepth)
- b = mlo // i.e., quickSort(data, a, mlo)
- }
- }
- if b-a > 1 {
- // Do ShellSort pass with gap 6
- // It could be written in this simplified form cause b-a <= 12
- for i := a + 6; i < b; i++ {
- if data.Less(i, i-6) {
- data.Swap(i, i-6)
- }
- }
- insertionSort(data, a, b)
- }
-}
-
// Sort sorts data in ascending order as determined by the Less method.
// It makes one call to data.Len to determine n and O(n*log(n)) calls to
// data.Less and data.Swap. The sort is not guaranteed to be stable.
@@ -379,152 +190,6 @@ func Stable(data Interface) {
stable(data, data.Len())
}
-func stable(data Interface, n int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSort(data, a, b)
- a = b
- b += blockSize
- }
- insertionSort(data, a, n)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMerge(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMerge(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// symMerge merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMerge(data Interface, a, m, b int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if data.Less(h, a) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data.Swap(k, k+1)
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !data.Less(m, h) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data.Swap(k, k-1)
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !data.Less(p-c, c) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotate(data, start, m, end)
- }
- if a < start && start < mid {
- symMerge(data, a, start, mid)
- }
- if mid < end && end < b {
- symMerge(data, mid, end, b)
- }
-}
-
-// rotate rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotate(data Interface, a, m, b int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRange(data, m-i, m, j)
- i -= j
- } else {
- swapRange(data, m-i, m+j-i, i)
- j -= i
- }
- }
- // i == j
- swapRange(data, m-i, m, i)
-}
-
/*
Complexity of Stable Sorting
diff --git a/src/sort/zfuncversion.go b/src/sort/zfuncversion.go
deleted file mode 100644
index 30067cbe07..0000000000
--- a/src/sort/zfuncversion.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Code generated from sort.go using genzfunc.go; DO NOT EDIT.
-
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sort
-
-// Auto-generated variant of sort.go:insertionSort
-func insertionSort_func(data lessSwap, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && data.Less(j, j-1); j-- {
- data.Swap(j, j-1)
- }
- }
-}
-
-// Auto-generated variant of sort.go:siftDown
-func siftDown_func(data lessSwap, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && data.Less(first+child, first+child+1) {
- child++
- }
- if !data.Less(first+root, first+child) {
- return
- }
- data.Swap(first+root, first+child)
- root = child
- }
-}
-
-// Auto-generated variant of sort.go:heapSort
-func heapSort_func(data lessSwap, a, b int) {
- first := a
- lo := 0
- hi := b - a
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDown_func(data, i, hi, first)
- }
- for i := hi - 1; i >= 0; i-- {
- data.Swap(first, first+i)
- siftDown_func(data, lo, i, first)
- }
-}
-
-// Auto-generated variant of sort.go:medianOfThree
-func medianOfThree_func(data lessSwap, m1, m0, m2 int) {
- if data.Less(m1, m0) {
- data.Swap(m1, m0)
- }
- if data.Less(m2, m1) {
- data.Swap(m2, m1)
- if data.Less(m1, m0) {
- data.Swap(m1, m0)
- }
- }
-}
-
-// Auto-generated variant of sort.go:swapRange
-func swapRange_func(data lessSwap, a, b, n int) {
- for i := 0; i < n; i++ {
- data.Swap(a+i, b+i)
- }
-}
-
-// Auto-generated variant of sort.go:doPivot
-func doPivot_func(data lessSwap, lo, hi int) (midlo, midhi int) {
- m := int(uint(lo+hi) >> 1)
- if hi-lo > 40 {
- s := (hi - lo) / 8
- medianOfThree_func(data, lo, lo+s, lo+2*s)
- medianOfThree_func(data, m, m-s, m+s)
- medianOfThree_func(data, hi-1, hi-1-s, hi-1-2*s)
- }
- medianOfThree_func(data, lo, m, hi-1)
- pivot := lo
- a, c := lo+1, hi-1
- for ; a < c && data.Less(a, pivot); a++ {
- }
- b := a
- for {
- for ; b < c && !data.Less(pivot, b); b++ {
- }
- for ; b < c && data.Less(pivot, c-1); c-- {
- }
- if b >= c {
- break
- }
- data.Swap(b, c-1)
- b++
- c--
- }
- protect := hi-c < 5
- if !protect && hi-c < (hi-lo)/4 {
- dups := 0
- if !data.Less(pivot, hi-1) {
- data.Swap(c, hi-1)
- c++
- dups++
- }
- if !data.Less(b-1, pivot) {
- b--
- dups++
- }
- if !data.Less(m, pivot) {
- data.Swap(m, b-1)
- b--
- dups++
- }
- protect = dups > 1
- }
- if protect {
- for {
- for ; a < b && !data.Less(b-1, pivot); b-- {
- }
- for ; a < b && data.Less(a, pivot); a++ {
- }
- if a >= b {
- break
- }
- data.Swap(a, b-1)
- a++
- b--
- }
- }
- data.Swap(pivot, b-1)
- return b - 1, c
-}
-
-// Auto-generated variant of sort.go:quickSort
-func quickSort_func(data lessSwap, a, b, maxDepth int) {
- for b-a > 12 {
- if maxDepth == 0 {
- heapSort_func(data, a, b)
- return
- }
- maxDepth--
- mlo, mhi := doPivot_func(data, a, b)
- if mlo-a < b-mhi {
- quickSort_func(data, a, mlo, maxDepth)
- a = mhi
- } else {
- quickSort_func(data, mhi, b, maxDepth)
- b = mlo
- }
- }
- if b-a > 1 {
- for i := a + 6; i < b; i++ {
- if data.Less(i, i-6) {
- data.Swap(i, i-6)
- }
- }
- insertionSort_func(data, a, b)
- }
-}
-
-// Auto-generated variant of sort.go:stable
-func stable_func(data lessSwap, n int) {
- blockSize := 20
- a, b := 0, blockSize
- for b <= n {
- insertionSort_func(data, a, b)
- a = b
- b += blockSize
- }
- insertionSort_func(data, a, n)
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMerge_func(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMerge_func(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// Auto-generated variant of sort.go:symMerge
-func symMerge_func(data lessSwap, a, m, b int) {
- if m-a == 1 {
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if data.Less(h, a) {
- i = h + 1
- } else {
- j = h
- }
- }
- for k := a; k < i-1; k++ {
- data.Swap(k, k+1)
- }
- return
- }
- if b-m == 1 {
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !data.Less(m, h) {
- i = h + 1
- } else {
- j = h
- }
- }
- for k := m; k > i; k-- {
- data.Swap(k, k-1)
- }
- return
- }
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
- for start < r {
- c := int(uint(start+r) >> 1)
- if !data.Less(p-c, c) {
- start = c + 1
- } else {
- r = c
- }
- }
- end := n - start
- if start < m && m < end {
- rotate_func(data, start, m, end)
- }
- if a < start && start < mid {
- symMerge_func(data, a, start, mid)
- }
- if mid < end && end < b {
- symMerge_func(data, mid, end, b)
- }
-}
-
-// Auto-generated variant of sort.go:rotate
-func rotate_func(data lessSwap, a, m, b int) {
- i := m - a
- j := b - m
- for i != j {
- if i > j {
- swapRange_func(data, m-i, m, j)
- i -= j
- } else {
- swapRange_func(data, m-i, m+j-i, i)
- j -= i
- }
- }
- swapRange_func(data, m-i, m, i)
-}
diff --git a/src/sort/zsortfunc.go b/src/sort/zsortfunc.go
new file mode 100644
index 0000000000..80c8a77995
--- /dev/null
+++ b/src/sort/zsortfunc.go
@@ -0,0 +1,342 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sort
+
+// insertionSort_func sorts data[a:b] using insertion sort.
+func insertionSort_func(data lessSwap, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data.Less(j, j-1); j-- {
+ data.Swap(j, j-1)
+ }
+ }
+}
+
+// siftDown_func implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDown_func(data lessSwap, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && data.Less(first+child, first+child+1) {
+ child++
+ }
+ if !data.Less(first+root, first+child) {
+ return
+ }
+ data.Swap(first+root, first+child)
+ root = child
+ }
+}
+
+func heapSort_func(data lessSwap, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown_func(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data.Swap(first, first+i)
+ siftDown_func(data, lo, i, first)
+ }
+}
+
+// Quicksort, loosely following Bentley and McIlroy,
+// "Engineering a Sort Function" SP&E November 1993.
+
+// medianOfThree_func moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree_func(data lessSwap, m1, m0, m2 int) {
+ // sort 3 elements
+ if data.Less(m1, m0) {
+ data.Swap(m1, m0)
+ }
+ // data[m0] <= data[m1]
+ if data.Less(m2, m1) {
+ data.Swap(m2, m1)
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data.Less(m1, m0) {
+ data.Swap(m1, m0)
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
+
+func swapRange_func(data lessSwap, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data.Swap(a+i, b+i)
+ }
+}
+
+func doPivot_func(data lessSwap, lo, hi int) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's "Ninther" median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThree_func(data, lo, lo+s, lo+2*s)
+ medianOfThree_func(data, m, m-s, m+s)
+ medianOfThree_func(data, hi-1, hi-1-s, hi-1-2*s)
+ }
+ medianOfThree_func(data, lo, m, hi-1)
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && data.Less(a, pivot); a++ {
+ }
+ b := a
+ for {
+ for ; b < c && !data.Less(pivot, b); b++ { // data[b] <= pivot
+ }
+ for ; b < c && data.Less(pivot, c-1); c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ data.Swap(b, c-1)
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Lets test some points for equality to pivot
+ dups := 0
+ if !data.Less(pivot, hi-1) { // data[hi-1] = pivot
+ data.Swap(c, hi-1)
+ c++
+ dups++
+ }
+ if !data.Less(b-1, pivot) { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if !data.Less(m, pivot) { // data[m] = pivot
+ data.Swap(m, b-1)
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && !data.Less(b-1, pivot); b-- { // data[b] == pivot
+ }
+ for ; a < b && data.Less(a, pivot); a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ data.Swap(a, b-1)
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ data.Swap(pivot, b-1)
+ return b - 1, c
+}
+
+func quickSort_func(data lessSwap, a, b, maxDepth int) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort_func(data, a, b)
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivot_func(data, a, b)
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSort_func(data, a, mlo, maxDepth)
+ a = mhi // i.e., quickSort_func(data, mhi, b)
+ } else {
+ quickSort_func(data, mhi, b, maxDepth)
+ b = mlo // i.e., quickSort_func(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form cause b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if data.Less(i, i-6) {
+ data.Swap(i, i-6)
+ }
+ }
+ insertionSort_func(data, a, b)
+ }
+}
+
+func stable_func(data lessSwap, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSort_func(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSort_func(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMerge_func(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMerge_func(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMerge_func merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMerge_func(data lessSwap, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data.Less(h, a) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data.Swap(k, k+1)
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !data.Less(m, h) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data.Swap(k, k-1)
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !data.Less(p-c, c) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotate_func(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMerge_func(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMerge_func(data, mid, end, b)
+ }
+}
+
+// rotate_func rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotate_func(data lessSwap, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRange_func(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRange_func(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRange_func(data, m-i, m, i)
+}
diff --git a/src/sort/zsortinterface.go b/src/sort/zsortinterface.go
new file mode 100644
index 0000000000..e0d7093678
--- /dev/null
+++ b/src/sort/zsortinterface.go
@@ -0,0 +1,342 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sort
+
+// insertionSort sorts data[a:b] using insertion sort.
+func insertionSort(data Interface, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data.Less(j, j-1); j-- {
+ data.Swap(j, j-1)
+ }
+ }
+}
+
+// siftDown implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDown(data Interface, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && data.Less(first+child, first+child+1) {
+ child++
+ }
+ if !data.Less(first+root, first+child) {
+ return
+ }
+ data.Swap(first+root, first+child)
+ root = child
+ }
+}
+
+func heapSort(data Interface, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data.Swap(first, first+i)
+ siftDown(data, lo, i, first)
+ }
+}
+
+// Quicksort, loosely following Bentley and McIlroy,
+// "Engineering a Sort Function" SP&E November 1993.
+
+// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree(data Interface, m1, m0, m2 int) {
+ // sort 3 elements
+ if data.Less(m1, m0) {
+ data.Swap(m1, m0)
+ }
+ // data[m0] <= data[m1]
+ if data.Less(m2, m1) {
+ data.Swap(m2, m1)
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data.Less(m1, m0) {
+ data.Swap(m1, m0)
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
+
+func swapRange(data Interface, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data.Swap(a+i, b+i)
+ }
+}
+
+func doPivot(data Interface, lo, hi int) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's "Ninther" median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThree(data, lo, lo+s, lo+2*s)
+ medianOfThree(data, m, m-s, m+s)
+ medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
+ }
+ medianOfThree(data, lo, m, hi-1)
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && data.Less(a, pivot); a++ {
+ }
+ b := a
+ for {
+ for ; b < c && !data.Less(pivot, b); b++ { // data[b] <= pivot
+ }
+ for ; b < c && data.Less(pivot, c-1); c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ data.Swap(b, c-1)
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Lets test some points for equality to pivot
+ dups := 0
+ if !data.Less(pivot, hi-1) { // data[hi-1] = pivot
+ data.Swap(c, hi-1)
+ c++
+ dups++
+ }
+ if !data.Less(b-1, pivot) { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if !data.Less(m, pivot) { // data[m] = pivot
+ data.Swap(m, b-1)
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && !data.Less(b-1, pivot); b-- { // data[b] == pivot
+ }
+ for ; a < b && data.Less(a, pivot); a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ data.Swap(a, b-1)
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ data.Swap(pivot, b-1)
+ return b - 1, c
+}
+
+func quickSort(data Interface, a, b, maxDepth int) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort(data, a, b)
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivot(data, a, b)
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSort(data, a, mlo, maxDepth)
+ a = mhi // i.e., quickSort(data, mhi, b)
+ } else {
+ quickSort(data, mhi, b, maxDepth)
+ b = mlo // i.e., quickSort(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form cause b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if data.Less(i, i-6) {
+ data.Swap(i, i-6)
+ }
+ }
+ insertionSort(data, a, b)
+ }
+}
+
+func stable(data Interface, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSort(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSort(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMerge(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMerge(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMerge merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMerge(data Interface, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data.Less(h, a) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data.Swap(k, k+1)
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !data.Less(m, h) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data.Swap(k, k-1)
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !data.Less(p-c, c) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotate(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMerge(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMerge(data, mid, end, b)
+ }
+}
+
+// rotate rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotate(data Interface, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRange(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRange(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRange(data, m-i, m, i)
+}
diff --git a/src/strings/builder.go b/src/strings/builder.go
index 547e52e84d..ba4df618bf 100644
--- a/src/strings/builder.go
+++ b/src/strings/builder.go
@@ -17,10 +17,9 @@ type Builder struct {
buf []byte
}
-// noescape hides a pointer from escape analysis. noescape is
-// the identity function but escape analysis doesn't think the
-// output depends on the input. noescape is inlined and currently
-// compiles down to zero instructions.
+// noescape hides a pointer from escape analysis. It is the identity function
+// but escape analysis doesn't think the output depends on the input.
+// noescape is inlined and currently compiles down to zero instructions.
// USE CAREFULLY!
// This was copied from the runtime; see issues 23382 and 7921.
//go:nosplit
diff --git a/src/strings/strings.go b/src/strings/strings.go
index c5a29e95f6..5793d9e26f 100644
--- a/src/strings/strings.go
+++ b/src/strings/strings.go
@@ -270,6 +270,8 @@ func genSplit(s, sep string, sepSave, n int) []string {
//
// Edge cases for s and sep (for example, empty strings) are handled
// as described in the documentation for Split.
+//
+// To split around the first instance of a separator, see Cut.
func SplitN(s, sep string, n int) []string { return genSplit(s, sep, 0, n) }
// SplitAfterN slices s into substrings after each instance of sep and
@@ -296,6 +298,8 @@ func SplitAfterN(s, sep string, n int) []string {
// and sep are empty, Split returns an empty slice.
//
// It is equivalent to SplitN with a count of -1.
+//
+// To split around the first instance of a separator, see Cut.
func Split(s, sep string) []string { return genSplit(s, sep, 0, -1) }
// SplitAfter slices s into all substrings after each instance of sep and
diff --git a/src/syscall/asm_linux_ppc64x.s b/src/syscall/asm_linux_ppc64x.s
index 044a479c00..1f5cb37ffe 100644
--- a/src/syscall/asm_linux_ppc64x.s
+++ b/src/syscall/asm_linux_ppc64x.s
@@ -30,7 +30,7 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
RET
ok:
MOVD R3, r1+32(FP) // r1
- MOVD R4, r2+40(FP) // r2
+ MOVD R0, r2+40(FP) // r2
MOVD R0, err+48(FP) // errno
BL runtime·exitsyscall(SB)
RET
@@ -54,7 +54,7 @@ TEXT ·Syscall6(SB),NOSPLIT,$0-80
RET
ok6:
MOVD R3, r1+56(FP) // r1
- MOVD R4, r2+64(FP) // r2
+ MOVD R0, r2+64(FP) // r2
MOVD R0, err+72(FP) // errno
BL runtime·exitsyscall(SB)
RET
@@ -76,7 +76,7 @@ TEXT ·RawSyscall(SB),NOSPLIT,$0-56
RET
ok1:
MOVD R3, r1+32(FP) // r1
- MOVD R4, r2+40(FP) // r2
+ MOVD R0, r2+40(FP) // r2
MOVD R0, err+48(FP) // errno
RET
@@ -97,7 +97,7 @@ TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
RET
ok2:
MOVD R3, r1+56(FP) // r1
- MOVD R4, r2+64(FP) // r2
+ MOVD R0, r2+64(FP) // r2
MOVD R0, err+72(FP) // errno
RET
@@ -131,5 +131,5 @@ TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
MOVD trap+0(FP), R9 // syscall entry
SYSCALL R9
MOVD R3, r1+32(FP)
- MOVD R4, r2+40(FP)
+ MOVD R0, r2+40(FP)
RET
diff --git a/src/syscall/syscall_linux.go b/src/syscall/syscall_linux.go
index abcf1d5dfe..e3891b0855 100644
--- a/src/syscall/syscall_linux.go
+++ b/src/syscall/syscall_linux.go
@@ -958,62 +958,11 @@ func Getpgrp() (pid int) {
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tv *Timeval) (err error)
-// allThreadsCaller holds the input and output state for performing a
-// allThreadsSyscall that needs to synchronize all OS thread state. Linux
-// generally does not always support this natively, so we have to
-// manipulate the runtime to fix things up.
-type allThreadsCaller struct {
- // arguments
- trap, a1, a2, a3, a4, a5, a6 uintptr
-
- // return values (only set by 0th invocation)
- r1, r2 uintptr
-
- // err is the error code
- err Errno
-}
-
-// doSyscall is a callback for executing a syscall on the current m
-// (OS thread).
-//go:nosplit
-//go:norace
-func (pc *allThreadsCaller) doSyscall(initial bool) bool {
- r1, r2, err := RawSyscall(pc.trap, pc.a1, pc.a2, pc.a3)
- if initial {
- pc.r1 = r1
- pc.r2 = r2
- pc.err = err
- } else if pc.r1 != r1 || (archHonorsR2 && pc.r2 != r2) || pc.err != err {
- print("trap:", pc.trap, ", a123=[", pc.a1, ",", pc.a2, ",", pc.a3, "]\n")
- print("results: got {r1=", r1, ",r2=", r2, ",err=", err, "}, want {r1=", pc.r1, ",r2=", pc.r2, ",r3=", pc.err, "}\n")
- panic("AllThreadsSyscall results differ between threads; runtime corrupted")
- }
- return err == 0
-}
-
-// doSyscall6 is a callback for executing a syscall6 on the current m
-// (OS thread).
-//go:nosplit
-//go:norace
-func (pc *allThreadsCaller) doSyscall6(initial bool) bool {
- r1, r2, err := RawSyscall6(pc.trap, pc.a1, pc.a2, pc.a3, pc.a4, pc.a5, pc.a6)
- if initial {
- pc.r1 = r1
- pc.r2 = r2
- pc.err = err
- } else if pc.r1 != r1 || (archHonorsR2 && pc.r2 != r2) || pc.err != err {
- print("trap:", pc.trap, ", a123456=[", pc.a1, ",", pc.a2, ",", pc.a3, ",", pc.a4, ",", pc.a5, ",", pc.a6, "]\n")
- print("results: got {r1=", r1, ",r2=", r2, ",err=", err, "}, want {r1=", pc.r1, ",r2=", pc.r2, ",r3=", pc.err, "}\n")
- panic("AllThreadsSyscall6 results differ between threads; runtime corrupted")
- }
- return err == 0
-}
-
-// Provided by runtime.syscall_runtime_doAllThreadsSyscall which
-// serializes the world and invokes the fn on each OS thread (what the
-// runtime refers to as m's). Once this function returns, all threads
-// are in sync.
-func runtime_doAllThreadsSyscall(fn func(bool) bool)
+// Provided by runtime.syscall_runtime_doAllThreadsSyscall which stops the
+// world and invokes the syscall on each OS thread. Once this function returns,
+// all threads are in sync.
+//go:uintptrescapes
+func runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
// AllThreadsSyscall performs a syscall on each OS thread of the Go
// runtime. It first invokes the syscall on one thread. Should that
@@ -1035,17 +984,8 @@ func AllThreadsSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) {
if cgo_libc_setegid != nil {
return minus1, minus1, ENOTSUP
}
- pc := &allThreadsCaller{
- trap: trap,
- a1: a1,
- a2: a2,
- a3: a3,
- }
- runtime_doAllThreadsSyscall(pc.doSyscall)
- r1 = pc.r1
- r2 = pc.r2
- err = pc.err
- return
+ r1, r2, errno := runtime_doAllThreadsSyscall(trap, a1, a2, a3, 0, 0, 0)
+ return r1, r2, Errno(errno)
}
// AllThreadsSyscall6 is like AllThreadsSyscall, but extended to six
@@ -1055,20 +995,8 @@ func AllThreadsSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, e
if cgo_libc_setegid != nil {
return minus1, minus1, ENOTSUP
}
- pc := &allThreadsCaller{
- trap: trap,
- a1: a1,
- a2: a2,
- a3: a3,
- a4: a4,
- a5: a5,
- a6: a6,
- }
- runtime_doAllThreadsSyscall(pc.doSyscall6)
- r1 = pc.r1
- r2 = pc.r2
- err = pc.err
- return
+ r1, r2, errno := runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6)
+ return r1, r2, Errno(errno)
}
// linked by runtime.cgocall.go
diff --git a/src/syscall/syscall_linux_386.go b/src/syscall/syscall_linux_386.go
index 98442055d8..ef0f53468a 100644
--- a/src/syscall/syscall_linux_386.go
+++ b/src/syscall/syscall_linux_386.go
@@ -6,12 +6,6 @@ package syscall
import "unsafe"
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall".
-const archHonorsR2 = true
-
const _SYS_setgroups = SYS_SETGROUPS32
func setTimespec(sec, nsec int64) Timespec {
@@ -189,14 +183,6 @@ const (
func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno)
func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno)
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
- if e != 0 {
- err = e
- }
- return
-}
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
if e != 0 {
diff --git a/src/syscall/syscall_linux_amd64.go b/src/syscall/syscall_linux_amd64.go
index 04acd063fa..ea5229e8a0 100644
--- a/src/syscall/syscall_linux_amd64.go
+++ b/src/syscall/syscall_linux_amd64.go
@@ -4,12 +4,6 @@
package syscall
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall".
-const archHonorsR2 = true
-
const _SYS_setgroups = SYS_SETGROUPS
//sys Dup2(oldfd int, newfd int) (err error)
@@ -43,7 +37,6 @@ const _SYS_setgroups = SYS_SETGROUPS
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
//sys Truncate(path string, length int64) (err error)
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
diff --git a/src/syscall/syscall_linux_arm.go b/src/syscall/syscall_linux_arm.go
index f2f342e7ed..f00149a1d4 100644
--- a/src/syscall/syscall_linux_arm.go
+++ b/src/syscall/syscall_linux_arm.go
@@ -6,12 +6,6 @@ package syscall
import "unsafe"
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall". [EABI assumed.]
-const archHonorsR2 = true
-
const _SYS_setgroups = SYS_SETGROUPS32
func setTimespec(sec, nsec int64) Timespec {
@@ -34,7 +28,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
return newoffset, nil
}
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
diff --git a/src/syscall/syscall_linux_arm64.go b/src/syscall/syscall_linux_arm64.go
index 990e732f35..9ed20f43ed 100644
--- a/src/syscall/syscall_linux_arm64.go
+++ b/src/syscall/syscall_linux_arm64.go
@@ -6,12 +6,6 @@ package syscall
import "unsafe"
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall".
-const archHonorsR2 = true
-
const _SYS_setgroups = SYS_SETGROUPS
func EpollCreate(size int) (fd int, err error) {
@@ -60,7 +54,6 @@ func Lstat(path string, stat *Stat_t) (err error) {
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE2
//sys Truncate(path string, length int64) (err error)
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
diff --git a/src/syscall/syscall_linux_mips64x.go b/src/syscall/syscall_linux_mips64x.go
index 7c9dd80614..b56b8f06b6 100644
--- a/src/syscall/syscall_linux_mips64x.go
+++ b/src/syscall/syscall_linux_mips64x.go
@@ -6,12 +6,6 @@
package syscall
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall".
-const archHonorsR2 = true
-
const _SYS_setgroups = SYS_SETGROUPS
//sys Dup2(oldfd int, newfd int) (err error)
@@ -43,7 +37,6 @@ const _SYS_setgroups = SYS_SETGROUPS
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
//sys Truncate(path string, length int64) (err error)
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
diff --git a/src/syscall/syscall_linux_mipsx.go b/src/syscall/syscall_linux_mipsx.go
index 741eeb14bb..c9c9f94e42 100644
--- a/src/syscall/syscall_linux_mipsx.go
+++ b/src/syscall/syscall_linux_mipsx.go
@@ -8,12 +8,6 @@ package syscall
import "unsafe"
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall".
-const archHonorsR2 = true
-
const _SYS_setgroups = SYS_SETGROUPS
func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno)
@@ -43,7 +37,6 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
diff --git a/src/syscall/syscall_linux_ppc64x.go b/src/syscall/syscall_linux_ppc64x.go
index cc1b72e0e7..4180a17f3b 100644
--- a/src/syscall/syscall_linux_ppc64x.go
+++ b/src/syscall/syscall_linux_ppc64x.go
@@ -6,12 +6,6 @@
package syscall
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall".
-const archHonorsR2 = false
-
const _SYS_setgroups = SYS_SETGROUPS
//sys Dup2(oldfd int, newfd int) (err error)
@@ -49,7 +43,6 @@ const _SYS_setgroups = SYS_SETGROUPS
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys Truncate(path string, length int64) (err error)
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
diff --git a/src/syscall/syscall_linux_riscv64.go b/src/syscall/syscall_linux_riscv64.go
index bcb89c6e9a..a5fb18aa85 100644
--- a/src/syscall/syscall_linux_riscv64.go
+++ b/src/syscall/syscall_linux_riscv64.go
@@ -6,12 +6,6 @@ package syscall
import "unsafe"
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall".
-const archHonorsR2 = true
-
const _SYS_setgroups = SYS_SETGROUPS
func EpollCreate(size int) (fd int, err error) {
@@ -64,7 +58,6 @@ func Lstat(path string, stat *Stat_t) (err error) {
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
//sys Truncate(path string, length int64) (err error)
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
diff --git a/src/syscall/syscall_linux_s390x.go b/src/syscall/syscall_linux_s390x.go
index 123664f5b2..5d6f4d2526 100644
--- a/src/syscall/syscall_linux_s390x.go
+++ b/src/syscall/syscall_linux_s390x.go
@@ -6,12 +6,6 @@ package syscall
import "unsafe"
-// archHonorsR2 captures the fact that r2 is honored by the
-// runtime.GOARCH. Syscall conventions are generally r1, r2, err :=
-// syscall(trap, ...). Not all architectures define r2 in their
-// ABI. See "man syscall".
-const archHonorsR2 = true
-
const _SYS_setgroups = SYS_SETGROUPS
//sys Dup2(oldfd int, newfd int) (err error)
@@ -118,14 +112,6 @@ const (
func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno)
func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno)
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
- if e != 0 {
- err = e
- }
- return
-}
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
if e != 0 {
diff --git a/src/syscall/syscall_linux_test.go b/src/syscall/syscall_linux_test.go
index 8d828be015..0444b64266 100644
--- a/src/syscall/syscall_linux_test.go
+++ b/src/syscall/syscall_linux_test.go
@@ -15,6 +15,7 @@ import (
"sort"
"strconv"
"strings"
+ "sync"
"syscall"
"testing"
"unsafe"
@@ -565,3 +566,73 @@ func TestSetuidEtc(t *testing.T) {
}
}
}
+
+// TestAllThreadsSyscallError verifies that errors are properly returned when
+// the syscall fails on the original thread.
+func TestAllThreadsSyscallError(t *testing.T) {
+ // SYS_CAPGET takes pointers as the first two arguments. Since we pass
+ // 0, we expect to get EFAULT back.
+ r1, r2, err := syscall.AllThreadsSyscall(syscall.SYS_CAPGET, 0, 0, 0)
+ if err == syscall.ENOTSUP {
+ t.Skip("AllThreadsSyscall disabled with cgo")
+ }
+ if err != syscall.EFAULT {
+ t.Errorf("AllThreadSyscall(SYS_CAPGET) got %d, %d, %v, want err %v", r1, r2, err, syscall.EFAULT)
+ }
+}
+
+// TestAllThreadsSyscallBlockedSyscall confirms that AllThreadsSyscall
+// can interrupt threads in long-running system calls. This test will
+// deadlock if this doesn't work correctly.
+func TestAllThreadsSyscallBlockedSyscall(t *testing.T) {
+ if _, _, err := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, PR_SET_KEEPCAPS, 0, 0); err == syscall.ENOTSUP {
+ t.Skip("AllThreadsSyscall disabled with cgo")
+ }
+
+ rd, wr, err := os.Pipe()
+ if err != nil {
+ t.Fatalf("unable to obtain a pipe: %v", err)
+ }
+
+ // Perform a blocking read on the pipe.
+ var wg sync.WaitGroup
+ ready := make(chan bool)
+ wg.Add(1)
+ go func() {
+ data := make([]byte, 1)
+
+ // To narrow the window we have to wait for this
+ // goroutine to block in read, synchronize just before
+ // calling read.
+ ready <- true
+
+ // We use syscall.Read directly to avoid the poller.
+ // This will return when the write side is closed.
+ n, err := syscall.Read(int(rd.Fd()), data)
+ if !(n == 0 && err == nil) {
+ t.Errorf("expected read to return 0, got %d, %s", n, err)
+ }
+
+ // Clean up rd and also ensure rd stays reachable so
+ // it doesn't get closed by GC.
+ rd.Close()
+ wg.Done()
+ }()
+ <-ready
+
+ // Loop here to give the goroutine more time to block in read.
+ // Generally this will trigger on the first iteration anyway.
+ pid := syscall.Getpid()
+ for i := 0; i < 100; i++ {
+ if id, _, e := syscall.AllThreadsSyscall(syscall.SYS_GETPID, 0, 0, 0); e != 0 {
+ t.Errorf("[%d] getpid failed: %v", i, e)
+ } else if int(id) != pid {
+ t.Errorf("[%d] getpid got=%d, want=%d", i, id, pid)
+ }
+ // Provide an explicit opportunity for this goroutine
+ // to change Ms.
+ runtime.Gosched()
+ }
+ wr.Close()
+ wg.Wait()
+}
diff --git a/src/syscall/syscall_unix_test.go b/src/syscall/syscall_unix_test.go
index e4af0ba4a5..1ef2634fa1 100644
--- a/src/syscall/syscall_unix_test.go
+++ b/src/syscall/syscall_unix_test.go
@@ -328,8 +328,7 @@ func TestUnixRightsRoundtrip(t *testing.T) {
func TestRlimit(t *testing.T) {
var rlimit, zero syscall.Rlimit
- err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
- if err != nil {
+ if err := syscall.Getrlimit(syscall.RLIMIT_CPU, &rlimit); err != nil {
t.Fatalf("Getrlimit: save failed: %v", err)
}
if zero == rlimit {
@@ -337,31 +336,19 @@ func TestRlimit(t *testing.T) {
}
set := rlimit
set.Cur = set.Max - 1
- if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && set.Cur > 4096 {
- // rlim_min for RLIMIT_NOFILE should be equal to
- // or lower than kern.maxfilesperproc, which on
- // some machines are 4096. See #40564.
- set.Cur = 4096
- }
- err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &set)
- if err != nil {
+ if err := syscall.Setrlimit(syscall.RLIMIT_CPU, &set); err != nil {
t.Fatalf("Setrlimit: set failed: %#v %v", set, err)
}
var get syscall.Rlimit
- err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &get)
- if err != nil {
+ if err := syscall.Getrlimit(syscall.RLIMIT_CPU, &get); err != nil {
t.Fatalf("Getrlimit: get failed: %v", err)
}
set = rlimit
set.Cur = set.Max - 1
- if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && set.Cur > 4096 {
- set.Cur = 4096
- }
if set != get {
t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get)
}
- err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit)
- if err != nil {
+ if err := syscall.Setrlimit(syscall.RLIMIT_CPU, &rlimit); err != nil {
t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err)
}
}
diff --git a/src/syscall/zsyscall_linux_amd64.go b/src/syscall/zsyscall_linux_amd64.go
index 2059271324..3e22e20907 100644
--- a/src/syscall/zsyscall_linux_amd64.go
+++ b/src/syscall/zsyscall_linux_amd64.go
@@ -1399,17 +1399,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_arm.go b/src/syscall/zsyscall_linux_arm.go
index 50498c6eb6..38907a0b35 100644
--- a/src/syscall/zsyscall_linux_arm.go
+++ b/src/syscall/zsyscall_linux_arm.go
@@ -1057,17 +1057,6 @@ func Munlockall() (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_arm64.go b/src/syscall/zsyscall_linux_arm64.go
index 6714123f9c..f335c062d5 100644
--- a/src/syscall/zsyscall_linux_arm64.go
+++ b/src/syscall/zsyscall_linux_arm64.go
@@ -1363,17 +1363,6 @@ func Truncate(path string, length int64) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_mips.go b/src/syscall/zsyscall_linux_mips.go
index c6f4878ff7..f5f73895cc 100644
--- a/src/syscall/zsyscall_linux_mips.go
+++ b/src/syscall/zsyscall_linux_mips.go
@@ -1332,17 +1332,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_mips64.go b/src/syscall/zsyscall_linux_mips64.go
index 5187c28ebb..32f3c32b9b 100644
--- a/src/syscall/zsyscall_linux_mips64.go
+++ b/src/syscall/zsyscall_linux_mips64.go
@@ -1388,17 +1388,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_mips64le.go b/src/syscall/zsyscall_linux_mips64le.go
index f3dacfeeda..62dcff45a1 100644
--- a/src/syscall/zsyscall_linux_mips64le.go
+++ b/src/syscall/zsyscall_linux_mips64le.go
@@ -1388,17 +1388,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_mipsle.go b/src/syscall/zsyscall_linux_mipsle.go
index fbc543709f..4761246536 100644
--- a/src/syscall/zsyscall_linux_mipsle.go
+++ b/src/syscall/zsyscall_linux_mipsle.go
@@ -1332,17 +1332,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_ppc64.go b/src/syscall/zsyscall_linux_ppc64.go
index b71dca2b37..c9b1365e74 100644
--- a/src/syscall/zsyscall_linux_ppc64.go
+++ b/src/syscall/zsyscall_linux_ppc64.go
@@ -1466,17 +1466,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_ppc64le.go b/src/syscall/zsyscall_linux_ppc64le.go
index 193fbbc541..0807390894 100644
--- a/src/syscall/zsyscall_linux_ppc64le.go
+++ b/src/syscall/zsyscall_linux_ppc64le.go
@@ -1466,17 +1466,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/syscall/zsyscall_linux_riscv64.go b/src/syscall/zsyscall_linux_riscv64.go
index 33b1e9b431..1661d04221 100644
--- a/src/syscall/zsyscall_linux_riscv64.go
+++ b/src/syscall/zsyscall_linux_riscv64.go
@@ -1363,17 +1363,6 @@ func Truncate(path string, length int64) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
diff --git a/src/testing/fuzz.go b/src/testing/fuzz.go
index e1d7544f7a..b5e1339deb 100644
--- a/src/testing/fuzz.go
+++ b/src/testing/fuzz.go
@@ -227,6 +227,9 @@ func (f *F) Fuzz(ff any) {
if fnType.NumIn() < 2 || fnType.In(0) != reflect.TypeOf((*T)(nil)) {
panic("testing: fuzz target must receive at least two arguments, where the first argument is a *T")
}
+ if fnType.NumOut() != 0 {
+ panic("testing: fuzz target must not return a value")
+ }
// Save the types of the function to compare against the corpus.
var types []reflect.Type
diff --git a/src/testing/testing.go b/src/testing/testing.go
index df4dfe4490..05d8f22aff 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -1122,7 +1122,7 @@ func removeAll(path string) error {
)
for {
err := os.RemoveAll(path)
- if !isWindowsAccessDenied(err) {
+ if !isWindowsRetryable(err) {
return err
}
if start.IsZero() {
diff --git a/src/testing/testing_other.go b/src/testing/testing_other.go
index 29496d81bc..99a6276a4a 100644
--- a/src/testing/testing_other.go
+++ b/src/testing/testing_other.go
@@ -6,8 +6,8 @@
package testing
-// isWindowsAccessDenied reports whether err is ERROR_ACCESS_DENIED,
-// which is defined only on Windows.
-func isWindowsAccessDenied(err error) bool {
+// isWindowsRetryable reports whether err is a Windows error code
+// that may be fixed by retrying a failed filesystem operation.
+func isWindowsRetryable(err error) bool {
return false
}
diff --git a/src/testing/testing_windows.go b/src/testing/testing_windows.go
index bc76cb80cc..fd48ae9579 100644
--- a/src/testing/testing_windows.go
+++ b/src/testing/testing_windows.go
@@ -8,11 +8,25 @@ package testing
import (
"errors"
+ "internal/syscall/windows"
"syscall"
)
-// isWindowsAccessDenied reports whether err is ERROR_ACCESS_DENIED,
-// which is defined only on Windows.
-func isWindowsAccessDenied(err error) bool {
- return errors.Is(err, syscall.ERROR_ACCESS_DENIED)
+// isWindowsRetryable reports whether err is a Windows error code
+// that may be fixed by retrying a failed filesystem operation.
+func isWindowsRetryable(err error) bool {
+ for {
+ unwrapped := errors.Unwrap(err)
+ if unwrapped == nil {
+ break
+ }
+ err = unwrapped
+ }
+ if err == syscall.ERROR_ACCESS_DENIED {
+ return true // Observed in https://go.dev/issue/50051.
+ }
+ if err == windows.ERROR_SHARING_VIOLATION {
+ return true // Observed in https://go.dev/issue/51442.
+ }
+ return false
}
diff --git a/src/time/format.go b/src/time/format.go
index 5fb9cdc969..33e6543289 100644
--- a/src/time/format.go
+++ b/src/time/format.go
@@ -914,6 +914,7 @@ func skip(value, prefix string) (string, error) {
// field immediately after the seconds field, even if the layout does not
// signify its presence. In that case either a comma or a decimal point
// followed by a maximal series of digits is parsed as a fractional second.
+// Fractional seconds are truncated to nanosecond precision.
//
// Elements omitted from the layout are assumed to be zero or, when
// zero is impossible, one, so parsing "3:04pm" returns the time
diff --git a/src/unicode/utf8/utf8.go b/src/unicode/utf8/utf8.go
index 6938c7e6a7..1e9f666e23 100644
--- a/src/unicode/utf8/utf8.go
+++ b/src/unicode/utf8/utf8.go
@@ -475,6 +475,11 @@ func RuneStart(b byte) bool { return b&0xC0 != 0x80 }
// Valid reports whether p consists entirely of valid UTF-8-encoded runes.
func Valid(p []byte) bool {
+ // This optimization avoids the need to recompute the capacity
+ // when generating code for p[8:], bringing it to parity with
+ // ValidString, which was 20% faster on long ASCII strings.
+ p = p[:len(p):len(p)]
+
// Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
for len(p) >= 8 {
// Combining two 32 bit loads allows the same code to be used
diff --git a/src/unicode/utf8/utf8_test.go b/src/unicode/utf8/utf8_test.go
index e9be4d2d63..e7c31222cc 100644
--- a/src/unicode/utf8/utf8_test.go
+++ b/src/unicode/utf8/utf8_test.go
@@ -6,6 +6,7 @@ package utf8_test
import (
"bytes"
+ "strings"
"testing"
"unicode"
. "unicode/utf8"
@@ -554,6 +555,8 @@ func BenchmarkRuneCountInStringTenJapaneseChars(b *testing.B) {
}
}
+var ascii100000 = strings.Repeat("0123456789", 10000)
+
func BenchmarkValidTenASCIIChars(b *testing.B) {
s := []byte("0123456789")
for i := 0; i < b.N; i++ {
@@ -561,12 +564,32 @@ func BenchmarkValidTenASCIIChars(b *testing.B) {
}
}
+func BenchmarkValid100KASCIIChars(b *testing.B) {
+ s := []byte(ascii100000)
+ for i := 0; i < b.N; i++ {
+ Valid(s)
+ }
+}
+
func BenchmarkValidTenJapaneseChars(b *testing.B) {
s := []byte("日本語日本語日本語日")
for i := 0; i < b.N; i++ {
Valid(s)
}
}
+func BenchmarkValidLongMostlyASCII(b *testing.B) {
+ longMostlyASCII := []byte(longStringMostlyASCII)
+ for i := 0; i < b.N; i++ {
+ Valid(longMostlyASCII)
+ }
+}
+
+func BenchmarkValidLongJapanese(b *testing.B) {
+ longJapanese := []byte(longStringJapanese)
+ for i := 0; i < b.N; i++ {
+ Valid(longJapanese)
+ }
+}
func BenchmarkValidStringTenASCIIChars(b *testing.B) {
for i := 0; i < b.N; i++ {
@@ -574,12 +597,47 @@ func BenchmarkValidStringTenASCIIChars(b *testing.B) {
}
}
+func BenchmarkValidString100KASCIIChars(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ValidString(ascii100000)
+ }
+}
+
func BenchmarkValidStringTenJapaneseChars(b *testing.B) {
for i := 0; i < b.N; i++ {
ValidString("日本語日本語日本語日")
}
}
+func BenchmarkValidStringLongMostlyASCII(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ValidString(longStringMostlyASCII)
+ }
+}
+
+func BenchmarkValidStringLongJapanese(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ValidString(longStringJapanese)
+ }
+}
+
+var longStringMostlyASCII string // ~100KB, ~97% ASCII
+var longStringJapanese string // ~100KB, non-ASCII
+
+func init() {
+ const japanese = "日本語日本語日本語日"
+ var b bytes.Buffer
+ for i := 0; b.Len() < 100_000; i++ {
+ if i%100 == 0 {
+ b.WriteString(japanese)
+ } else {
+ b.WriteString("0123456789")
+ }
+ }
+ longStringMostlyASCII = b.String()
+ longStringJapanese = strings.Repeat(japanese, 100_000/len(japanese))
+}
+
func BenchmarkEncodeASCIIRune(b *testing.B) {
buf := make([]byte, UTFMax)
for i := 0; i < b.N; i++ {