From 22ac328856ae4c0dcd3d770f50aac5a2df498989 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 15 Sep 2025 22:50:51 -0400 Subject: cmd/link: make -w behavior consistent on Windows On UNIX-like platforms, the -w flag disables DWARF, and the -s flag implies -w (so it disables both the symbol table and DWARF). The implied -w can be negated with -w=0, i.e. -s -w=0 disables the symbol table but keeps the DWARF. Currently, this negation doesn't work on Windows. This CL makes it so, so it is consistent on all platforms (that support DWARF). Change-Id: I19764a15768433afe333b37061cea16f06cb901b Reviewed-on: https://go-review.googlesource.com/c/go/+/703998 Reviewed-by: Than McIntosh LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/link/dwarf_test.go | 50 ++++++++++++++++++++++++++++++++++++++++++ src/cmd/link/internal/ld/pe.go | 3 --- 2 files changed, 50 insertions(+), 3 deletions(-) (limited to 'src/cmd') diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go index d269aa70c6..5a464fccf3 100644 --- a/src/cmd/link/dwarf_test.go +++ b/src/cmd/link/dwarf_test.go @@ -358,3 +358,53 @@ func TestDWARFLocationList(t *testing.T) { } } } + +func TestFlagW(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + tmpdir := t.TempDir() + src := filepath.Join(tmpdir, "a.go") + err := os.WriteFile(src, []byte(helloSrc), 0666) + if err != nil { + t.Fatal(err) + } + + tests := []struct { + flag string + wantDWARF bool + }{ + {"-w", false}, // -w flag disables DWARF + {"-s", false}, // -s implies -w + {"-s -w=0", true}, // -w=0 negates the implied -w + } + for _, test := range tests { + name := strings.ReplaceAll(test.flag, " ", "_") + t.Run(name, func(t *testing.T) { + ldflags := "-ldflags=" + test.flag + exe := filepath.Join(t.TempDir(), "a.exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", ldflags, "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v\n%s", err, out) + } + + f, err := 
objfile.Open(exe) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + d, err := f.DWARF() + if test.wantDWARF { + if err != nil { + t.Errorf("want binary with DWARF, got error %v", err) + } + } else { + if d != nil { + t.Errorf("want binary with no DWARF, got DWARF") + } + } + }) + } +} diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index c290410b0e..5219a98dd4 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -487,9 +487,6 @@ func (f *peFile) addDWARFSection(name string, size int) *peSection { // addDWARF adds DWARF information to the COFF file f. func (f *peFile) addDWARF() { - if *FlagS { // disable symbol table - return - } if *FlagW { // disable dwarf return } -- cgit v1.3-5-g9baa From a27261c42fcebf601587725714b9ef53c47b06b3 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 17 Sep 2025 22:17:41 -0400 Subject: go/types,types2: allow new(expr) For #45624 Change-Id: I6d77a2a1d6095cac0edc36060cbf98c72b749404 Reviewed-on: https://go-review.googlesource.com/c/go/+/704935 Auto-Submit: Alan Donovan Reviewed-by: Robert Findley LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/types2/builtins.go | 27 +++++++++++-- src/cmd/compile/internal/types2/version.go | 1 + src/go/types/builtins.go | 27 +++++++++++-- src/go/types/version.go | 1 + src/internal/types/testdata/check/builtins0.go | 46 ++++++++++++++++------ src/internal/types/testdata/check/go1_25.go | 13 ++++++ .../types/testdata/fixedbugs/issue43125.go | 8 ---- test/used.go | 2 +- 8 files changed, 97 insertions(+), 28 deletions(-) create mode 100644 src/internal/types/testdata/check/go1_25.go delete mode 100644 src/internal/types/testdata/fixedbugs/issue43125.go (limited to 'src/cmd') diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go index 4bb2135755..3de2857ed4 100644 --- a/src/cmd/compile/internal/types2/builtins.go +++ b/src/cmd/compile/internal/types2/builtins.go @@ -636,11 
+636,30 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( } case _New: - // new(T) + // new(T) or new(expr) // (no argument evaluated yet) - T := check.varType(argList[0]) - if !isValid(T) { - return + arg := argList[0] + check.exprOrType(x, arg, true) + var T Type + switch x.mode { + case builtin: + check.errorf(x, UncalledBuiltin, "%s must be called", x) + x.mode = invalid + case typexpr: + // new(T) + T = x.typ + if !isValid(T) { + return + } + default: + // new(expr) + check.verifyVersionf(call.Fun, go1_26, "new(expr)") + T = Default(x.typ) + if T != x.typ { + // untyped constant: check for overflow. + check.assignment(x, T, "argument to new") + } + check.validVarType(arg, T) } x.mode = value diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go index b555f398da..765b0f7e9a 100644 --- a/src/cmd/compile/internal/types2/version.go +++ b/src/cmd/compile/internal/types2/version.go @@ -43,6 +43,7 @@ var ( go1_21 = asGoVersion("go1.21") go1_22 = asGoVersion("go1.22") go1_23 = asGoVersion("go1.23") + go1_26 = asGoVersion("go1.26") // current (deployed) Go version go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version)) diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go index e9f2b3e21d..1163321ecd 100644 --- a/src/go/types/builtins.go +++ b/src/go/types/builtins.go @@ -639,11 +639,30 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b } case _New: - // new(T) + // new(T) or new(expr) // (no argument evaluated yet) - T := check.varType(argList[0]) - if !isValid(T) { - return + arg := argList[0] + check.exprOrType(x, arg, true) + var T Type + switch x.mode { + case builtin: + check.errorf(x, UncalledBuiltin, "%s must be called", x) + x.mode = invalid + case typexpr: + // new(T) + T = x.typ + if !isValid(T) { + return + } + default: + // new(expr) + check.verifyVersionf(call.Fun, go1_26, "new(expr)") + T = Default(x.typ) + if T != x.typ 
{ + // untyped constant: check for overflow. + check.assignment(x, T, "argument to new") + } + check.validVarType(arg, T) } x.mode = value diff --git a/src/go/types/version.go b/src/go/types/version.go index 2a2d341205..8133110398 100644 --- a/src/go/types/version.go +++ b/src/go/types/version.go @@ -43,6 +43,7 @@ var ( go1_21 = asGoVersion("go1.21") go1_22 = asGoVersion("go1.22") go1_23 = asGoVersion("go1.23") + go1_26 = asGoVersion("go1.26") // current (deployed) Go version go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version)) diff --git a/src/internal/types/testdata/check/builtins0.go b/src/internal/types/testdata/check/builtins0.go index ea30fbcbe7..e326b92ac7 100644 --- a/src/internal/types/testdata/check/builtins0.go +++ b/src/internal/types/testdata/check/builtins0.go @@ -609,24 +609,48 @@ func min2() { ) } -func new1() { - _ = new() // ERROR "not enough arguments" +func newInvalid() { + f2 := func() (x, y int) { return } + + _ = new() // ERROR "not enough arguments" _ = new(1, 2) // ERROR "too many arguments" - _ = new("foo" /* ERROR "not a type" */) - p := new(float64) + new /* ERROR "not used" */ (int) + _ = &new /* ERROR "cannot take address" */ (int) + _ = new(int... /* ERROR "invalid use of ..." */) + _ = new(f0 /* ERROR "f0() (no value) used as value or type" */ ()) + _ = new(len /* ERROR "len (built-in) must be called" */) + _ = new(1 /* ERROR "argument to new (overflows)" */ << 70) + _ = new(f2 /* ERRORx "multiple-value.*in single-value context" */ ()) +} + +// new(T) +func newType() { _ = new(struct{ x, y int }) + + p := new(float64) q := new(*float64) _ = *p == **q - new /* ERROR "not used" */ (int) - _ = &new /* ERROR "cannot take address" */ (int) - - _ = new(int... /* ERROR "invalid use of ..." 
*/ ) } -func new2() { +// new(expr), added in go1.26 +func newExpr() { f1 := func() (x []int) { return } - _ = new(f0 /* ERROR "not a type" */ ()) - _ = new(f1 /* ERROR "not a type" */ ()) + var ( + _ *[]int = new(f1()) + _ *func() []int = new(f1) + _ *bool = new(false) + _ *int = new(123) + _ *float64 = new(1.0) + _ *uint = new(uint(3)) + _ *rune = new('a') + _ *string = new("A") + _ *struct{} = new(struct{}{}) + _ *any = new(any) + + // from issue 43125 + _ = new(-1) + _ = new(1 + 1) + ) } func panic1() { diff --git a/src/internal/types/testdata/check/go1_25.go b/src/internal/types/testdata/check/go1_25.go new file mode 100644 index 0000000000..b2ace83343 --- /dev/null +++ b/src/internal/types/testdata/check/go1_25.go @@ -0,0 +1,13 @@ +// -lang=go1.25 + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. + +//go:build go1.25 + +package p + +var _ = new /* ERROR "new(expr) requires go1.26 or later" */ (123) diff --git a/src/internal/types/testdata/fixedbugs/issue43125.go b/src/internal/types/testdata/fixedbugs/issue43125.go deleted file mode 100644 index d0d6feb2a8..0000000000 --- a/src/internal/types/testdata/fixedbugs/issue43125.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package p -var _ = new(- /* ERROR "not a type" */ 1) -var _ = new(1 /* ERROR "not a type" */ + 1) diff --git a/test/used.go b/test/used.go index 5bdc5a7318..516f5968a8 100644 --- a/test/used.go +++ b/test/used.go @@ -139,7 +139,7 @@ func _() { unsafe.Sizeof(t) // ERROR "unsafe.Sizeof\(t\) .* not used" _ = int // ERROR "type int is not an expression|not an expression" (x) // ERROR "x .* not used|not used" - _ = new(x2) // ERROR "x2 is not a type|not a type" + _ = new(len) // ERROR "len.*must be called" // Disabled due to issue #43125. // _ = new(1 + 1) // DISABLED "1 \+ 1 is not a type" } -- cgit v1.3-5-g9baa From a5866ebe40207c4c64f0522721825b10887356e0 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Mon, 15 Sep 2025 17:31:46 +0700 Subject: cmd/compile: prevent shapifying of pointer shape type CL 641955 changes the Unified IR reader to not do shapify when reading a reshaping expression, preventing the loss of the original type. This is an oversight, as the main problem isn't about shaping during the reshaping process itself, but about the specific case of shaping a pointer shape type. This bug occurs when instantiating a generic function within another generic function with a pointer shape type as type parameter, which will convert `*[]go.shape.T` to `*go.shape.uint8`, resulting in the loss of the original expression's type. This commit changes the Unified IR reader to avoid pointer shaping for `*[]go.shape.T`, ensuring that the original type is preserved when processing reshaping expressions. 
Updates #71184 Updates #73947 Fixes #74260 Fixes #75461 Change-Id: Icede6b73247d0d367bb485619f2dafb60ad66806 Reviewed-on: https://go-review.googlesource.com/c/go/+/704095 Auto-Submit: Cuong Manh Le LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Junyang Shao --- src/cmd/compile/internal/noder/reader.go | 46 +++++---------- src/cmd/compile/testdata/script/issue75461.txt | 78 ++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 33 deletions(-) create mode 100644 src/cmd/compile/testdata/script/issue75461.txt (limited to 'src/cmd') diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 3cbc7989a7..45e2bfd727 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -49,9 +49,6 @@ type pkgReader struct { // but bitwise inverted so we can detect if we're missing the entry // or not. newindex []index - - // indicates whether the data is reading during reshaping. - reshaping bool } func newPkgReader(pr pkgbits.PkgDecoder) *pkgReader { @@ -119,10 +116,6 @@ type reader struct { // find parameters/results. funarghack bool - // reshaping is used during reading exprReshape code, preventing - // the reader from shapifying the re-shaped type. - reshaping bool - // methodSym is the name of method's name, if reading a method. // It's nil if reading a normal function or closure body. methodSym *types.Sym @@ -937,8 +930,19 @@ func shapify(targ *types.Type, basic bool) *types.Type { // types, and discarding struct field names and tags. However, we'll // need to start tracking how type parameters are actually used to // implement some of these optimizations. + pointerShaping := basic && targ.IsPtr() && !targ.Elem().NotInHeap() + // The exception is when the type parameter is a pointer to a type + // which `Type.HasShape()` returns true, but `Type.IsShape()` returns + // false, like `*[]go.shape.T`. 
This is because the type parameter is + // used to instantiate a generic function inside another generic function. + // In this case, we want to keep the targ as-is, otherwise, we may lose the + // original type after `*[]go.shape.T` is shapified to `*go.shape.uint8`. + // See issue #54535, #71184. + if pointerShaping && !targ.Elem().IsShape() && targ.Elem().HasShape() { + return targ + } under := targ.Underlying() - if basic && targ.IsPtr() && !targ.Elem().NotInHeap() { + if pointerShaping { under = types.NewPtr(types.Types[types.TUINT8]) } @@ -1014,25 +1018,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx index, implicits, explicits // arguments. for i, targ := range dict.targs { basic := r.Bool() - isPointerShape := basic && targ.IsPtr() && !targ.Elem().NotInHeap() - // We should not do shapify during the reshaping process, see #71184. - // However, this only matters for shapify a pointer type, which will - // lose the original underlying type. - // - // Example with a pointer type: - // - // - First, shapifying *[]T -> *uint8 - // - During the reshaping process, *uint8 is shapified to *go.shape.uint8 - // - This ends up with a different type with the original *[]T - // - // For a non-pointer type: - // - // - int -> go.shape.int - // - go.shape.int -> go.shape.int - // - // We always end up with the identical type. 
- canShapify := !pr.reshaping || !isPointerShape - if dict.shaped && canShapify { + if dict.shaped { dict.targs[i] = shapify(targ, basic) } } @@ -2470,10 +2456,7 @@ func (r *reader) expr() (res ir.Node) { case exprReshape: typ := r.typ() - old := r.reshaping - r.reshaping = true x := r.expr() - r.reshaping = old if types.IdenticalStrict(x.Type(), typ) { return x @@ -2596,10 +2579,7 @@ func (r *reader) funcInst(pos src.XPos) (wrapperFn, baseFn, dictPtr ir.Node) { info := r.dict.subdicts[idx] explicits := r.p.typListIdx(info.explicits, r.dict) - old := r.p.reshaping - r.p.reshaping = r.reshaping baseFn = r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name) - r.p.reshaping = old // TODO(mdempsky): Is there a more robust way to get the // dictionary pointer type here? diff --git a/src/cmd/compile/testdata/script/issue75461.txt b/src/cmd/compile/testdata/script/issue75461.txt new file mode 100644 index 0000000000..05f0fd4cfa --- /dev/null +++ b/src/cmd/compile/testdata/script/issue75461.txt @@ -0,0 +1,78 @@ +go build main.go +! stdout . +! stderr . 
+ +-- main.go -- +package main + +import ( + "demo/registry" +) + +func main() { + _ = registry.NewUserRegistry() +} + +-- go.mod -- +module demo + +go 1.24 + +-- model/user.go -- +package model + +type User struct { + ID int +} + +func (c *User) String() string { + return "" +} + +-- ordered/map.go -- +package ordered + +type OrderedMap[K comparable, V any] struct { + m map[K]V +} + +func New[K comparable, V any](options ...any) *OrderedMap[K, V] { + orderedMap := &OrderedMap[K, V]{} + return orderedMap +} + +-- registry/user.go -- +package registry + +import ( + "demo/model" + "demo/ordered" +) + +type baseRegistry = Registry[model.User, *model.User] + +type UserRegistry struct { + *baseRegistry +} + +type Registry[T any, P PStringer[T]] struct { + m *ordered.OrderedMap[string, P] +} + +type PStringer[T any] interface { + *T + String() string +} + +func NewRegistry[T any, P PStringer[T]]() *Registry[T, P] { + r := &Registry[T, P]{ + m: ordered.New[string, P](), + } + return r +} + +func NewUserRegistry() *UserRegistry { + return &UserRegistry{ + baseRegistry: NewRegistry[model.User](), + } +} -- cgit v1.3-5-g9baa From 3df27cd21aab3d3bcdc8ac56e7653ab023dc1112 Mon Sep 17 00:00:00 2001 From: mohanson Date: Sat, 20 Sep 2025 19:32:29 +0800 Subject: cmd/compile: fix typo in comment Fix typo for omitted. Change-Id: Ia633abe7f3d28f15f1f538425cdce9e6d9ef48c0 Reviewed-on: https://go-review.googlesource.com/c/go/+/705735 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Keith Randall Reviewed-by: Keith Randall Auto-Submit: Keith Randall --- src/cmd/compile/internal/noder/doc.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src/cmd') diff --git a/src/cmd/compile/internal/noder/doc.go b/src/cmd/compile/internal/noder/doc.go index a5d5533168..8eb67e92f0 100644 --- a/src/cmd/compile/internal/noder/doc.go +++ b/src/cmd/compile/internal/noder/doc.go @@ -87,7 +87,7 @@ constant for file bases and hence not encoded. 
[ Sync ] StringRef // the (absolute) file name for the base Bool // true if a file base, else a line base - // The below is ommitted for file bases. + // The below is omitted for file bases. [ Pos Uint64 // line Uint64 ] // column @@ -99,7 +99,7 @@ without a PosBase have no line or column. Pos = [ Sync ] Bool // true if the position has a base - // The below is ommitted if the position has no base. + // The below is omitted if the position has no base. [ Ref[PosBase] Uint64 // line Uint64 ] // column @@ -125,7 +125,7 @@ packages. The below package paths have special meaning. Pkg = RefTable [ Sync ] StringRef // path - // The below is ommitted for the special package paths + // The below is omitted for the special package paths // "builtin" and "unsafe". [ StringRef // name Imports ] -- cgit v1.3-5-g9baa From 61bf26a9eef5e0c1a5c319f60dfe1e3c51766474 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Mon, 15 Sep 2025 14:23:44 +0200 Subject: cmd/link: fix Mach-O X86_64_RELOC_SUBTRACTOR in internal linking X86_64_RELOC_SUBTRACTOR is handled as a generic R_PCREL relocation, which gets the relocation size subtracted from the relocated value. This is not supposed to happen for this particular relocation, so compensate by adding the size to the addend. 
Cq-Include-Trybots: luci.golang.try:gotip-darwin-amd64-race Change-Id: I6e6889d63bb03b8076e3e409722601dfebec57e5 Reviewed-on: https://go-review.googlesource.com/c/go/+/703776 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao --- src/cmd/link/internal/amd64/asm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/cmd') diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go index b8127a2538..5424de800c 100644 --- a/src/cmd/link/internal/amd64/asm.go +++ b/src/cmd/link/internal/amd64/asm.go @@ -208,7 +208,7 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade } // The second relocation has the target symbol we want su.SetRelocType(rIdx+1, objabi.R_PCREL) - su.SetRelocAdd(rIdx+1, r.Add()+int64(r.Off())-off) + su.SetRelocAdd(rIdx+1, r.Add()+int64(r.Off())+int64(r.Siz())-off) // Remove the other relocation su.SetRelocSiz(rIdx, 0) return true -- cgit v1.3-5-g9baa From a13d085a5b66e4d9f3ecfae91c40f62034cfb481 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Mon, 22 Sep 2025 12:02:13 +0200 Subject: cmd/cgo: don't hardcode section name in TestNumberOfExportedFunctions TestNumberOfExportedFunctions checks the number of exported functions announced in the PE export table, getting it from the .edata section. If the section is not found, the test is skipped. However, the PE spec doesn't mandate that the export table be in a section named .edata, making this test prone to being skipped unnecessarily. While here, remove a check in cmd/go.testBuildmodePIE that was testing the same thing in order to verify that the binary had a relocation table. Not only is the test duplicated, it is also unnecessary because it is already testing that the PE characteristics don't contain the IMAGE_FILE_RELOCS_STRIPPED flag. 
Closes #46719 Cq-Include-Trybots: luci.golang.try:gotip-windows-arm64 Change-Id: I28d1e261b38388868dd3c19ef6ddddad7bf105ef Reviewed-on: https://go-review.googlesource.com/c/go/+/705755 Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/cgo/internal/testcshared/cshared_test.go | 116 +++++++++++++++-------- src/cmd/go/go_test.go | 33 ------- 2 files changed, 75 insertions(+), 74 deletions(-) (limited to 'src/cmd') diff --git a/src/cmd/cgo/internal/testcshared/cshared_test.go b/src/cmd/cgo/internal/testcshared/cshared_test.go index 2c4d33f599..f1c30f8f9a 100644 --- a/src/cmd/cgo/internal/testcshared/cshared_test.go +++ b/src/cmd/cgo/internal/testcshared/cshared_test.go @@ -375,26 +375,7 @@ func TestExportedSymbols(t *testing.T) { } } -func checkNumberOfExportedFunctionsWindows(t *testing.T, exportAllSymbols bool) { - const prog = ` -package main - -import "C" - -//export GoFunc -func GoFunc() { - println(42) -} - -//export GoFunc2 -func GoFunc2() { - println(24) -} - -func main() { -} -` - +func checkNumberOfExportedFunctionsWindows(t *testing.T, prog string, exportedFunctions int, wantAll bool) { tmpdir := t.TempDir() srcfile := filepath.Join(tmpdir, "test.go") @@ -403,7 +384,7 @@ func main() { t.Fatal(err) } argv := []string{"build", "-buildmode=c-shared"} - if exportAllSymbols { + if wantAll { argv = append(argv, "-ldflags", "-extldflags=-Wl,--export-all-symbols") } argv = append(argv, "-o", objfile, srcfile) @@ -417,10 +398,36 @@ func main() { t.Fatalf("pe.Open failed: %v", err) } defer f.Close() - section := f.Section(".edata") + + _, pe64 := f.OptionalHeader.(*pe.OptionalHeader64) + // grab the export data directory entry + var idd pe.DataDirectory + if pe64 { + idd = f.OptionalHeader.(*pe.OptionalHeader64).DataDirectory[pe.IMAGE_DIRECTORY_ENTRY_EXPORT] + } else { + idd = f.OptionalHeader.(*pe.OptionalHeader32).DataDirectory[pe.IMAGE_DIRECTORY_ENTRY_EXPORT] + } + + // figure out which section contains the import directory 
table + var section *pe.Section + for _, s := range f.Sections { + if s.Offset == 0 { + continue + } + if s.VirtualAddress <= idd.VirtualAddress && idd.VirtualAddress-s.VirtualAddress < s.VirtualSize { + section = s + break + } + } if section == nil { - t.Skip(".edata section is not present") + t.Fatal("no section contains export directory") } + d, err := section.Data() + if err != nil { + t.Fatal(err) + } + // seek to the virtual address specified in the export data directory + d = d[idd.VirtualAddress-section.VirtualAddress:] // TODO: deduplicate this struct from cmd/link/internal/ld/pe.go type IMAGE_EXPORT_DIRECTORY struct { @@ -432,26 +439,22 @@ func main() { _ [3]uint32 } var e IMAGE_EXPORT_DIRECTORY - if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil { + if err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &e); err != nil { t.Fatalf("binary.Read failed: %v", err) } - // Only the two exported functions and _cgo_dummy_export should be exported - expectedNumber := uint32(3) - - if exportAllSymbols { - if e.NumberOfFunctions <= expectedNumber { - t.Fatalf("missing exported functions: %v", e.NumberOfFunctions) - } - if e.NumberOfNames <= expectedNumber { - t.Fatalf("missing exported names: %v", e.NumberOfNames) + // Only the two exported functions and _cgo_dummy_export should be exported. + // NumberOfNames is the number of functions exported with a unique name. + // NumberOfFunctions can be higher than that because it also counts + // functions exported only by ordinal, a unique number asigned by the linker, + // and linkers might add an unknown number of their own ordinal-only functions. 
+ if wantAll { + if e.NumberOfNames <= uint32(exportedFunctions) { + t.Errorf("got %d exported names, want > %d", e.NumberOfNames, exportedFunctions) } } else { - if e.NumberOfFunctions != expectedNumber { - t.Fatalf("got %d exported functions; want %d", e.NumberOfFunctions, expectedNumber) - } - if e.NumberOfNames != expectedNumber { - t.Fatalf("got %d exported names; want %d", e.NumberOfNames, expectedNumber) + if e.NumberOfNames > uint32(exportedFunctions) { + t.Errorf("got %d exported names, want <= %d", e.NumberOfNames, exportedFunctions) } } } @@ -467,11 +470,42 @@ func TestNumberOfExportedFunctions(t *testing.T) { t.Parallel() - t.Run("OnlyExported", func(t *testing.T) { - checkNumberOfExportedFunctionsWindows(t, false) + const prog0 = ` +package main + +import "C" + +func main() { +} +` + + const prog2 = ` +package main + +import "C" + +//export GoFunc +func GoFunc() { + println(42) +} + +//export GoFunc2 +func GoFunc2() { + println(24) +} + +func main() { +} +` + // All programs export _cgo_dummy_export, so add 1 to the expected counts. + t.Run("OnlyExported/0", func(t *testing.T) { + checkNumberOfExportedFunctionsWindows(t, prog0, 0+1, false) + }) + t.Run("OnlyExported/2", func(t *testing.T) { + checkNumberOfExportedFunctionsWindows(t, prog2, 2+1, false) }) t.Run("All", func(t *testing.T) { - checkNumberOfExportedFunctionsWindows(t, true) + checkNumberOfExportedFunctionsWindows(t, prog2, 2+1, true) }) } diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 3e691abe41..e4ee9bd1e8 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -9,7 +9,6 @@ import ( "debug/elf" "debug/macho" "debug/pe" - "encoding/binary" "flag" "fmt" "go/format" @@ -2131,38 +2130,6 @@ func testBuildmodePIE(t *testing.T, useCgo, setBuildmodeToPIE bool) { if (dc & pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) == 0 { t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set") } - if useCgo { - // Test that only one symbol is exported (#40795). 
- // PIE binaries don´t require .edata section but unfortunately - // binutils doesn´t generate a .reloc section unless there is - // at least one symbol exported. - // See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 - section := f.Section(".edata") - if section == nil { - t.Skip(".edata section is not present") - } - // TODO: deduplicate this struct from cmd/link/internal/ld/pe.go - type IMAGE_EXPORT_DIRECTORY struct { - _ [2]uint32 - _ [2]uint16 - _ [2]uint32 - NumberOfFunctions uint32 - NumberOfNames uint32 - _ [3]uint32 - } - var e IMAGE_EXPORT_DIRECTORY - if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil { - t.Fatalf("binary.Read failed: %v", err) - } - - // Only _cgo_dummy_export should be exported - if e.NumberOfFunctions != 1 { - t.Fatalf("got %d exported functions; want 1", e.NumberOfFunctions) - } - if e.NumberOfNames != 1 { - t.Fatalf("got %d exported names; want 1", e.NumberOfNames) - } - } default: // testBuildmodePIE opens object files, so it needs to understand the object // file format. -- cgit v1.3-5-g9baa From 7bc1935db55c9d182617aba074f048f9c7573680 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 18 Sep 2025 14:04:43 -0400 Subject: cmd/compile/internal: support new(expr) This CL adds compiler support for new(expr), a feature of go1.26 that allows the user to specify the initial value of the variable instead of its type. Also, a basic test of dynamic behavior. See CL 704737 for spec change and CL 704935 for type-checker changes. 
For #45624 Change-Id: I65d27de1ee3aabb819b57cce8ea77f3073447757 Reviewed-on: https://go-review.googlesource.com/c/go/+/705157 Reviewed-by: Keith Randall Reviewed-by: Mateusz Poliwczak Auto-Submit: Alan Donovan LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/internal/ir/node.go | 2 +- src/cmd/compile/internal/ir/type.go | 4 ++++ src/cmd/compile/internal/noder/reader.go | 13 +++++++++++-- src/cmd/compile/internal/noder/writer.go | 8 +++++++- test/newexpr.go | 32 ++++++++++++++++++++++++++++++++ test/used.go | 3 +-- 6 files changed, 56 insertions(+), 6 deletions(-) create mode 100644 test/newexpr.go (limited to 'src/cmd') diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 003ec15de1..8c61bb6ed5 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -215,7 +215,7 @@ const ( ORSH // X >> Y OAND // X & Y OANDNOT // X &^ Y - ONEW // new(X); corresponds to calls to new in source code + ONEW // new(X); corresponds to calls to new(T) in source code ONOT // !X OBITNOT // ^X OPLUS // +X diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 6daca856a6..0f44cf8d04 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -42,6 +42,10 @@ func TypeNode(t *types.Type) Node { // A DynamicType represents a type expression whose exact type must be // computed dynamically. +// +// TODO(adonovan): I think "dynamic" is a misnomer here; it's really a +// type with free type parameters that needs to be instantiated to obtain +// a ground type for which an rtype can exist. 
type DynamicType struct { miniExpr diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 45e2bfd727..ca7c6bf151 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -2431,8 +2431,16 @@ func (r *reader) expr() (res ir.Node) { case exprNew: pos := r.pos() - typ := r.exprType() - return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, typ)) + if r.Bool() { + // new(expr) -> tmp := expr; &tmp + x := r.expr() + var init ir.Nodes + addr := ir.NewAddrExpr(pos, r.tempCopy(pos, x, &init)) + addr.SetInit(init) + return typecheck.Expr(addr) + } + // new(T) + return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, r.exprType())) case exprSizeof: return ir.NewUintptr(r.pos(), r.typ().Size()) @@ -3239,6 +3247,7 @@ func (r *reader) exprType() ir.Node { var rtype, itab ir.Node if r.Bool() { + // non-empty interface typ, rtype, _, _, itab = r.itab(pos) if !typ.IsInterface() { rtype = nil // TODO(mdempsky): Leave set? diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go index 54e5f1ea5f..9c90d221c2 100644 --- a/src/cmd/compile/internal/noder/writer.go +++ b/src/cmd/compile/internal/noder/writer.go @@ -2035,10 +2035,16 @@ func (w *writer) expr(expr syntax.Expr) { case "new": assert(len(expr.ArgList) == 1) assert(!expr.HasDots) + arg := expr.ArgList[0] w.Code(exprNew) w.pos(expr) - w.exprType(nil, expr.ArgList[0]) + tv := w.p.typeAndValue(arg) + if w.Bool(!tv.IsType()) { + w.expr(arg) // new(expr), go1.26 + } else { + w.exprType(nil, arg) // new(T) + } return case "Sizeof": diff --git a/test/newexpr.go b/test/newexpr.go new file mode 100644 index 0000000000..7deffae38f --- /dev/null +++ b/test/newexpr.go @@ -0,0 +1,32 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +// Issue #45624 is the proposal to accept new(expr) in go1.26. +// Here we test its run-time behavior. +func main() { + { + p := new(123) // untyped constant expr + if *p != 123 { + panic("wrong value") + } + } + { + x := 42 + p := new(x) // non-constant expr + if *p != x { + panic("wrong value") + } + } + { + x := [2]int{123, 456} + p := new(x) // composite value + if *p != x { + panic("wrong value") + } + } +} diff --git a/test/used.go b/test/used.go index 516f5968a8..33e1140cef 100644 --- a/test/used.go +++ b/test/used.go @@ -140,6 +140,5 @@ func _() { _ = int // ERROR "type int is not an expression|not an expression" (x) // ERROR "x .* not used|not used" _ = new(len) // ERROR "len.*must be called" - // Disabled due to issue #43125. - // _ = new(1 + 1) // DISABLED "1 \+ 1 is not a type" + _ = new(1 + 1) // ok } -- cgit v1.3-5-g9baa From 411c250d64304033181c46413a6e9381e8fe9b82 Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Mon, 17 Mar 2025 11:45:52 -0400 Subject: runtime: add specialized malloc functions for sizes up to 512 bytes This CL adds a generator function in runtime/_mkmalloc to generate specialized mallocgc functions for sizes up through 512 bytes. (That's the limit where it's possible to end up in the no header case when there are scan bits, and where the benefits of the specialized functions significantly diminish according to microbenchmarks). If the specializedmalloc GOEXPERIMENT is turned on, mallocgc will call one of these functions in the no header case. malloc_generated.go is the generated file containing the specialized malloc functions. malloc_stubs.go contains the templates that will be stamped to create the specialized malloc functions. malloc_tables_generated contains the tables that mallocgc will use to select the specialized function to call. I've had to update the two stdlib_test.go files to account for the new submodule mkmalloc is in. 
mprof_test accounts for the changes in the stacks since different functions can be called in some cases. I still need to investigate heapsampling.go. Change-Id: Ia0f68dccdf1c6a200554ae88657cf4d686ace819 Reviewed-on: https://go-review.googlesource.com/c/go/+/665835 Reviewed-by: Michael Knyszek Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/types2/stdlib_test.go | 1 + src/go/types/stdlib_test.go | 1 + src/internal/runtime/gc/sizeclasses.go | 2 + src/runtime/_mkmalloc/constants.go | 29 + src/runtime/_mkmalloc/go.mod | 5 + src/runtime/_mkmalloc/go.sum | 2 + src/runtime/_mkmalloc/mkmalloc.go | 605 ++ src/runtime/_mkmalloc/mkmalloc_test.go | 36 + src/runtime/_mkmalloc/mksizeclasses.go | 59 +- src/runtime/malloc.go | 63 +- src/runtime/malloc_generated.go | 8468 ++++++++++++++++++++++++ src/runtime/malloc_stubs.go | 586 ++ src/runtime/malloc_tables_generated.go | 1038 +++ src/runtime/malloc_tables_plan9.go | 14 + src/runtime/malloc_test.go | 10 + 15 files changed, 10860 insertions(+), 59 deletions(-) create mode 100644 src/runtime/_mkmalloc/constants.go create mode 100644 src/runtime/_mkmalloc/go.mod create mode 100644 src/runtime/_mkmalloc/go.sum create mode 100644 src/runtime/_mkmalloc/mkmalloc.go create mode 100644 src/runtime/_mkmalloc/mkmalloc_test.go create mode 100644 src/runtime/malloc_generated.go create mode 100644 src/runtime/malloc_stubs.go create mode 100644 src/runtime/malloc_tables_generated.go create mode 100644 src/runtime/malloc_tables_plan9.go (limited to 'src/cmd') diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go index 365bc97462..a579c8184e 100644 --- a/src/cmd/compile/internal/types2/stdlib_test.go +++ b/src/cmd/compile/internal/types2/stdlib_test.go @@ -360,6 +360,7 @@ func TestStdKen(t *testing.T) { var excluded = map[string]bool{ "builtin": true, "cmd/compile/internal/ssa/_gen": true, + "runtime/_mkmalloc": true, } // printPackageMu synchronizes the 
printing of type-checked package files in diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 79ccbc6fcf..eb838b2c88 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -362,6 +362,7 @@ func TestStdKen(t *testing.T) { var excluded = map[string]bool{ "builtin": true, "cmd/compile/internal/ssa/_gen": true, + "runtime/_mkmalloc": true, } // printPackageMu synchronizes the printing of type-checked package files in diff --git a/src/internal/runtime/gc/sizeclasses.go b/src/internal/runtime/gc/sizeclasses.go index 3ef13834e4..befba425cc 100644 --- a/src/internal/runtime/gc/sizeclasses.go +++ b/src/internal/runtime/gc/sizeclasses.go @@ -91,6 +91,8 @@ const ( PageShift = 13 MaxObjsPerSpan = 1024 MaxSizeClassNPages = 10 + TinySize = 16 + TinySizeClass = 2 ) var SizeClassToSize = [NumSizeClasses]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768} diff --git a/src/runtime/_mkmalloc/constants.go b/src/runtime/_mkmalloc/constants.go new file mode 100644 index 0000000000..ad20c7b52b --- /dev/null +++ b/src/runtime/_mkmalloc/constants.go @@ -0,0 +1,29 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +const ( + // Constants that we use and will transfer to the runtime. + minHeapAlign = 8 + maxSmallSize = 32 << 10 + smallSizeDiv = 8 + smallSizeMax = 1024 + largeSizeDiv = 128 + pageShift = 13 + tinySize = 16 + + // Derived constants. 
+ pageSize = 1 << pageShift +) + +const ( + maxPtrSize = max(4, 8) + maxPtrBits = 8 * maxPtrSize + + // Maximum size smallScanNoHeader would be called for, which is the + // maximum value gc.MinSizeForMallocHeader can have on any platform. + // gc.MinSizeForMallocHeader is defined as goarch.PtrSize * goarch.PtrBits. + smallScanNoHeaderMax = maxPtrSize * maxPtrBits +) diff --git a/src/runtime/_mkmalloc/go.mod b/src/runtime/_mkmalloc/go.mod new file mode 100644 index 0000000000..623c341769 --- /dev/null +++ b/src/runtime/_mkmalloc/go.mod @@ -0,0 +1,5 @@ +module runtime/_mkmalloc + +go 1.24 + +require golang.org/x/tools v0.33.0 diff --git a/src/runtime/_mkmalloc/go.sum b/src/runtime/_mkmalloc/go.sum new file mode 100644 index 0000000000..bead5223ca --- /dev/null +++ b/src/runtime/_mkmalloc/go.sum @@ -0,0 +1,2 @@ +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= diff --git a/src/runtime/_mkmalloc/mkmalloc.go b/src/runtime/_mkmalloc/mkmalloc.go new file mode 100644 index 0000000000..986b0aa9f8 --- /dev/null +++ b/src/runtime/_mkmalloc/mkmalloc.go @@ -0,0 +1,605 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "log" + "os" + "strings" + + "golang.org/x/tools/go/ast/astutil" + + internalastutil "runtime/_mkmalloc/astutil" +) + +var stdout = flag.Bool("stdout", false, "write sizeclasses source to stdout instead of sizeclasses.go") + +func makeSizeToSizeClass(classes []class) []uint8 { + sc := uint8(0) + ret := make([]uint8, smallScanNoHeaderMax+1) + for i := range ret { + if i > classes[sc].size { + sc++ + } + ret[i] = sc + } + return ret +} + +func main() { + log.SetFlags(0) + log.SetPrefix("mkmalloc: ") + + classes := makeClasses() + sizeToSizeClass := makeSizeToSizeClass(classes) + + if *stdout { + if _, err := os.Stdout.Write(mustFormat(generateSizeClasses(classes))); err != nil { + log.Fatal(err) + } + return + } + + sizeclasesesfile := "../../internal/runtime/gc/sizeclasses.go" + if err := os.WriteFile(sizeclasesesfile, mustFormat(generateSizeClasses(classes)), 0666); err != nil { + log.Fatal(err) + } + + outfile := "../malloc_generated.go" + if err := os.WriteFile(outfile, mustFormat(inline(specializedMallocConfig(classes, sizeToSizeClass))), 0666); err != nil { + log.Fatal(err) + } + + tablefile := "../malloc_tables_generated.go" + if err := os.WriteFile(tablefile, mustFormat(generateTable(sizeToSizeClass)), 0666); err != nil { + log.Fatal(err) + } +} + +// withLineNumbers returns b with line numbers added to help debugging. +func withLineNumbers(b []byte) []byte { + var buf bytes.Buffer + i := 1 + for line := range bytes.Lines(b) { + fmt.Fprintf(&buf, "%d: %s", i, line) + i++ + } + return buf.Bytes() +} + +// mustFormat formats the input source, or exits if there's an error. +func mustFormat(b []byte) []byte { + formatted, err := format.Source(b) + if err != nil { + log.Fatalf("error formatting source: %v\nsource:\n%s\n", err, withLineNumbers(b)) + } + return formatted +} + +// generatorConfig is the configuration for the generator. 
It uses the given file to find +// its templates, and generates each of the functions specified by specs. +type generatorConfig struct { + file string + specs []spec +} + +// spec is the specification for a function for the inliner to produce. The function gets +// the given name, and is produced by starting with the function with the name given by +// templateFunc and applying each of the ops. +type spec struct { + name string + templateFunc string + ops []op +} + +// replacementKind specifies the operation to be done by an op. +type replacementKind int + +const ( + inlineFunc = replacementKind(iota) + subBasicLit +) + +// op is a single inlining operation for the inliner. Any calls to the function +// from are replaced with the inlined body of to. For non-functions, uses of from are +// replaced with the basic literal expression given by to. +type op struct { + kind replacementKind + from string + to string +} + +func smallScanNoHeaderSCFuncName(sc, scMax uint8) string { + if sc == 0 || sc > scMax { + return "mallocPanic" + } + return fmt.Sprintf("mallocgcSmallScanNoHeaderSC%d", sc) +} + +func tinyFuncName(size uintptr) string { + if size == 0 || size > smallScanNoHeaderMax { + return "mallocPanic" + } + return fmt.Sprintf("mallocTiny%d", size) +} + +func smallNoScanSCFuncName(sc, scMax uint8) string { + if sc < 2 || sc > scMax { + return "mallocPanic" + } + return fmt.Sprintf("mallocgcSmallNoScanSC%d", sc) +} + +// specializedMallocConfig produces an inlining config to stamp out the definitions of the size-specialized +// malloc functions to be written by mkmalloc. +func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generatorConfig { + config := generatorConfig{file: "../malloc_stubs.go"} + + // Only generate specialized functions for sizes that don't have + // a header on 64-bit platforms. 
(They may have a header on 32-bit, but + // we will fall back to the non-specialized versions in that case) + scMax := sizeToSizeClass[smallScanNoHeaderMax] + + str := fmt.Sprint + + // allocations with pointer bits + { + const noscan = 0 + for sc := uint8(0); sc <= scMax; sc++ { + if sc == 0 { + continue + } + name := smallScanNoHeaderSCFuncName(sc, scMax) + elemsize := classes[sc].size + config.specs = append(config.specs, spec{ + templateFunc: "mallocStub", + name: name, + ops: []op{ + {inlineFunc, "inlinedMalloc", "smallScanNoHeaderStub"}, + {inlineFunc, "heapSetTypeNoHeaderStub", "heapSetTypeNoHeaderStub"}, + {inlineFunc, "nextFreeFastStub", "nextFreeFastStub"}, + {inlineFunc, "writeHeapBitsSmallStub", "writeHeapBitsSmallStub"}, + {subBasicLit, "elemsize_", str(elemsize)}, + {subBasicLit, "sizeclass_", str(sc)}, + {subBasicLit, "noscanint_", str(noscan)}, + }, + }) + } + } + + // allocations without pointer bits + { + const noscan = 1 + + // tiny + tinySizeClass := sizeToSizeClass[tinySize] + for s := range uintptr(16) { + if s == 0 { + continue + } + name := tinyFuncName(s) + elemsize := classes[tinySizeClass].size + config.specs = append(config.specs, spec{ + templateFunc: "mallocStub", + name: name, + ops: []op{ + {inlineFunc, "inlinedMalloc", "tinyStub"}, + {inlineFunc, "nextFreeFastTiny", "nextFreeFastTiny"}, + {subBasicLit, "elemsize_", str(elemsize)}, + {subBasicLit, "sizeclass_", str(tinySizeClass)}, + {subBasicLit, "size_", str(s)}, + {subBasicLit, "noscanint_", str(noscan)}, + }, + }) + } + + // non-tiny + for sc := uint8(tinySizeClass); sc <= scMax; sc++ { + name := smallNoScanSCFuncName(sc, scMax) + elemsize := classes[sc].size + config.specs = append(config.specs, spec{ + templateFunc: "mallocStub", + name: name, + ops: []op{ + {inlineFunc, "inlinedMalloc", "smallNoScanStub"}, + {inlineFunc, "nextFreeFastStub", "nextFreeFastStub"}, + {subBasicLit, "elemsize_", str(elemsize)}, + {subBasicLit, "sizeclass_", str(sc)}, + {subBasicLit, "noscanint_", 
str(noscan)}, + }, + }) + } + } + + return config +} + +// inline applies the inlining operations given by the config. +func inline(config generatorConfig) []byte { + var out bytes.Buffer + + // Read the template file in. + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, config.file, nil, 0) + if err != nil { + log.Fatalf("parsing %s: %v", config.file, err) + } + + // Collect the function and import declarations. The function + // declarations in the template file provide both the templates + // that will be stamped out, and the functions that will be inlined + // into them. The imports from the template file will be copied + // straight to the output. + funcDecls := map[string]*ast.FuncDecl{} + importDecls := []*ast.GenDecl{} + for _, decl := range f.Decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + funcDecls[decl.Name.Name] = decl + case *ast.GenDecl: + if decl.Tok.String() == "import" { + importDecls = append(importDecls, decl) + continue + } + } + } + + // Write out the package and import declarations. + out.WriteString("// Code generated by mkmalloc.go; DO NOT EDIT.\n\n") + out.WriteString("package " + f.Name.Name + "\n\n") + for _, importDecl := range importDecls { + out.Write(mustFormatNode(fset, importDecl)) + out.WriteString("\n\n") + } + + // Produce each of the inlined functions specified by specs. + for _, spec := range config.specs { + // Start with a renamed copy of the template function. 
+ containingFuncCopy := internalastutil.CloneNode(funcDecls[spec.templateFunc]) + if containingFuncCopy == nil { + log.Fatal("did not find", spec.templateFunc) + } + containingFuncCopy.Name.Name = spec.name + + // Apply each of the ops given by the specs + stamped := ast.Node(containingFuncCopy) + for _, repl := range spec.ops { + if toDecl, ok := funcDecls[repl.to]; ok { + stamped = inlineFunction(stamped, repl.from, toDecl) + } else { + stamped = substituteWithBasicLit(stamped, repl.from, repl.to) + } + } + + out.Write(mustFormatNode(fset, stamped)) + out.WriteString("\n\n") + } + + return out.Bytes() +} + +// substituteWithBasicLit recursively renames identifiers in the provided AST +// according to 'from' and 'to'. +func substituteWithBasicLit(node ast.Node, from, to string) ast.Node { + // The op is a substitution of an identifier with an basic literal. + toExpr, err := parser.ParseExpr(to) + if err != nil { + log.Fatalf("parsing expr %q: %v", to, err) + } + if _, ok := toExpr.(*ast.BasicLit); !ok { + log.Fatalf("op 'to' expr %q is not a basic literal", to) + } + return astutil.Apply(node, func(cursor *astutil.Cursor) bool { + if isIdentWithName(cursor.Node(), from) { + cursor.Replace(toExpr) + } + return true + }, nil) +} + +// inlineFunction recursively replaces calls to the function 'from' with the body of the function +// 'toDecl'. All calls to 'from' must appear in assignment statements. +// The replacement is very simple: it doesn't substitute the arguments for the parameters, so the +// arguments to the function call must be the same identifier as the parameters to the function +// declared by 'toDecl'. If there are any calls to from where that's not the case there will be a fatal error. 
+func inlineFunction(node ast.Node, from string, toDecl *ast.FuncDecl) ast.Node { + return astutil.Apply(node, func(cursor *astutil.Cursor) bool { + switch node := cursor.Node().(type) { + case *ast.AssignStmt: + // TODO(matloob) CHECK function args have same name + // as parameters (or parameter is "_"). + if len(node.Rhs) == 1 && isCallTo(node.Rhs[0], from) { + args := node.Rhs[0].(*ast.CallExpr).Args + if !argsMatchParameters(args, toDecl.Type.Params) { + log.Fatalf("applying op: arguments to %v don't match parameter names of %v: %v", from, toDecl.Name, debugPrint(args...)) + } + replaceAssignment(cursor, node, toDecl) + } + return false + case *ast.CallExpr: + // double check that all calls to from appear within an assignment + if isCallTo(node, from) { + if _, ok := cursor.Parent().(*ast.AssignStmt); !ok { + log.Fatalf("applying op: all calls to function %q being replaced must appear in an assignment statement, appears in %T", from, cursor.Parent()) + } + } + } + return true + }, nil) +} + +// argsMatchParameters reports whether the arguments given by args are all identifiers +// whose names are the same as the corresponding parameters in params. +func argsMatchParameters(args []ast.Expr, params *ast.FieldList) bool { + var paramIdents []*ast.Ident + for _, f := range params.List { + paramIdents = append(paramIdents, f.Names...) + } + + if len(args) != len(paramIdents) { + return false + } + + for i := range args { + if !isIdentWithName(args[i], paramIdents[i].Name) { + return false + } + } + + return true +} + +// isIdentWithName reports whether the expression is an identifier with the given name. +func isIdentWithName(expr ast.Node, name string) bool { + ident, ok := expr.(*ast.Ident) + if !ok { + return false + } + return ident.Name == name +} + +// isCallTo reports whether the expression is a call expression to the function with the given name. 
+func isCallTo(expr ast.Expr, name string) bool { + callexpr, ok := expr.(*ast.CallExpr) + if !ok { + return false + } + return isIdentWithName(callexpr.Fun, name) +} + +// replaceAssignment replaces an assignment statement where the right hand side is a function call +// whose arguments have the same names as the parameters to funcdecl with the body of funcdecl. +// It sets the left hand side of the assignment to the return values of the function. +func replaceAssignment(cursor *astutil.Cursor, assign *ast.AssignStmt, funcdecl *ast.FuncDecl) { + if !hasTerminatingReturn(funcdecl.Body) { + log.Fatal("function being inlined must have a return at the end") + } + + body := internalastutil.CloneNode(funcdecl.Body) + if hasTerminatingAndNonterminatingReturn(funcdecl.Body) { + // The function has multiple return points. Add the code that we'd continue with in the caller + // after each of the return points. The calling function must have a terminating return + // so we don't continue execution in the replaced function after we finish executing the + // continue block that we add. + body = addContinues(cursor, assign, body, everythingFollowingInParent(cursor)).(*ast.BlockStmt) + } + + if len(body.List) < 1 { + log.Fatal("replacing with empty bodied function") + } + + // The op happens in two steps: first we insert the body of the function being inlined (except for + // the final return) before the assignment, and then we change the assignment statement to replace the function call + // with the expressions being returned. + + // Determine the expressions being returned. + beforeReturn, ret := body.List[:len(body.List)-1], body.List[len(body.List)-1] + returnStmt, ok := ret.(*ast.ReturnStmt) + if !ok { + log.Fatal("last stmt in function we're replacing with should be a return") + } + results := returnStmt.Results + + // Insert the body up to the final return. + for _, stmt := range beforeReturn { + cursor.InsertBefore(stmt) + } + + // Rewrite the assignment statement. 
+ replaceWithAssignment(cursor, assign.Lhs, results, assign.Tok) +} + +// hasTerminatingReturn reports whether the block ends in a return statement. +func hasTerminatingReturn(block *ast.BlockStmt) bool { + _, ok := block.List[len(block.List)-1].(*ast.ReturnStmt) + return ok +} + +// hasTerminatingAndNonterminatingReturn reports whether the block ends in a return +// statement, and also has a return elsewhere in it. +func hasTerminatingAndNonterminatingReturn(block *ast.BlockStmt) bool { + if !hasTerminatingReturn(block) { + return false + } + var ret bool + for i := range block.List[:len(block.List)-1] { + ast.Inspect(block.List[i], func(node ast.Node) bool { + _, ok := node.(*ast.ReturnStmt) + if ok { + ret = true + return false + } + return true + }) + } + return ret +} + +// everythingFollowingInParent returns a block with everything in the parent block node of the cursor after +// the cursor itself. The cursor must point to an element in a block node's list. +func everythingFollowingInParent(cursor *astutil.Cursor) *ast.BlockStmt { + parent := cursor.Parent() + block, ok := parent.(*ast.BlockStmt) + if !ok { + log.Fatal("internal error: in everythingFollowingInParent, cursor doesn't point to element in block list") + } + + blockcopy := internalastutil.CloneNode(block) // get a clean copy + blockcopy.List = blockcopy.List[cursor.Index()+1:] // and remove everything before and including stmt + + if _, ok := blockcopy.List[len(blockcopy.List)-1].(*ast.ReturnStmt); !ok { + log.Printf("%s", mustFormatNode(token.NewFileSet(), blockcopy)) + log.Fatal("internal error: parent doesn't end in a return") + } + return blockcopy +} + +// in the case that there's a return in the body being inlined (toBlock), addContinues +// replaces those returns that are not at the end of the function with the code in the +// caller after the function call that execution would continue with after the return. +// The block being added must end in a return. 
+func addContinues(cursor *astutil.Cursor, assignNode *ast.AssignStmt, toBlock *ast.BlockStmt, continueBlock *ast.BlockStmt) ast.Node { + if !hasTerminatingReturn(continueBlock) { + log.Fatal("the block being continued to in addContinues must end in a return") + } + applyFunc := func(cursor *astutil.Cursor) bool { + ret, ok := cursor.Node().(*ast.ReturnStmt) + if !ok { + return true + } + + if cursor.Parent() == toBlock && cursor.Index() == len(toBlock.List)-1 { + return false + } + + // This is the opposite of replacing a function call with the body. First + // we replace the return statement with the assignment from the caller, and + // then add the code we continue with. + replaceWithAssignment(cursor, assignNode.Lhs, ret.Results, assignNode.Tok) + cursor.InsertAfter(internalastutil.CloneNode(continueBlock)) + + return false + } + return astutil.Apply(toBlock, applyFunc, nil) +} + +// debugPrint prints out the expressions given by nodes for debugging. +func debugPrint(nodes ...ast.Expr) string { + var b strings.Builder + for i, node := range nodes { + b.Write(mustFormatNode(token.NewFileSet(), node)) + if i != len(nodes)-1 { + b.WriteString(", ") + } + } + return b.String() +} + +// mustFormatNode produces the formatted Go code for the given node. +func mustFormatNode(fset *token.FileSet, node any) []byte { + var buf bytes.Buffer + format.Node(&buf, fset, node) + return buf.Bytes() +} + +// mustMatchExprs makes sure that the expression lists have the same length, +// and returns the lists of the expressions on the lhs and rhs where the +// identifiers are not the same. These are used to produce assignment statements +// where the expressions on the right are assigned to the identifiers on the left. 
+func mustMatchExprs(lhs []ast.Expr, rhs []ast.Expr) ([]ast.Expr, []ast.Expr) { + if len(lhs) != len(rhs) { + log.Fatal("exprs don't match", debugPrint(lhs...), debugPrint(rhs...)) + } + + var newLhs, newRhs []ast.Expr + for i := range lhs { + lhsIdent, ok1 := lhs[i].(*ast.Ident) + rhsIdent, ok2 := rhs[i].(*ast.Ident) + if ok1 && ok2 && lhsIdent.Name == rhsIdent.Name { + continue + } + newLhs = append(newLhs, lhs[i]) + newRhs = append(newRhs, rhs[i]) + } + + return newLhs, newRhs +} + +// replaceWithAssignment replaces the node pointed to by the cursor with an assignment of the +// left hand side to the righthand side, removing any redundant assignments of a variable to itself, +// and replacing an assignment to a single basic literal with a constant declaration. +func replaceWithAssignment(cursor *astutil.Cursor, lhs, rhs []ast.Expr, tok token.Token) { + newLhs, newRhs := mustMatchExprs(lhs, rhs) + if len(newLhs) == 0 { + cursor.Delete() + return + } + if len(newRhs) == 1 { + if lit, ok := newRhs[0].(*ast.BasicLit); ok { + constDecl := &ast.DeclStmt{ + Decl: &ast.GenDecl{ + Tok: token.CONST, + Specs: []ast.Spec{ + &ast.ValueSpec{ + Names: []*ast.Ident{newLhs[0].(*ast.Ident)}, + Values: []ast.Expr{lit}, + }, + }, + }, + } + cursor.Replace(constDecl) + return + } + } + newAssignment := &ast.AssignStmt{ + Lhs: newLhs, + Rhs: newRhs, + Tok: tok, + } + cursor.Replace(newAssignment) +} + +// generateTable generates the file with the jump tables for the specialized malloc functions. +func generateTable(sizeToSizeClass []uint8) []byte { + scMax := sizeToSizeClass[smallScanNoHeaderMax] + + var b bytes.Buffer + fmt.Fprintln(&b, `// Code generated by mkmalloc.go; DO NOT EDIT. 
+//go:build !plan9 + +package runtime + +import "unsafe" + +var mallocScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{`) + + for i := range uintptr(smallScanNoHeaderMax + 1) { + fmt.Fprintf(&b, "%s,\n", smallScanNoHeaderSCFuncName(sizeToSizeClass[i], scMax)) + } + + fmt.Fprintln(&b, ` +} + +var mallocNoScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{`) + for i := range uintptr(smallScanNoHeaderMax + 1) { + if i < 16 { + fmt.Fprintf(&b, "%s,\n", tinyFuncName(i)) + } else { + fmt.Fprintf(&b, "%s,\n", smallNoScanSCFuncName(sizeToSizeClass[i], scMax)) + } + } + + fmt.Fprintln(&b, ` +}`) + + return b.Bytes() +} diff --git a/src/runtime/_mkmalloc/mkmalloc_test.go b/src/runtime/_mkmalloc/mkmalloc_test.go new file mode 100644 index 0000000000..bd15c3226a --- /dev/null +++ b/src/runtime/_mkmalloc/mkmalloc_test.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "os" + "testing" +) + +func TestNoChange(t *testing.T) { + classes := makeClasses() + sizeToSizeClass := makeSizeToSizeClass(classes) + + outfile := "../malloc_generated.go" + want, err := os.ReadFile(outfile) + if err != nil { + t.Fatal(err) + } + got := mustFormat(inline(specializedMallocConfig(classes, sizeToSizeClass))) + if !bytes.Equal(want, got) { + t.Fatalf("want:\n%s\ngot:\n%s\n", withLineNumbers(want), withLineNumbers(got)) + } + + tablefile := "../malloc_tables_generated.go" + wanttable, err := os.ReadFile(tablefile) + if err != nil { + t.Fatal(err) + } + gotTable := mustFormat(generateTable(sizeToSizeClass)) + if !bytes.Equal(wanttable, gotTable) { + t.Fatalf("want:\n%s\ngot:\n%s\n", withLineNumbers(wanttable), withLineNumbers(gotTable)) + } +} diff --git a/src/runtime/_mkmalloc/mksizeclasses.go b/src/runtime/_mkmalloc/mksizeclasses.go index a8d2d2db1e..2c39617c6b 100644 --- a/src/runtime/_mkmalloc/mksizeclasses.go +++ b/src/runtime/_mkmalloc/mksizeclasses.go @@ -31,19 +31,14 @@ import ( "bytes" "flag" "fmt" - "go/format" "io" - "log" "math" "math/bits" - "os" ) // Generate internal/runtime/gc/msize.go -var stdout = flag.Bool("stdout", false, "write to stdout instead of sizeclasses.go") - -func main() { +func generateSizeClasses(classes []class) []byte { flag.Parse() var b bytes.Buffer @@ -51,39 +46,14 @@ func main() { fmt.Fprintln(&b, "//go:generate go -C ../../../runtime/_mkmalloc run mksizeclasses.go") fmt.Fprintln(&b) fmt.Fprintln(&b, "package gc") - classes := makeClasses() printComment(&b, classes) printClasses(&b, classes) - out, err := format.Source(b.Bytes()) - if err != nil { - log.Fatal(err) - } - if *stdout { - _, err = os.Stdout.Write(out) - } else { - err = os.WriteFile("../../internal/runtime/gc/sizeclasses.go", out, 0666) - } - if err != nil { - log.Fatal(err) - } + return b.Bytes() } -const ( - // Constants that we use and will transfer to the runtime. 
- minHeapAlign = 8 - maxSmallSize = 32 << 10 - smallSizeDiv = 8 - smallSizeMax = 1024 - largeSizeDiv = 128 - pageShift = 13 - - // Derived constants. - pageSize = 1 << pageShift -) - type class struct { size int // max size npages int // number of pages @@ -294,6 +264,15 @@ func maxNPages(classes []class) int { } func printClasses(w io.Writer, classes []class) { + sizeToSizeClass := func(size int) int { + for j, c := range classes { + if c.size >= size { + return j + } + } + panic("unreachable") + } + fmt.Fprintln(w, "const (") fmt.Fprintf(w, "MinHeapAlign = %d\n", minHeapAlign) fmt.Fprintf(w, "MaxSmallSize = %d\n", maxSmallSize) @@ -304,6 +283,8 @@ func printClasses(w io.Writer, classes []class) { fmt.Fprintf(w, "PageShift = %d\n", pageShift) fmt.Fprintf(w, "MaxObjsPerSpan = %d\n", maxObjsPerSpan(classes)) fmt.Fprintf(w, "MaxSizeClassNPages = %d\n", maxNPages(classes)) + fmt.Fprintf(w, "TinySize = %d\n", tinySize) + fmt.Fprintf(w, "TinySizeClass = %d\n", sizeToSizeClass(tinySize)) fmt.Fprintln(w, ")") fmt.Fprint(w, "var SizeClassToSize = [NumSizeClasses]uint16 {") @@ -332,12 +313,7 @@ func printClasses(w io.Writer, classes []class) { sc := make([]int, smallSizeMax/smallSizeDiv+1) for i := range sc { size := i * smallSizeDiv - for j, c := range classes { - if c.size >= size { - sc[i] = j - break - } - } + sc[i] = sizeToSizeClass(size) } fmt.Fprint(w, "var SizeToSizeClass8 = [SmallSizeMax/SmallSizeDiv+1]uint8 {") for _, v := range sc { @@ -349,12 +325,7 @@ func printClasses(w io.Writer, classes []class) { sc = make([]int, (maxSmallSize-smallSizeMax)/largeSizeDiv+1) for i := range sc { size := smallSizeMax + i*largeSizeDiv - for j, c := range classes { - if c.size >= size { - sc[i] = j - break - } - } + sc[i] = sizeToSizeClass(size) } fmt.Fprint(w, "var SizeToSizeClass128 = [(MaxSmallSize-SmallSizeMax)/LargeSizeDiv+1]uint8 {") for _, v := range sc { diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 5b5a633d9a..db91e89359 100644 --- 
a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -127,8 +127,8 @@ const ( _64bit = 1 << (^uintptr(0) >> 63) / 2 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go. - _TinySize = 16 - _TinySizeClass = int8(2) + _TinySize = gc.TinySize + _TinySizeClass = int8(gc.TinySizeClass) _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc @@ -1080,6 +1080,12 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger // at scale. const doubleCheckMalloc = false +// sizeSpecializedMallocEnabled is the set of conditions where we enable the size-specialized +// mallocgc implementation: the experiment must be enabled, and none of the sanitizers should +// be enabled. The tables used to select the size-specialized malloc function do not compile +// properly on plan9, so size-specialized malloc is also disabled on plan9. +const sizeSpecializedMallocEnabled = goexperiment.SizeSpecializedMalloc && GOOS != "plan9" && !asanenabled && !raceenabled && !msanenabled && !valgrindenabled + // Allocate an object of size bytes. // Small objects are allocated from the per-P cache's free lists. // Large objects (> 32 kB) are allocated straight from the heap. @@ -1110,6 +1116,17 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { return unsafe.Pointer(&zerobase) } + if sizeSpecializedMallocEnabled && heapBitsInSpan(size) { + if typ == nil || !typ.Pointers() { + return mallocNoScanTable[size](size, typ, needzero) + } else { + if !needzero { + throw("objects with pointers must be zeroed") + } + return mallocScanTable[size](size, typ, needzero) + } + } + // It's possible for any malloc to trigger sweeping, which may in // turn queue finalizers. Record this dynamic lock edge. // N.B. Compiled away if lockrank experiment is not enabled. @@ -1138,25 +1155,41 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Actually do the allocation. 
var x unsafe.Pointer var elemsize uintptr - if size <= maxSmallSize-gc.MallocHeaderSize { - if typ == nil || !typ.Pointers() { - if size < maxTinySize { - x, elemsize = mallocgcTiny(size, typ) - } else { + if sizeSpecializedMallocEnabled { + // we know that heapBitsInSpan is true. + if size <= maxSmallSize-gc.MallocHeaderSize { + if typ == nil || !typ.Pointers() { x, elemsize = mallocgcSmallNoscan(size, typ, needzero) - } - } else { - if !needzero { - throw("objects with pointers must be zeroed") - } - if heapBitsInSpan(size) { - x, elemsize = mallocgcSmallScanNoHeader(size, typ) } else { + if !needzero { + throw("objects with pointers must be zeroed") + } x, elemsize = mallocgcSmallScanHeader(size, typ) } + } else { + x, elemsize = mallocgcLarge(size, typ, needzero) } } else { - x, elemsize = mallocgcLarge(size, typ, needzero) + if size <= maxSmallSize-gc.MallocHeaderSize { + if typ == nil || !typ.Pointers() { + if size < maxTinySize { + x, elemsize = mallocgcTiny(size, typ) + } else { + x, elemsize = mallocgcSmallNoscan(size, typ, needzero) + } + } else { + if !needzero { + throw("objects with pointers must be zeroed") + } + if heapBitsInSpan(size) { + x, elemsize = mallocgcSmallScanNoHeader(size, typ) + } else { + x, elemsize = mallocgcSmallScanHeader(size, typ) + } + } + } else { + x, elemsize = mallocgcLarge(size, typ, needzero) + } } // Notify sanitizers, if enabled. diff --git a/src/runtime/malloc_generated.go b/src/runtime/malloc_generated.go new file mode 100644 index 0000000000..600048c675 --- /dev/null +++ b/src/runtime/malloc_generated.go @@ -0,0 +1,8468 @@ +// Code generated by mkmalloc.go; DO NOT EDIT. 
+ +package runtime + +import ( + "internal/goarch" + "internal/runtime/sys" + "unsafe" +) + +func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 1 + + const elemsize = 8 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallScanNoHeader(size, typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(0) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 8 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(8)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 8 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is 
not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(16)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 16 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, 
elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 24 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(24)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 24 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 32 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(32)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 32 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) 
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 48 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(48)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 48 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + 
span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 64 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(64)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 64 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 80 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x 
:= unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(80)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 80 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 96 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if 
doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(96)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 96 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 112 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(112)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 112 + + 
scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 128 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(128)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 128 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + 
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 144 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(144)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 144 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + 
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 160 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(160)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 160 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / 
goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 176 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(176)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 176 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, 
(i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 192 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(192)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 192 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, 
i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 208 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(208)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 208 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 224 + + span.base()) + } + } + 
} + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(224)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 224 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 240 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && 
sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(240)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 240 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 256 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(256)) { + throw("tried to write heap bits, but no 
heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 256 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 288 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(288)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 288 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } 
else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 320 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(320)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 320 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += 
typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 352 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(352)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 352 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / 
goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 384 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(384)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 384 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + 
+ bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 416 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(416)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 416 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = 
(*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 448 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(448)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 448 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + 
nextFreeFastResult = gclinkptr(uintptr(result)* + 480 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(480)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 480 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 512 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 
0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(512)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 512 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + 
gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 2 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes 
= 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 3 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { 
+ doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if 
checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 4 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < 
nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 5 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 
== 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + 
assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 6 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + 
v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 7 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + 
releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == 
_GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 8 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = 
uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 9 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + 
} + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + 
deductAssistCredit(size) + } + + const constsize = 10 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 
0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 11 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache 
!= 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 12 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = 
alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, 
elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 13 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + 
span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 14 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= 
maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return 
x +} + +func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 15 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + 
(*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 2 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := 
nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 3 + + const elemsize = 24 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 24 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, 
checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 4 + + const elemsize = 32 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 32 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := 
unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 5 + + const elemsize = 48 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 48 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero 
!= 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 6 + + const elemsize = 64 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 64 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + 
+ publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 7 + + const elemsize = 80 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 80 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if 
writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 8 + + const elemsize = 96 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 96 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, 
uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 9 + + const elemsize = 112 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 112 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + 
span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 10 + + const elemsize = 128 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 128 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + 
+ c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 11 + + const elemsize = 144 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 144 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if 
c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 12 + + const elemsize = 160 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 160 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != 
c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 13 + + const elemsize = 176 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 176 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, 
elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 14 + + const elemsize = 192 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 192 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + 
releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 15 + + const elemsize = 208 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 208 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t 
:= (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 16 + + const elemsize = 224 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 224 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); 
t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 17 + + const elemsize = 240 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 240 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if 
gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 18 + + const elemsize = 256 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 256 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + 
if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 19 + + const elemsize = 288 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 288 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil 
{ + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 20 + + const elemsize = 320 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 320 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize 
- size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 21 + + const elemsize = 352 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 352 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + 
postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 22 + + const elemsize = 384 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 384 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + 
return x +} + +func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 23 + + const elemsize = 416 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 416 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func 
mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 24 + + const elemsize = 448 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 448 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC25(size uintptr, typ 
*_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 25 + + const elemsize = 480 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 480 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if 
doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 26 + + const elemsize = 512 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 512 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} diff --git a/src/runtime/malloc_stubs.go b/src/runtime/malloc_stubs.go new file mode 100644 index 0000000000..7fd1444189 --- /dev/null +++ 
b/src/runtime/malloc_stubs.go @@ -0,0 +1,586 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains stub functions that are not meant to be called directly, +// but that will be assembled together using the inlining logic in runtime/_mkmalloc +// to produce a full mallocgc function that's specialized for a span class +// or specific size in the case of the tiny allocator. +// +// To assemble a mallocgc function, the mallocStub function is cloned, and the call to +// inlinedMalloc is replaced with the inlined body of smallScanNoHeaderStub, +// smallNoScanStub or tinyStub, depending on the parameters being specialized. +// +// The size_ (for the tiny case) and elemsize_, sizeclass_, and noscanint_ (for all three cases) +// identifiers are replaced with the value of the parameter in the specialized case. +// The nextFreeFastStub, nextFreeFastTiny, heapSetTypeNoHeaderStub, and writeHeapBitsSmallStub +// functions are also inlined by _mkmalloc. + +package runtime + +import ( + "internal/goarch" + "internal/runtime/sys" + "unsafe" +) + +// These identifiers will all be replaced by the inliner. So their values don't +// really matter: they just need to be set so that the stub functions, which +// will never be used on their own, can compile. elemsize_ can't be set to +// zero because we divide by it in nextFreeFastTiny, and the compiler would +// complain about a division by zero. Its replaced value will always be greater +// than zero. +const elemsize_ = 8 +const sizeclass_ = 0 +const noscanint_ = 0 +const size_ = 0 + +func malloc0(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + // Short-circuit zero-sized allocation requests. 
+ return unsafe.Pointer(&zerobase) +} + +func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + panic("not defined for sizeclass") +} + +func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + // It's possible for any malloc to trigger sweeping, which may in + // turn queue finalizers. Record this dynamic lock edge. + // N.B. Compiled away if lockrank experiment is not enabled. + lockRankMayQueueFinalizer() + + // Pre-malloc debug hooks. + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + // Assist the GC if needed. + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + // Actually do the allocation. + x, elemsize := inlinedMalloc(size, typ, needzero) + + // Adjust our GC assist debt to account for internal fragmentation. + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + // Post-malloc debug hooks. + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +// inlinedMalloc will never be called. It is defined just so that the compiler can compile +// the mallocStub function, which will also never be called, but instead used as a template +// to generate a size-specialized malloc function. The call to inlinedMalloc in mallocStub +// will be replaced with the inlined body of smallScanNoHeaderStub, smallNoScanStub, or tinyStub +// when generating the size-specialized malloc function. See the comment at the top of this +// file for more information. 
+func inlinedMalloc(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { + return unsafe.Pointer(uintptr(0)), 0 +} + +func doubleCheckSmallScanNoHeader(size uintptr, typ *_type, mp *m) { + if mp.mallocing != 0 { + throw("malloc deadlock") + } + if mp.gsignal == getg() { + throw("malloc during signal") + } + if typ == nil || !typ.Pointers() { + throw("noscan allocated in scan-only path") + } + if !heapBitsInSpan(size) { + throw("heap bits in not in span for non-header-only path") + } +} + +func smallScanNoHeaderStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { + const sizeclass = sizeclass_ + const elemsize = elemsize_ + + // Set mp.mallocing to keep from being preempted by GC. + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallScanNoHeader(size, typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(noscanint_) + span := c.alloc[spc] + v := nextFreeFastStub(span) + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + // initHeapBits already set the pointer bits for the 8-byte sizeclass + // on 64-bit platforms. + c.scanAlloc += 8 + } else { + dataSize := size // make the inliner happy + x := uintptr(x) + scanSize := heapSetTypeNoHeaderStub(x, dataSize, typ, span) + c.scanAlloc += scanSize + } + + // Ensure that the stores above that initialize x to + // type-safe memory and set the heap bits occur before + // the caller can make x observable to the garbage + // collector. Otherwise, on weakly ordered machines, + // the garbage collector could follow a pointer to x, + // but see uninitialized memory or stale heap bits. + publicationBarrier() + + if writeBarrier.enabled { + // Allocate black during GC. + // All slots hold nil so no scanning is needed. 
+ // This may be racing with GC so do it atomically if there can be + // a race marking the bit. + gcmarknewobject(span, uintptr(x)) + } else { + // Track the last free index before the mark phase. This field + // is only used by the garbage collector. During the mark phase + // this is used by the conservative scanner to filter out objects + // that are both free and recently-allocated. It's safe to do that + // because we allocate-black if the GC is enabled. The conservative + // scanner produces pointers out of thin air, so without additional + // synchronization it might otherwise observe a partially-initialized + // object, which could crash the program. + span.freeIndexForScan = span.freeindex + } + + // Note cache c only valid while m acquired; see #47302 + // + // N.B. Use the full size because that matches how the GC + // will update the mem profile on the "free" side. + // + // TODO(mknyszek): We should really count the header as part + // of gc_sys or something. The code below just pretends it is + // internal fragmentation and matches the GC's accounting by + // using the whole allocation slot. + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + return x, elemsize +} + +func doubleCheckSmallNoScan(typ *_type, mp *m) { + if mp.mallocing != 0 { + throw("malloc deadlock") + } + if mp.gsignal == getg() { + throw("malloc during signal") + } + if typ != nil && typ.Pointers() { + throw("expected noscan type for noscan alloc") + } +} + +func smallNoScanStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { + // TODO(matloob): Add functionality to mkmalloc to allow us to inline a non-constant + // sizeclass_ and elemsize_ value (instead just set to the expressions to look up the size class + // and elemsize. 
We'd also need to teach mkmalloc that values that are touched by these (specifically + // spc below) should turn into vars. This would allow us to generate mallocgcSmallNoScan itself, + // so that its code could not diverge from the generated functions. + const sizeclass = sizeclass_ + const elemsize = elemsize_ + + // Set mp.mallocing to keep from being preempted by GC. + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(noscanint_) + span := c.alloc[spc] + v := nextFreeFastStub(span) + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + // Ensure that the stores above that initialize x to + // type-safe memory and set the heap bits occur before + // the caller can make x observable to the garbage + // collector. Otherwise, on weakly ordered machines, + // the garbage collector could follow a pointer to x, + // but see uninitialized memory or stale heap bits. + publicationBarrier() + + if writeBarrier.enabled { + // Allocate black during GC. + // All slots hold nil so no scanning is needed. + // This may be racing with GC so do it atomically if there can be + // a race marking the bit. + gcmarknewobject(span, uintptr(x)) + } else { + // Track the last free index before the mark phase. This field + // is only used by the garbage collector. During the mark phase + // this is used by the conservative scanner to filter out objects + // that are both free and recently-allocated. It's safe to do that + // because we allocate-black if the GC is enabled. The conservative + // scanner produces pointers out of thin air, so without additional + // synchronization it might otherwise observe a partially-initialized + // object, which could crash the program. 
+ span.freeIndexForScan = span.freeindex + } + + // Note cache c only valid while m acquired; see #47302 + // + // N.B. Use the full size because that matches how the GC + // will update the mem profile on the "free" side. + // + // TODO(mknyszek): We should really count the header as part + // of gc_sys or something. The code below just pretends it is + // internal fragmentation and matches the GC's accounting by + // using the whole allocation slot. + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + return x, elemsize +} + +func doubleCheckTiny(size uintptr, typ *_type, mp *m) { + if mp.mallocing != 0 { + throw("malloc deadlock") + } + if mp.gsignal == getg() { + throw("malloc during signal") + } + if typ != nil && typ.Pointers() { + throw("expected noscan for tiny alloc") + } +} + +func tinyStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { + const constsize = size_ + const elemsize = elemsize_ + + // Set mp.mallocing to keep from being preempted by GC. + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + // Tiny allocator. + // + // Tiny allocator combines several tiny allocation requests + // into a single memory block. The resulting memory block + // is freed when all subobjects are unreachable. The subobjects + // must be noscan (don't have pointers), this ensures that + // the amount of potentially wasted memory is bounded. + // + // Size of the memory block used for combining (maxTinySize) is tunable. + // Current setting is 16 bytes, which relates to 2x worst case memory + // wastage (when all but one subobjects are unreachable). + // 8 bytes would result in no wastage at all, but provides less + // opportunities for combining. 
+ // 32 bytes provides more opportunities for combining, + // but can lead to 4x worst case wastage. + // The best case winning is 8x regardless of block size. + // + // Objects obtained from tiny allocator must not be freed explicitly. + // So when an object will be freed explicitly, we ensure that + // its size >= maxTinySize. + // + // SetFinalizer has a special case for objects potentially coming + // from tiny allocator, it such case it allows to set finalizers + // for an inner byte of a memory block. + // + // The main targets of tiny allocator are small strings and + // standalone escaping variables. On a json benchmark + // the allocator reduces number of allocations by ~12% and + // reduces heap size by ~20%. + c := getMCache(mp) + off := c.tinyoffset + // Align tiny pointer for required (conservative) alignment. + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + // Conservatively align 12-byte objects to 8 bytes on 32-bit + // systems so that objects whose first field is a 64-bit + // value is aligned to 8 bytes and does not cause a fault on + // atomic access. See issue 37262. + // TODO(mknyszek): Remove this workaround if/when issue 36606 + // is resolved. + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + // The object fits into existing tiny block. + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + return x, 0 + } + // Allocate a new maxTinySize block. 
+ checkGCTrigger := false + span := c.alloc[tinySpanClass] + v := nextFreeFastTiny(span) + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 // Always zero + (*[2]uint64)(x)[1] = 0 + // See if we need to replace the existing tiny block with the new one + // based on amount of remaining free space. + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + // Note: disabled when race detector is on, see comment near end of this function. + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + // Ensure that the stores above that initialize x to + // type-safe memory and set the heap bits occur before + // the caller can make x observable to the garbage + // collector. Otherwise, on weakly ordered machines, + // the garbage collector could follow a pointer to x, + // but see uninitialized memory or stale heap bits. + publicationBarrier() + + if writeBarrier.enabled { + // Allocate black during GC. + // All slots hold nil so no scanning is needed. + // This may be racing with GC so do it atomically if there can be + // a race marking the bit. + gcmarknewobject(span, uintptr(x)) + } else { + // Track the last free index before the mark phase. This field + // is only used by the garbage collector. During the mark phase + // this is used by the conservative scanner to filter out objects + // that are both free and recently-allocated. It's safe to do that + // because we allocate-black if the GC is enabled. The conservative + // scanner produces pointers out of thin air, so without additional + // synchronization it might otherwise observe a partially-initialized + // object, which could crash the program. + span.freeIndexForScan = span.freeindex + } + + // Note cache c only valid while m acquired; see #47302 + // + // N.B. Use the full size because that matches how the GC + // will update the mem profile on the "free" side. 
+ // + // TODO(mknyszek): We should really count the header as part + // of gc_sys or something. The code below just pretends it is + // internal fragmentation and matches the GC's accounting by + // using the whole allocation slot. + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + // Pad tinysize allocations so they are aligned with the end + // of the tinyalloc region. This ensures that any arithmetic + // that goes off the top end of the object will be detectable + // by checkptr (issue 38872). + // Note that we disable tinyalloc when raceenabled for this to work. + // TODO: This padding is only performed when the race detector + // is enabled. It would be nice to enable it if any package + // was compiled with checkptr, but there's no easy way to + // detect that (especially at compile time). + // TODO: enable this padding for all allocations, not just + // tinyalloc ones. It's tricky because of pointer maps. + // Maybe just all noscan objects? + x = add(x, elemsize-constsize) + } + return x, elemsize +} + +// TODO(matloob): Should we let the go compiler inline this instead of using mkmalloc? +// We won't be able to use elemsize_ but that's probably ok. +func nextFreeFastTiny(span *mspan) gclinkptr { + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / elemsize_) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) // Is there a free object in the allocCache? 
+ result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)*elemsize_ + span.base()) + } + } + } + return nextFreeFastResult +} + +func nextFreeFastStub(span *mspan) gclinkptr { + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) // Is there a free object in the allocCache? + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)*elemsize_ + span.base()) + } + } + } + return nextFreeFastResult +} + +func heapSetTypeNoHeaderStub(x, dataSize uintptr, typ *_type, span *mspan) uintptr { + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(elemsize_)) { + throw("tried to write heap bits, but no heap bits in span") + } + scanSize := writeHeapBitsSmallStub(span, x, dataSize, typ) + if doubleCheckHeapSetType { + doubleCheckHeapType(x, dataSize, typ, nil, span) + } + return scanSize +} + +// writeHeapBitsSmallStub writes the heap bits for small objects whose ptr/scalar data is +// stored as a bitmap at the end of the span. +// +// Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span. +// heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_. +// +//go:nosplit +func writeHeapBitsSmallStub(span *mspan, x, dataSize uintptr, typ *_type) uintptr { + // The objects here are always really small, so a single load is sufficient. + src0 := readUintptr(getGCMask(typ)) + + const elemsize = elemsize_ + + // Create repetitions of the bitmap if we have a small slice backing store. 
+ scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + // N.B. We rely on dataSize being an exact multiple of the type size. + // The alternative is to be defensive and mask out src to the length + // of dataSize. The purpose is to save on one additional masking operation. + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + // Since we're never writing more than one uintptr's worth of bits, we're either going + // to do one or two writes. + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + // In the if statement below, we have to do two uintptr writes if the bits + // we need to write straddle across two different memory locations. But if + // the number of bits we're writing divides evenly into the number of bits + // in the uintptr we're writing, this can never happen. Since bitsIsPowerOfTwo + // is a compile-time constant in the generated code, in the case where the size is + // a power of two less than or equal to ptrBits, the compiler can remove the + // 'two writes' branch of the if statement and always do only one write without + // the check. + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + // Two writes. + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) + } else { + // One write. 
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) // if bits > ptrbits we always take the other branch + } + + const doubleCheck = false + if doubleCheck { + writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) + } + return scanSize +} + +func writeHeapBitsDoubleCheck(span *mspan, x, dataSize, src, src0, i, j, bits uintptr, typ *_type) { + srcRead := span.heapBitsSmallForAddr(x) + if srcRead != src { + print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n") + print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n") + print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n") + throw("bad pointer bits written for small object") + } +} diff --git a/src/runtime/malloc_tables_generated.go b/src/runtime/malloc_tables_generated.go new file mode 100644 index 0000000000..36650881fe --- /dev/null +++ b/src/runtime/malloc_tables_generated.go @@ -0,0 +1,1038 @@ +// Code generated by mkmalloc.go; DO NOT EDIT. 
+//go:build !plan9 + +package runtime + +import "unsafe" + +var mallocScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{ + mallocPanic, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + 
mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + 
mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + 
mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + 
mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + 
mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + 
mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + 
mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + 
mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, +} + +var mallocNoScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{ + mallocPanic, + mallocTiny1, + mallocTiny2, + mallocTiny3, + mallocTiny4, + mallocTiny5, + mallocTiny6, + mallocTiny7, + mallocTiny8, + mallocTiny9, + mallocTiny10, + mallocTiny11, + mallocTiny12, + mallocTiny13, + mallocTiny14, + mallocTiny15, + mallocgcSmallNoScanSC2, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + 
mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + 
mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + 
mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + 
mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + 
mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + 
mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + 
mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, +} diff --git a/src/runtime/malloc_tables_plan9.go b/src/runtime/malloc_tables_plan9.go new file mode 100644 index 0000000000..4d2740bbb2 --- /dev/null +++ b/src/runtime/malloc_tables_plan9.go @@ -0,0 +1,14 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 + +package runtime + +import "unsafe" + +var ( + mallocScanTable []func(size uintptr, typ *_type, needzero bool) unsafe.Pointer + mallocNoScanTable []func(size uintptr, typ *_type, needzero bool) unsafe.Pointer +) diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go index 6cd525d5e9..bf58947bbc 100644 --- a/src/runtime/malloc_test.go +++ b/src/runtime/malloc_test.go @@ -452,3 +452,13 @@ func BenchmarkGoroutineIdle(b *testing.B) { close(quit) time.Sleep(10 * time.Millisecond) } + +func TestMkmalloc(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveExternalNetwork(t) // To download the golang.org/x/tools dependency. 
+ output, err := exec.Command("go", "-C", "_mkmalloc", "test").CombinedOutput() + t.Logf("test output:\n%s", output) + if err != nil { + t.Errorf("_mkmalloc tests failed: %v", err) + } +} -- cgit v1.3-5-g9baa From 371c1d2fcb48fa79ac30812231ecef0e26f539dc Mon Sep 17 00:00:00 2001 From: wangboyao Date: Thu, 24 Jul 2025 14:49:44 +0800 Subject: cmd/internal/obj/riscv: add support for vector unit-stride fault-only-first load instructions Add support for vector unit-stride fault-only-first load instructions to the RISC-V assembler. This includes vle8ff, vle16ff, vle32ff and vle64ff. Change-Id: I5575a1ea155663852f92194fb79f08b5d52203de Reviewed-on: https://go-review.googlesource.com/c/go/+/690115 Reviewed-by: Junyang Shao Reviewed-by: Meng Zhuo Reviewed-by: Cherry Mui Reviewed-by: Joel Sing LUCI-TryBot-Result: Go LUCI --- src/cmd/asm/internal/asm/testdata/riscv64.s | 10 ++++++++++ src/cmd/asm/internal/asm/testdata/riscv64error.s | 1 + src/cmd/asm/internal/asm/testdata/riscv64validation.s | 2 ++ src/cmd/internal/obj/riscv/obj.go | 8 +++++++- 4 files changed, 20 insertions(+), 1 deletion(-) (limited to 'src/cmd') diff --git a/src/cmd/asm/internal/asm/testdata/riscv64.s b/src/cmd/asm/internal/asm/testdata/riscv64.s index 07a898465f..702b82223b 100644 --- a/src/cmd/asm/internal/asm/testdata/riscv64.s +++ b/src/cmd/asm/internal/asm/testdata/riscv64.s @@ -549,6 +549,16 @@ start: VSOXEI64V V3, V2, (X10) // a771250e VSOXEI64V V3, V2, V0, (X10) // a771250c + // 31.7.7: Unit-stride Fault-Only-First Loads + VLE8FFV (X10), V8 // 07040503 + VLE16FFV (X10), V8 // 07540503 + VLE32FFV (X10), V8 // 07640503 + VLE64FFV (X10), V8 // 07740503 + VLE8FFV (X10), V0, V8 // 07040501 + VLE16FFV (X10), V0, V8 // 07540501 + VLE32FFV (X10), V0, V8 // 07640501 + VLE64FFV (X10), V0, V8 // 07740501 + // 31.7.8: Vector Load/Store Segment Instructions // 31.7.8.1: Vector Unit-Stride Segment Loads and Stores diff --git a/src/cmd/asm/internal/asm/testdata/riscv64error.s 
b/src/cmd/asm/internal/asm/testdata/riscv64error.s index 113b4ad2d6..3c09770d2a 100644 --- a/src/cmd/asm/internal/asm/testdata/riscv64error.s +++ b/src/cmd/asm/internal/asm/testdata/riscv64error.s @@ -73,6 +73,7 @@ TEXT errors(SB),$0 // VSETIVLI X10, E32, M2, TA, MA, X12 // ERROR "expected immediate value" VLE8V (X10), V1, V3 // ERROR "invalid vector mask register" + VLE8FFV (X10), V1, V3 // ERROR "invalid vector mask register" VSE8V V3, V1, (X10) // ERROR "invalid vector mask register" VLSE8V (X10), X10, V1, V3 // ERROR "invalid vector mask register" VSSE8V V3, X11, V1, (X10) // ERROR "invalid vector mask register" diff --git a/src/cmd/asm/internal/asm/testdata/riscv64validation.s b/src/cmd/asm/internal/asm/testdata/riscv64validation.s index eac1a992c3..6549765916 100644 --- a/src/cmd/asm/internal/asm/testdata/riscv64validation.s +++ b/src/cmd/asm/internal/asm/testdata/riscv64validation.s @@ -20,6 +20,8 @@ TEXT validation(SB),$0 VSETVL X10, X11 // ERROR "expected integer register in rs1 position" VLE8V (X10), X10 // ERROR "expected vector register in vd position" VLE8V (V1), V3 // ERROR "expected integer register in rs1 position" + VLE8FFV (X10), X10 // ERROR "expected vector register in vd position" + VLE8FFV (V1), V3 // ERROR "expected integer register in rs1 position" VSE8V X10, (X10) // ERROR "expected vector register in vs1 position" VSE8V V3, (V1) // ERROR "expected integer register in rd position" VLSE8V (X10), V3 // ERROR "expected integer register in rs2 position" diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index 9d595f301c..91642ffbcb 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -2176,6 +2176,12 @@ var instructions = [ALAST & obj.AMask]instructionData{ AVSOXEI32V & obj.AMask: {enc: sVIVEncoding}, AVSOXEI64V & obj.AMask: {enc: sVIVEncoding}, + // 31.7.7: Unit-stride Fault-Only-First Loads + AVLE8FFV & obj.AMask: {enc: iVEncoding}, + AVLE16FFV & obj.AMask: {enc: 
iVEncoding}, + AVLE32FFV & obj.AMask: {enc: iVEncoding}, + AVLE64FFV & obj.AMask: {enc: iVEncoding}, + // 31.7.8: Vector Load/Store Segment Instructions AVLSEG2E8V & obj.AMask: {enc: iVEncoding}, AVLSEG3E8V & obj.AMask: {enc: iVEncoding}, @@ -3839,7 +3845,7 @@ func instructionsForProg(p *obj.Prog) []*instruction { ins.rs1 = uint32(p.From.Offset) } - case AVLE8V, AVLE16V, AVLE32V, AVLE64V, AVSE8V, AVSE16V, AVSE32V, AVSE64V, AVLMV, AVSMV, + case AVLE8V, AVLE16V, AVLE32V, AVLE64V, AVSE8V, AVSE16V, AVSE32V, AVSE64V, AVLE8FFV, AVLE16FFV, AVLE32FFV, AVLE64FFV, AVLMV, AVSMV, AVLSEG2E8V, AVLSEG3E8V, AVLSEG4E8V, AVLSEG5E8V, AVLSEG6E8V, AVLSEG7E8V, AVLSEG8E8V, AVLSEG2E16V, AVLSEG3E16V, AVLSEG4E16V, AVLSEG5E16V, AVLSEG6E16V, AVLSEG7E16V, AVLSEG8E16V, AVLSEG2E32V, AVLSEG3E32V, AVLSEG4E32V, AVLSEG5E32V, AVLSEG6E32V, AVLSEG7E32V, AVLSEG8E32V, -- cgit v1.3-5-g9baa From f15cd63ec4860c4f2c23cc992843546e0265c332 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 23 Sep 2025 16:31:26 -0700 Subject: cmd/compile: don't rely on loop info when there are irreducible loops Loop information is sketchy when there are irreducible loops. Sometimes blocks inside 2 loops can be recorded as only being part of the outer loop. That causes tighten to move values that want to move into such a block to move out of the loop altogether, breaking the invariant that operations have to be scheduled after their args. 
Fixes #75569 Change-Id: Idd80e6d2268094b8ae6387563081fdc1e211856a Reviewed-on: https://go-review.googlesource.com/c/go/+/706355 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/tighten.go | 27 +++++++----- test/fixedbugs/issue75569.go | 77 +++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 12 deletions(-) create mode 100644 test/fixedbugs/issue75569.go (limited to 'src/cmd') diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go index 48efdb5609..b1f787e03b 100644 --- a/src/cmd/compile/internal/ssa/tighten.go +++ b/src/cmd/compile/internal/ssa/tighten.go @@ -123,18 +123,21 @@ func tighten(f *Func) { // If the target location is inside a loop, // move the target location up to just before the loop head. - for _, b := range f.Blocks { - origloop := loops.b2l[b.ID] - for _, v := range b.Values { - t := target[v.ID] - if t == nil { - continue - } - targetloop := loops.b2l[t.ID] - for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) { - t = idom[targetloop.header.ID] - target[v.ID] = t - targetloop = loops.b2l[t.ID] + if !loops.hasIrreducible { + // Loop info might not be correct for irreducible loops. See issue 75569. + for _, b := range f.Blocks { + origloop := loops.b2l[b.ID] + for _, v := range b.Values { + t := target[v.ID] + if t == nil { + continue + } + targetloop := loops.b2l[t.ID] + for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) { + t = idom[targetloop.header.ID] + target[v.ID] = t + targetloop = loops.b2l[t.ID] + } } } } diff --git a/test/fixedbugs/issue75569.go b/test/fixedbugs/issue75569.go new file mode 100644 index 0000000000..8420641db2 --- /dev/null +++ b/test/fixedbugs/issue75569.go @@ -0,0 +1,77 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func fff(a []int, b bool, p, q *int) { +outer: + n := a[0] + a = a[1:] + switch n { + case 1: + goto one + case 2: + goto two + case 3: + goto three + case 4: + goto four + } + +one: + goto inner +two: + goto outer +three: + goto inner +four: + goto innerSideEntry + +inner: + n = a[0] + a = a[1:] + switch n { + case 1: + goto outer + case 2: + goto inner + case 3: + goto innerSideEntry + default: + return + } +innerSideEntry: + n = a[0] + a = a[1:] + switch n { + case 1: + goto outer + case 2: + goto inner + case 3: + goto inner + } + ggg(p, q) + goto inner +} + +var b bool + +func ggg(p, q *int) { + n := *p + 5 // this +5 ends up in the entry block, well before the *p load + if b { + *q = 0 + } + *p = n +} + +func main() { + var x, y int + fff([]int{4, 4, 4}, false, &x, &y) + if x != 5 { + panic(x) + } +} -- cgit v1.3-5-g9baa From 6b32c613ca2e69449b66ed552b93562e6be70577 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 18 Jul 2025 14:19:26 -0400 Subject: go/types: make typeset return an iterator typeset(t) now returns a func equivalent to iter.Seq2[Type, Type] for the sequence over (type, underlying) pairs in the typeset of t. underIs was modified to take advantage of the underlying iteration primitive, all, which computes the desired boolean conjunction directly. 
Change-Id: I3e17d5970fd2908c5dca0754db3e251bf1200af2 Reviewed-on: https://go-review.googlesource.com/c/go/+/688876 Auto-Submit: Alan Donovan LUCI-TryBot-Result: Go LUCI Reviewed-by: Robert Findley --- src/cmd/compile/internal/types2/builtins.go | 47 ++++++++++------------- src/cmd/compile/internal/types2/index.go | 12 +++--- src/cmd/compile/internal/types2/signature.go | 7 ++-- src/cmd/compile/internal/types2/typeparam.go | 10 ++--- src/cmd/compile/internal/types2/typeset.go | 16 ++++---- src/cmd/compile/internal/types2/under.go | 57 ++++++++++++---------------- src/go/types/builtins.go | 47 ++++++++++------------- src/go/types/index.go | 3 +- src/go/types/signature.go | 7 ++-- src/go/types/typeparam.go | 10 ++--- src/go/types/typeset.go | 16 ++++---- src/go/types/under.go | 57 ++++++++++++---------------- 12 files changed, 129 insertions(+), 160 deletions(-) (limited to 'src/cmd') diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go index 3de2857ed4..df207a2746 100644 --- a/src/cmd/compile/internal/types2/builtins.go +++ b/src/cmd/compile/internal/types2/builtins.go @@ -98,17 +98,17 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { y := args[1] hasString := false - typeset(y.typ, func(_, u Type) bool { + for _, u := range typeset(y.typ) { if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) { - return true - } - if isString(u) { + // typeset ⊇ {[]byte} + } else if isString(u) { + // typeset ⊇ {string} hasString = true - return true + } else { + y = nil + break } - y = nil - return false - }) + } if y != nil && hasString { // setting the signature also signals that we're done sig = makeSig(x.typ, x.typ, y.typ) @@ -368,16 +368,16 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( var special bool if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { 
special = true - typeset(y.typ, func(_, u Type) bool { + for _, u := range typeset(y.typ) { if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) { - return true - } - if isString(u) { - return true + // typeset ⊇ {[]byte} + } else if isString(u) { + // typeset ⊇ {string} + } else { + special = false + break } - special = false - return false - }) + } } // general case @@ -980,29 +980,22 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // or a type error if x is not a slice (or a type set of slices). func sliceElem(x *operand) (Type, *typeError) { var E Type - var err *typeError - typeset(x.typ, func(_, u Type) bool { + for _, u := range typeset(x.typ) { s, _ := u.(*Slice) if s == nil { if x.isNil() { // Printing x in this case would just print "nil". // Special case this so we can emphasize "untyped". - err = typeErrorf("argument must be a slice; have untyped nil") + return nil, typeErrorf("argument must be a slice; have untyped nil") } else { - err = typeErrorf("argument must be a slice; have %s", x) + return nil, typeErrorf("argument must be a slice; have %s", x) } - return false } if E == nil { E = s.elem } else if !Identical(E, s.elem) { - err = typeErrorf("mismatched slice element types %s and %s in %s", E, s.elem, x) - return false + return nil, typeErrorf("mismatched slice element types %s and %s in %s", E, s.elem, x) } - return true - }) - if err != nil { - return nil, err } return E, nil } diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go index 80e8514168..7e16a87332 100644 --- a/src/cmd/compile/internal/types2/index.go +++ b/src/cmd/compile/internal/types2/index.go @@ -216,11 +216,11 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { // determine common underlying type cu var ct, cu Type // type and respective common underlying type var hasString bool - typeset(x.typ, func(t, u Type) bool { + for t, u := range typeset(x.typ) { if u == nil { 
check.errorf(x, NonSliceableOperand, "cannot slice %s: no specific type in %s", x, x.typ) cu = nil - return false + break } // Treat strings like byte slices but remember that we saw a string. @@ -232,18 +232,16 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { // If this is the first type we're seeing, we're done. if cu == nil { ct, cu = t, u - return true + continue } // Otherwise, the current type must have the same underlying type as all previous types. if !Identical(cu, u) { check.errorf(x, NonSliceableOperand, "cannot slice %s: %s and %s have different underlying types", x, ct, t) cu = nil - return false + break } - - return true - }) + } if hasString { // If we saw a string, proceed with string type, // but don't go from untyped string to string. diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go index eaecb77af5..ea1cfd88cc 100644 --- a/src/cmd/compile/internal/types2/signature.go +++ b/src/cmd/compile/internal/types2/signature.go @@ -49,7 +49,7 @@ func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params } last := params.At(n - 1).typ var S *Slice - typeset(last, func(t, _ Type) bool { + for t := range typeset(last) { var s *Slice if isString(t) { s = NewSlice(universeByte) @@ -60,10 +60,9 @@ func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params S = s } else if !Identical(S, s) { S = nil - return false + break } - return true - }) + } if S == nil { panic(fmt.Sprintf("got %s, want variadic parameter of unnamed slice or string type", last)) } diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go index a04f928908..c60b5eb417 100644 --- a/src/cmd/compile/internal/types2/typeparam.go +++ b/src/cmd/compile/internal/types2/typeparam.go @@ -155,10 +155,10 @@ func (t *TypeParam) is(f func(*term) bool) bool { return t.iface().typeSet().is(f) } -// typeset is an iterator over the 
(type/underlying type) pairs of the +// typeset reports whether f(t, y) is true for all (type/underlying type) pairs of the // specific type terms of t's constraint. -// If there are no specific terms, typeset calls yield with (nil, nil). -// In any case, typeset is guaranteed to call yield at least once. -func (t *TypeParam) typeset(yield func(t, u Type) bool) { - t.iface().typeSet().typeset(yield) +// If there are no specific terms, typeset returns f(nil, nil). +// In any case, typeset is guaranteed to call f at least once. +func (t *TypeParam) typeset(f func(t, u Type) bool) bool { + return t.iface().typeSet().all(f) } diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go index 74436952f2..ce487e74f7 100644 --- a/src/cmd/compile/internal/types2/typeset.go +++ b/src/cmd/compile/internal/types2/typeset.go @@ -104,13 +104,12 @@ func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll // subsetOf reports whether s1 ⊆ s2. func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) } -// typeset is an iterator over the (type/underlying type) pairs in s. -// If s has no specific terms, typeset calls yield with (nil, nil). -// In any case, typeset is guaranteed to call yield at least once. -func (s *_TypeSet) typeset(yield func(t, u Type) bool) { +// all reports whether f(t, u) is true for each (type/underlying type) pairs in s. +// If s has no specific terms, all calls f(nil, nil). +// In any case, all is guaranteed to call f at least once. 
+func (s *_TypeSet) all(f func(t, u Type) bool) bool { if !s.hasTerms() { - yield(nil, nil) - return + return f(nil, nil) } for _, t := range s.terms { @@ -123,10 +122,11 @@ func (s *_TypeSet) typeset(yield func(t, u Type) bool) { if debug { assert(Identical(u, under(u))) } - if !yield(t.typ, u) { - break + if !f(t.typ, u) { + return false } } + return true } // is calls f with the specific type terms of s and reports whether diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go index 9e5334b724..078ba9ab17 100644 --- a/src/cmd/compile/internal/types2/under.go +++ b/src/cmd/compile/internal/types2/under.go @@ -4,6 +4,8 @@ package types2 +import "iter" + // under returns the true expanded underlying type. // If it doesn't exist, the result is Typ[Invalid]. // under must only be called when a type is known @@ -18,12 +20,18 @@ func under(t Type) Type { // If typ is a type parameter, underIs returns the result of typ.underIs(f). // Otherwise, underIs returns the result of f(under(typ)). func underIs(typ Type, f func(Type) bool) bool { - var ok bool - typeset(typ, func(_, u Type) bool { - ok = f(u) - return ok + return all(typ, func(_, u Type) bool { + return f(u) }) - return ok +} + +// all reports whether f(t, u) is true for all (type/underlying type) +// pairs in the typeset of t. See [typeset] for details of sequence. +func all(t Type, f func(t, u Type) bool) bool { + if p, _ := Unalias(t).(*TypeParam); p != nil { + return p.typeset(f) + } + return f(t, under(t)) } // typeset is an iterator over the (type/underlying type) pairs of the @@ -32,12 +40,10 @@ func underIs(typ Type, f func(Type) bool) bool { // In that case, if there are no specific terms, typeset calls yield with (nil, nil). // If t is not a type parameter, the implied type set consists of just t. // In any case, typeset is guaranteed to call yield at least once. 
-func typeset(t Type, yield func(t, u Type) bool) { - if p, _ := Unalias(t).(*TypeParam); p != nil { - p.typeset(yield) - return +func typeset(t Type) iter.Seq2[Type, Type] { + return func(yield func(t, u Type) bool) { + _ = all(t, yield) } - yield(t, under(t)) } // A typeError describes a type error. @@ -80,35 +86,28 @@ func (err *typeError) format(check *Checker) string { // with the single type t in its type set. func commonUnder(t Type, cond func(t, u Type) *typeError) (Type, *typeError) { var ct, cu Type // type and respective common underlying type - var err *typeError - - bad := func(format string, args ...any) bool { - err = typeErrorf(format, args...) - return false - } - - typeset(t, func(t, u Type) bool { + for t, u := range typeset(t) { if cond != nil { - if err = cond(t, u); err != nil { - return false + if err := cond(t, u); err != nil { + return nil, err } } if u == nil { - return bad("no specific type") + return nil, typeErrorf("no specific type") } // If this is the first type we're seeing, we're done. if cu == nil { ct, cu = t, u - return true + continue } // If we've seen a channel before, and we have a channel now, they must be compatible. if chu, _ := cu.(*Chan); chu != nil { if ch, _ := u.(*Chan); ch != nil { if !Identical(chu.elem, ch.elem) { - return bad("channels %s and %s have different element types", ct, t) + return nil, typeErrorf("channels %s and %s have different element types", ct, t) } // If we have different channel directions, keep the restricted one // and complain if they conflict. 
@@ -118,22 +117,16 @@ func commonUnder(t Type, cond func(t, u Type) *typeError) (Type, *typeError) { case chu.dir == SendRecv: ct, cu = t, u // switch to restricted channel case ch.dir != SendRecv: - return bad("channels %s and %s have conflicting directions", ct, t) + return nil, typeErrorf("channels %s and %s have conflicting directions", ct, t) } - return true + continue } } // Otherwise, the current type must have the same underlying type as all previous types. if !Identical(cu, u) { - return bad("%s and %s have different underlying types", ct, t) + return nil, typeErrorf("%s and %s have different underlying types", ct, t) } - - return true - }) - - if err != nil { - return nil, err } return cu, nil } diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go index 1163321ecd..9b03a40cbc 100644 --- a/src/go/types/builtins.go +++ b/src/go/types/builtins.go @@ -101,17 +101,17 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { y := args[1] hasString := false - typeset(y.typ, func(_, u Type) bool { + for _, u := range typeset(y.typ) { if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) { - return true - } - if isString(u) { + // typeset ⊇ {[]byte} + } else if isString(u) { + // typeset ⊇ {string} hasString = true - return true + } else { + y = nil + break } - y = nil - return false - }) + } if y != nil && hasString { // setting the signature also signals that we're done sig = makeSig(x.typ, x.typ, y.typ) @@ -371,16 +371,16 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b var special bool if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { special = true - typeset(y.typ, func(_, u Type) bool { + for _, u := range typeset(y.typ) { if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) { - return true - } - if isString(u) { - return true + // typeset ⊇ {[]byte} + } else if isString(u) 
{ + // typeset ⊇ {string} + } else { + special = false + break } - special = false - return false - }) + } } // general case @@ -983,29 +983,22 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // or a type error if x is not a slice (or a type set of slices). func sliceElem(x *operand) (Type, *typeError) { var E Type - var err *typeError - typeset(x.typ, func(_, u Type) bool { + for _, u := range typeset(x.typ) { s, _ := u.(*Slice) if s == nil { if x.isNil() { // Printing x in this case would just print "nil". // Special case this so we can emphasize "untyped". - err = typeErrorf("argument must be a slice; have untyped nil") + return nil, typeErrorf("argument must be a slice; have untyped nil") } else { - err = typeErrorf("argument must be a slice; have %s", x) + return nil, typeErrorf("argument must be a slice; have %s", x) } - return false } if E == nil { E = s.elem } else if !Identical(E, s.elem) { - err = typeErrorf("mismatched slice element types %s and %s in %s", E, s.elem, x) - return false + return nil, typeErrorf("mismatched slice element types %s and %s in %s", E, s.elem, x) } - return true - }) - if err != nil { - return nil, err } return E, nil } diff --git a/src/go/types/index.go b/src/go/types/index.go index 58c8893a8d..1d4f36dcf3 100644 --- a/src/go/types/index.go +++ b/src/go/types/index.go @@ -218,7 +218,8 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) { // determine common underlying type cu var ct, cu Type // type and respective common underlying type var hasString bool - typeset(x.typ, func(t, u Type) bool { + // TODO(adonovan): use go1.23 "range typeset()". 
+ typeset(x.typ)(func(t, u Type) bool { if u == nil { check.errorf(x, NonSliceableOperand, "cannot slice %s: no specific type in %s", x, x.typ) cu = nil diff --git a/src/go/types/signature.go b/src/go/types/signature.go index f059ecb183..fa41c797b2 100644 --- a/src/go/types/signature.go +++ b/src/go/types/signature.go @@ -62,7 +62,7 @@ func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params } last := params.At(n - 1).typ var S *Slice - typeset(last, func(t, _ Type) bool { + for t := range typeset(last) { var s *Slice if isString(t) { s = NewSlice(universeByte) @@ -73,10 +73,9 @@ func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params S = s } else if !Identical(S, s) { S = nil - return false + break } - return true - }) + } if S == nil { panic(fmt.Sprintf("got %s, want variadic parameter of unnamed slice or string type", last)) } diff --git a/src/go/types/typeparam.go b/src/go/types/typeparam.go index cdcd552739..2ffef8f613 100644 --- a/src/go/types/typeparam.go +++ b/src/go/types/typeparam.go @@ -158,10 +158,10 @@ func (t *TypeParam) is(f func(*term) bool) bool { return t.iface().typeSet().is(f) } -// typeset is an iterator over the (type/underlying type) pairs of the +// typeset reports whether f(t, y) is true for all (type/underlying type) pairs of the // specific type terms of t's constraint. -// If there are no specific terms, typeset calls yield with (nil, nil). -// In any case, typeset is guaranteed to call yield at least once. -func (t *TypeParam) typeset(yield func(t, u Type) bool) { - t.iface().typeSet().typeset(yield) +// If there are no specific terms, typeset returns f(nil, nil). +// In any case, typeset is guaranteed to call f at least once. 
+func (t *TypeParam) typeset(f func(t, u Type) bool) bool { + return t.iface().typeSet().all(f) } diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go index dd384e8504..46ed5ce180 100644 --- a/src/go/types/typeset.go +++ b/src/go/types/typeset.go @@ -107,13 +107,12 @@ func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll // subsetOf reports whether s1 ⊆ s2. func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) } -// typeset is an iterator over the (type/underlying type) pairs in s. -// If s has no specific terms, typeset calls yield with (nil, nil). -// In any case, typeset is guaranteed to call yield at least once. -func (s *_TypeSet) typeset(yield func(t, u Type) bool) { +// all reports whether f(t, u) is true for each (type/underlying type) pairs in s. +// If s has no specific terms, all calls f(nil, nil). +// In any case, all is guaranteed to call f at least once. +func (s *_TypeSet) all(f func(t, u Type) bool) bool { if !s.hasTerms() { - yield(nil, nil) - return + return f(nil, nil) } for _, t := range s.terms { @@ -126,10 +125,11 @@ func (s *_TypeSet) typeset(yield func(t, u Type) bool) { if debug { assert(Identical(u, under(u))) } - if !yield(t.typ, u) { - break + if !f(t.typ, u) { + return false } } + return true } // is calls f with the specific type terms of s and reports whether diff --git a/src/go/types/under.go b/src/go/types/under.go index 2c09c49134..43bf0ad07c 100644 --- a/src/go/types/under.go +++ b/src/go/types/under.go @@ -7,6 +7,8 @@ package types +import "iter" + // under returns the true expanded underlying type. // If it doesn't exist, the result is Typ[Invalid]. // under must only be called when a type is known @@ -21,12 +23,18 @@ func under(t Type) Type { // If typ is a type parameter, underIs returns the result of typ.underIs(f). // Otherwise, underIs returns the result of f(under(typ)). 
func underIs(typ Type, f func(Type) bool) bool { - var ok bool - typeset(typ, func(_, u Type) bool { - ok = f(u) - return ok + return all(typ, func(_, u Type) bool { + return f(u) }) - return ok +} + +// all reports whether f(t, u) is true for all (type/underlying type) +// pairs in the typeset of t. See [typeset] for details of sequence. +func all(t Type, f func(t, u Type) bool) bool { + if p, _ := Unalias(t).(*TypeParam); p != nil { + return p.typeset(f) + } + return f(t, under(t)) } // typeset is an iterator over the (type/underlying type) pairs of the @@ -35,12 +43,10 @@ func underIs(typ Type, f func(Type) bool) bool { // In that case, if there are no specific terms, typeset calls yield with (nil, nil). // If t is not a type parameter, the implied type set consists of just t. // In any case, typeset is guaranteed to call yield at least once. -func typeset(t Type, yield func(t, u Type) bool) { - if p, _ := Unalias(t).(*TypeParam); p != nil { - p.typeset(yield) - return +func typeset(t Type) iter.Seq2[Type, Type] { + return func(yield func(t, u Type) bool) { + _ = all(t, yield) } - yield(t, under(t)) } // A typeError describes a type error. @@ -83,35 +89,28 @@ func (err *typeError) format(check *Checker) string { // with the single type t in its type set. func commonUnder(t Type, cond func(t, u Type) *typeError) (Type, *typeError) { var ct, cu Type // type and respective common underlying type - var err *typeError - - bad := func(format string, args ...any) bool { - err = typeErrorf(format, args...) - return false - } - - typeset(t, func(t, u Type) bool { + for t, u := range typeset(t) { if cond != nil { - if err = cond(t, u); err != nil { - return false + if err := cond(t, u); err != nil { + return nil, err } } if u == nil { - return bad("no specific type") + return nil, typeErrorf("no specific type") } // If this is the first type we're seeing, we're done. 
if cu == nil { ct, cu = t, u - return true + continue } // If we've seen a channel before, and we have a channel now, they must be compatible. if chu, _ := cu.(*Chan); chu != nil { if ch, _ := u.(*Chan); ch != nil { if !Identical(chu.elem, ch.elem) { - return bad("channels %s and %s have different element types", ct, t) + return nil, typeErrorf("channels %s and %s have different element types", ct, t) } // If we have different channel directions, keep the restricted one // and complain if they conflict. @@ -121,22 +120,16 @@ func commonUnder(t Type, cond func(t, u Type) *typeError) (Type, *typeError) { case chu.dir == SendRecv: ct, cu = t, u // switch to restricted channel case ch.dir != SendRecv: - return bad("channels %s and %s have conflicting directions", ct, t) + return nil, typeErrorf("channels %s and %s have conflicting directions", ct, t) } - return true + continue } } // Otherwise, the current type must have the same underlying type as all previous types. if !Identical(cu, u) { - return bad("%s and %s have different underlying types", ct, t) + return nil, typeErrorf("%s and %s have different underlying types", ct, t) } - - return true - }) - - if err != nil { - return nil, err } return cu, nil } -- cgit v1.3-5-g9baa From 81a83bba216a1382e53216f6535f3035c4a1ec4e Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 25 Sep 2025 10:30:14 -0400 Subject: cmd: update x/tools@4df13e3 This includes only a couple of minor cmd/vet fixes for new(expr). 
export GOWORK=off cd src/cmd go get golang.org/x/tools@4df13e3 go mod tidy go mod vendor For #45624 Change-Id: Iafba4350d321d6cd1fcc91a062e2c150e3f4d553 Reviewed-on: https://go-review.googlesource.com/c/go/+/706735 LUCI-TryBot-Result: Go LUCI Reviewed-by: Robert Findley Auto-Submit: Alan Donovan --- src/cmd/go.mod | 2 +- src/cmd/go.sum | 4 +-- .../tools/go/analysis/passes/copylock/copylock.go | 5 ++- .../x/tools/internal/analysisinternal/analysis.go | 38 +++++++++++++++------- src/cmd/vendor/modules.txt | 2 +- 5 files changed, 34 insertions(+), 17 deletions(-) (limited to 'src/cmd') diff --git a/src/cmd/go.mod b/src/cmd/go.mod index c72a250aea..017883a787 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -11,7 +11,7 @@ require ( golang.org/x/sys v0.36.0 golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 golang.org/x/term v0.34.0 - golang.org/x/tools v0.37.1-0.20250915202913-9fccddc465ef + golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 ) require ( diff --git a/src/cmd/go.sum b/src/cmd/go.sum index a4801f1843..0906ffcc60 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -22,7 +22,7 @@ golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= -golang.org/x/tools v0.37.1-0.20250915202913-9fccddc465ef h1:ISPkUgvOYIt0oS7oVnwAPktCKBvgWkDlWWGMgX0veZM= -golang.org/x/tools v0.37.1-0.20250915202913-9fccddc465ef/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 h1:IcXDtHggZZo+GzNzvVRPyNFLnOc2/Z1gg3ZVIWF2uCU= +golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8= rsc.io/markdown 
v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ= diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index a4e455d9b3..d35b85f03a 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -157,7 +157,10 @@ func checkCopyLocksCallExpr(pass *analysis.Pass, ce *ast.CallExpr) { } if fun, ok := pass.TypesInfo.Uses[id].(*types.Builtin); ok { switch fun.Name() { - case "new", "len", "cap", "Sizeof", "Offsetof", "Alignof": + case "len", "cap", "Sizeof", "Offsetof", "Alignof": + // The argument of this operation is used only + // for its type (e.g. len(array)), or the operation + // does not copy a lock (e.g. len(slice)). return } } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index bc7f9984e9..cea89d34da 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -193,18 +193,23 @@ func CheckReadable(pass *analysis.Pass, filename string) error { return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) } -// AddImport checks whether this file already imports pkgpath and -// that import is in scope at pos. If so, it returns the name under -// which it was imported and a zero edit. Otherwise, it adds a new -// import of pkgpath, using a name derived from the preferred name, -// and returns the chosen name, a prefix to be concatenated with member -// to form a qualified name, and the edit for the new import. +// AddImport checks whether this file already imports pkgpath and that +// the import is in scope at pos. 
If so, it returns the name under +// which it was imported and no edits. Otherwise, it adds a new import +// of pkgpath, using a name derived from the preferred name, and +// returns the chosen name, a prefix to be concatenated with member to +// form a qualified name, and the edit for the new import. // -// In the special case that pkgpath is dot-imported then member, the -// identifier for which the import is being added, is consulted. If -// member is not shadowed at pos, AddImport returns (".", "", nil). -// (AddImport accepts the caller's implicit claim that the imported -// package declares member.) +// The member argument indicates the name of the desired symbol within +// the imported package. This is needed in the case when the existing +// import is a dot import, because then it is possible that the +// desired symbol is shadowed by other declarations in the current +// package. If member is not shadowed at pos, AddImport returns (".", +// "", nil). (AddImport accepts the caller's implicit claim that the +// imported package declares member.) +// +// Use a preferredName of "_" to request a blank import; +// member is ignored in this case. // // It does not mutate its arguments. func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (name, prefix string, newImport []analysis.TextEdit) { @@ -220,6 +225,10 @@ func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member pkgname := info.PkgNameOf(spec) if pkgname != nil && pkgname.Imported().Path() == pkgpath { name = pkgname.Name() + if preferredName == "_" { + // Request for blank import; any existing import will do. + return name, "", nil + } if name == "." { // The scope of ident must be the file scope. if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] { @@ -232,8 +241,12 @@ func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member } // We must add a new import. + // Ensure we have a fresh name. 
- newName := FreshName(scope, pos, preferredName) + newName := preferredName + if preferredName != "_" { + newName = FreshName(scope, pos, preferredName) + } // Create a new import declaration either before the first existing // declaration (which must exist), including its comments; or @@ -246,6 +259,7 @@ func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member if newName != preferredName || newName != pathpkg.Base(pkgpath) { newText = fmt.Sprintf("%s %q", newName, pkgpath) } + decl0 := file.Decls[0] var before ast.Node = decl0 switch decl0 := decl0.(type) { diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index f166b77ea2..133271355f 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -73,7 +73,7 @@ golang.org/x/text/internal/tag golang.org/x/text/language golang.org/x/text/transform golang.org/x/text/unicode/norm -# golang.org/x/tools v0.37.1-0.20250915202913-9fccddc465ef +# golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 ## explicit; go 1.24.0 golang.org/x/tools/cmd/bisect golang.org/x/tools/cover -- cgit v1.3-5-g9baa From 76d088eb74115ea14f774d1940557ca3047e1ebb Mon Sep 17 00:00:00 2001 From: Joel Sing Date: Wed, 10 Sep 2025 01:00:22 +1000 Subject: cmd/internal/obj/riscv: remove ACFLWSP/ACFSWSP and ACFLW/ACFSW These are RV32-only instructions that will not be implemented. 
Updates #71105 Change-Id: Ie386fe36e56f1151bb8756088dd79804584317c0 Reviewed-on: https://go-review.googlesource.com/c/go/+/702395 LUCI-TryBot-Result: Go LUCI Reviewed-by: Meng Zhuo Reviewed-by: Mark Ryan Reviewed-by: Mark Freeman Reviewed-by: Michael Pratt --- src/cmd/internal/obj/riscv/anames.go | 4 ---- src/cmd/internal/obj/riscv/cpu.go | 4 ---- 2 files changed, 8 deletions(-) (limited to 'src/cmd') diff --git a/src/cmd/internal/obj/riscv/anames.go b/src/cmd/internal/obj/riscv/anames.go index a8807fc7a8..6c48e2f7de 100644 --- a/src/cmd/internal/obj/riscv/anames.go +++ b/src/cmd/internal/obj/riscv/anames.go @@ -195,20 +195,16 @@ var Anames = []string{ "FLTQ", "FCLASSQ", "CLWSP", - "CFLWSP", "CLDSP", "CFLDSP", "CSWSP", "CSDSP", - "CFSWSP", "CFSDSP", "CLW", "CLD", - "CFLW", "CFLD", "CSW", "CSD", - "CFSW", "CFSD", "CJ", "CJR", diff --git a/src/cmd/internal/obj/riscv/cpu.go b/src/cmd/internal/obj/riscv/cpu.go index 305ef061e3..60174a0b3a 100644 --- a/src/cmd/internal/obj/riscv/cpu.go +++ b/src/cmd/internal/obj/riscv/cpu.go @@ -588,22 +588,18 @@ const ( // 26.3.1: Compressed Stack-Pointer-Based Loads and Stores ACLWSP - ACFLWSP ACLDSP ACFLDSP ACSWSP ACSDSP - ACFSWSP ACFSDSP // 26.3.2: Compressed Register-Based Loads and Stores ACLW ACLD - ACFLW ACFLD ACSW ACSD - ACFSW ACFSD // 26.4: Compressed Control Transfer Instructions -- cgit v1.3-5-g9baa From 6dceff8bad6213bca76361462c99b0d06fd0a8f9 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Tue, 16 Sep 2025 13:34:19 -0400 Subject: cmd/link: handle -w flag in external linking mode Currently, when the -w flag is set, it doesn't actually disable the debug info generation with in external linking mode. (It does make the Go object have no debug info, but C objects may still have.) Pass "-Wl,-S" to let the external linker disable debug info generation. 
Change-Id: I0fce56b9f23a45546b69b9e6dd027c5527b1bc87 Reviewed-on: https://go-review.googlesource.com/c/go/+/705857 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Reviewed-by: Ian Lance Taylor --- src/cmd/link/dwarf_test.go | 16 ++++++++++++++-- src/cmd/link/internal/ld/lib.go | 2 ++ 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'src/cmd') diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go index 5a464fccf3..4ca578498d 100644 --- a/src/cmd/link/dwarf_test.go +++ b/src/cmd/link/dwarf_test.go @@ -370,14 +370,26 @@ func TestFlagW(t *testing.T) { t.Fatal(err) } - tests := []struct { + type testCase struct { flag string wantDWARF bool - }{ + } + tests := []testCase{ {"-w", false}, // -w flag disables DWARF {"-s", false}, // -s implies -w {"-s -w=0", true}, // -w=0 negates the implied -w } + if testenv.HasCGO() { + tests = append(tests, + testCase{"-w -linkmode=external", false}, + testCase{"-s -linkmode=external", false}, + // Some external linkers don't have a way to preserve DWARF + // without emitting the symbol table. Skip this case for now. + // I suppose we can post- process, e.g. with objcopy. + //testCase{"-s -w=0 -linkmode=external", true}, + ) + } + for _, test := range tests { name := strings.ReplaceAll(test.flag, " ", "_") t.Run(name, func(t *testing.T) { diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 1a1bc18675..623acc1ad4 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1451,6 +1451,8 @@ func (ctxt *Link) hostlink() { } else { argv = append(argv, "-s") } + } else if *FlagW { + argv = append(argv, "-Wl,-S") // suppress debugging symbols } // On darwin, whether to combine DWARF into executable. 
-- cgit v1.3-5-g9baa From 393d91aea060e5b379e7913e524026d0672a96a7 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 25 Sep 2025 12:41:08 -0400 Subject: cmd/fix: remove all functionality The buildtag fixer has been incorporated into the vet analyzer of the same name; all other fixers were already no-ops since CL 695855. Fixes #73605 Updates #71859 Change-Id: I90b6c730849a5ecbac3e6fb6fc0e062b5de74831 Reviewed-on: https://go-review.googlesource.com/c/go/+/706758 Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob Auto-Submit: Alan Donovan LUCI-TryBot-Result: Go LUCI --- src/cmd/fix/buildtag.go | 52 --- src/cmd/fix/buildtag_test.go | 34 -- src/cmd/fix/cftype.go | 25 -- src/cmd/fix/context.go | 17 - src/cmd/fix/doc.go | 29 +- src/cmd/fix/egltype.go | 26 -- src/cmd/fix/fix.go | 552 ----------------------------- src/cmd/fix/gotypes.go | 16 - src/cmd/fix/import_test.go | 458 ------------------------ src/cmd/fix/jnitype.go | 17 - src/cmd/fix/main.go | 246 +------------ src/cmd/fix/main_test.go | 166 --------- src/cmd/fix/netipv6zone.go | 19 - src/cmd/fix/printerconfig.go | 16 - src/cmd/fix/typecheck.go | 814 ------------------------------------------- 15 files changed, 12 insertions(+), 2475 deletions(-) delete mode 100644 src/cmd/fix/buildtag.go delete mode 100644 src/cmd/fix/buildtag_test.go delete mode 100644 src/cmd/fix/cftype.go delete mode 100644 src/cmd/fix/context.go delete mode 100644 src/cmd/fix/egltype.go delete mode 100644 src/cmd/fix/fix.go delete mode 100644 src/cmd/fix/gotypes.go delete mode 100644 src/cmd/fix/import_test.go delete mode 100644 src/cmd/fix/jnitype.go delete mode 100644 src/cmd/fix/main_test.go delete mode 100644 src/cmd/fix/netipv6zone.go delete mode 100644 src/cmd/fix/printerconfig.go delete mode 100644 src/cmd/fix/typecheck.go (limited to 'src/cmd') diff --git a/src/cmd/fix/buildtag.go b/src/cmd/fix/buildtag.go deleted file mode 100644 index 6b706c4cb5..0000000000 --- a/src/cmd/fix/buildtag.go +++ /dev/null @@ -1,52 +0,0 @@ -// 
Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "go/ast" - "go/version" - "strings" -) - -func init() { - register(buildtagFix) -} - -const buildtagGoVersionCutoff = "go1.18" - -var buildtagFix = fix{ - name: "buildtag", - date: "2021-08-25", - f: buildtag, - desc: `Remove +build comments from modules using Go 1.18 or later`, -} - -func buildtag(f *ast.File) bool { - if version.Compare(*goVersion, buildtagGoVersionCutoff) < 0 { - return false - } - - // File is already gofmt-ed, so we know that if there are +build lines, - // they are in a comment group that starts with a //go:build line followed - // by a blank line. While we cannot delete comments from an AST and - // expect consistent output in general, this specific case - deleting only - // some lines from a comment block - does format correctly. - fixed := false - for _, g := range f.Comments { - sawGoBuild := false - for i, c := range g.List { - if strings.HasPrefix(c.Text, "//go:build ") { - sawGoBuild = true - } - if sawGoBuild && strings.HasPrefix(c.Text, "// +build ") { - g.List = g.List[:i] - fixed = true - break - } - } - } - - return fixed -} diff --git a/src/cmd/fix/buildtag_test.go b/src/cmd/fix/buildtag_test.go deleted file mode 100644 index e5997043c2..0000000000 --- a/src/cmd/fix/buildtag_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -func init() { - addTestCases(buildtagTests, buildtag) -} - -var buildtagTests = []testCase{ - { - Name: "buildtag.oldGo", - Version: "go1.10", - In: `//go:build yes -// +build yes - -package main -`, - }, - { - Name: "buildtag.new", - Version: "go1.99", - In: `//go:build yes -// +build yes - -package main -`, - Out: `//go:build yes - -package main -`, - }, -} diff --git a/src/cmd/fix/cftype.go b/src/cmd/fix/cftype.go deleted file mode 100644 index 3e9f4c5a35..0000000000 --- a/src/cmd/fix/cftype.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "go/ast" -) - -func init() { - register(cftypeFix) -} - -var cftypeFix = fix{ - name: "cftype", - date: "2017-09-27", - f: noop, - desc: `Fixes initializers and casts of C.*Ref and JNI types (removed)`, - disabled: false, -} - -func noop(f *ast.File) bool { - return false -} diff --git a/src/cmd/fix/context.go b/src/cmd/fix/context.go deleted file mode 100644 index fe2e095052..0000000000 --- a/src/cmd/fix/context.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -func init() { - register(contextFix) -} - -var contextFix = fix{ - name: "context", - date: "2016-09-09", - f: noop, - desc: `Change imports of golang.org/x/net/context to context (removed)`, - disabled: false, -} diff --git a/src/cmd/fix/doc.go b/src/cmd/fix/doc.go index 062eb79285..b3d6914471 100644 --- a/src/cmd/fix/doc.go +++ b/src/cmd/fix/doc.go @@ -9,29 +9,12 @@ the necessary changes to your programs. Usage: - go tool fix [-r name,...] [path ...] + go tool fix [ignored...] -Without an explicit path, fix reads standard input and writes the -result to standard output. 
- -If the named path is a file, fix rewrites the named files in place. -If the named path is a directory, fix rewrites all .go files in that -directory tree. When fix rewrites a file, it prints a line to standard -error giving the name of the file and the rewrite applied. - -If the -diff flag is set, no files are rewritten. Instead fix prints -the differences a rewrite would introduce. - -The -r flag restricts the set of rewrites considered to those in the -named list. By default fix considers all known rewrites. Fix's -rewrites are idempotent, so that it is safe to apply fix to updated -or partially updated code even without using the -r flag. - -Fix prints the full list of fixes it can apply in its help output; -to see them, run go tool fix -help. - -Fix does not make backup copies of the files that it edits. -Instead, use a version control system's “diff” functionality to inspect -the changes that fix makes before committing them. +This tool is currently in transition. All its historical fixers were +long obsolete and have been removed, so it is currently a no-op. In +due course the tool will integrate with the Go analysis framework +(golang.org/x/tools/go/analysis) and run a modern suite of fix +algorithms; see https://go.dev/issue/71859. */ package main diff --git a/src/cmd/fix/egltype.go b/src/cmd/fix/egltype.go deleted file mode 100644 index 8ba66efb06..0000000000 --- a/src/cmd/fix/egltype.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -func init() { - register(eglFixDisplay) - register(eglFixConfig) -} - -var eglFixDisplay = fix{ - name: "egl", - date: "2018-12-15", - f: noop, - desc: `Fixes initializers of EGLDisplay (removed)`, - disabled: false, -} - -var eglFixConfig = fix{ - name: "eglconf", - date: "2020-05-30", - f: noop, - desc: `Fixes initializers of EGLConfig (removed)`, - disabled: false, -} diff --git a/src/cmd/fix/fix.go b/src/cmd/fix/fix.go deleted file mode 100644 index 26adae41ee..0000000000 --- a/src/cmd/fix/fix.go +++ /dev/null @@ -1,552 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/ast" - "go/token" - "path" - "strconv" -) - -type fix struct { - name string - date string // date that fix was introduced, in YYYY-MM-DD format - f func(*ast.File) bool - desc string - disabled bool // whether this fix should be disabled by default -} - -var fixes []fix - -func register(f fix) { - fixes = append(fixes, f) -} - -// walk traverses the AST x, calling visit(y) for each node y in the tree but -// also with a pointer to each ast.Expr, ast.Stmt, and *ast.BlockStmt, -// in a bottom-up traversal. -func walk(x any, visit func(any)) { - walkBeforeAfter(x, nop, visit) -} - -func nop(any) {} - -// walkBeforeAfter is like walk but calls before(x) before traversing -// x's children and after(x) afterward. 
-func walkBeforeAfter(x any, before, after func(any)) { - before(x) - - switch n := x.(type) { - default: - panic(fmt.Errorf("unexpected type %T in walkBeforeAfter", x)) - - case nil: - - // pointers to interfaces - case *ast.Decl: - walkBeforeAfter(*n, before, after) - case *ast.Expr: - walkBeforeAfter(*n, before, after) - case *ast.Spec: - walkBeforeAfter(*n, before, after) - case *ast.Stmt: - walkBeforeAfter(*n, before, after) - - // pointers to struct pointers - case **ast.BlockStmt: - walkBeforeAfter(*n, before, after) - case **ast.CallExpr: - walkBeforeAfter(*n, before, after) - case **ast.FieldList: - walkBeforeAfter(*n, before, after) - case **ast.FuncType: - walkBeforeAfter(*n, before, after) - case **ast.Ident: - walkBeforeAfter(*n, before, after) - case **ast.BasicLit: - walkBeforeAfter(*n, before, after) - - // pointers to slices - case *[]ast.Decl: - walkBeforeAfter(*n, before, after) - case *[]ast.Expr: - walkBeforeAfter(*n, before, after) - case *[]*ast.File: - walkBeforeAfter(*n, before, after) - case *[]*ast.Ident: - walkBeforeAfter(*n, before, after) - case *[]ast.Spec: - walkBeforeAfter(*n, before, after) - case *[]ast.Stmt: - walkBeforeAfter(*n, before, after) - - // These are ordered and grouped to match ../../go/ast/ast.go - case *ast.Field: - walkBeforeAfter(&n.Names, before, after) - walkBeforeAfter(&n.Type, before, after) - walkBeforeAfter(&n.Tag, before, after) - case *ast.FieldList: - for _, field := range n.List { - walkBeforeAfter(field, before, after) - } - case *ast.BadExpr: - case *ast.Ident: - case *ast.Ellipsis: - walkBeforeAfter(&n.Elt, before, after) - case *ast.BasicLit: - case *ast.FuncLit: - walkBeforeAfter(&n.Type, before, after) - walkBeforeAfter(&n.Body, before, after) - case *ast.CompositeLit: - walkBeforeAfter(&n.Type, before, after) - walkBeforeAfter(&n.Elts, before, after) - case *ast.ParenExpr: - walkBeforeAfter(&n.X, before, after) - case *ast.SelectorExpr: - walkBeforeAfter(&n.X, before, after) - case *ast.IndexExpr: 
- walkBeforeAfter(&n.X, before, after) - walkBeforeAfter(&n.Index, before, after) - case *ast.IndexListExpr: - walkBeforeAfter(&n.X, before, after) - walkBeforeAfter(&n.Indices, before, after) - case *ast.SliceExpr: - walkBeforeAfter(&n.X, before, after) - if n.Low != nil { - walkBeforeAfter(&n.Low, before, after) - } - if n.High != nil { - walkBeforeAfter(&n.High, before, after) - } - case *ast.TypeAssertExpr: - walkBeforeAfter(&n.X, before, after) - walkBeforeAfter(&n.Type, before, after) - case *ast.CallExpr: - walkBeforeAfter(&n.Fun, before, after) - walkBeforeAfter(&n.Args, before, after) - case *ast.StarExpr: - walkBeforeAfter(&n.X, before, after) - case *ast.UnaryExpr: - walkBeforeAfter(&n.X, before, after) - case *ast.BinaryExpr: - walkBeforeAfter(&n.X, before, after) - walkBeforeAfter(&n.Y, before, after) - case *ast.KeyValueExpr: - walkBeforeAfter(&n.Key, before, after) - walkBeforeAfter(&n.Value, before, after) - - case *ast.ArrayType: - walkBeforeAfter(&n.Len, before, after) - walkBeforeAfter(&n.Elt, before, after) - case *ast.StructType: - walkBeforeAfter(&n.Fields, before, after) - case *ast.FuncType: - if n.TypeParams != nil { - walkBeforeAfter(&n.TypeParams, before, after) - } - walkBeforeAfter(&n.Params, before, after) - if n.Results != nil { - walkBeforeAfter(&n.Results, before, after) - } - case *ast.InterfaceType: - walkBeforeAfter(&n.Methods, before, after) - case *ast.MapType: - walkBeforeAfter(&n.Key, before, after) - walkBeforeAfter(&n.Value, before, after) - case *ast.ChanType: - walkBeforeAfter(&n.Value, before, after) - - case *ast.BadStmt: - case *ast.DeclStmt: - walkBeforeAfter(&n.Decl, before, after) - case *ast.EmptyStmt: - case *ast.LabeledStmt: - walkBeforeAfter(&n.Stmt, before, after) - case *ast.ExprStmt: - walkBeforeAfter(&n.X, before, after) - case *ast.SendStmt: - walkBeforeAfter(&n.Chan, before, after) - walkBeforeAfter(&n.Value, before, after) - case *ast.IncDecStmt: - walkBeforeAfter(&n.X, before, after) - case 
*ast.AssignStmt: - walkBeforeAfter(&n.Lhs, before, after) - walkBeforeAfter(&n.Rhs, before, after) - case *ast.GoStmt: - walkBeforeAfter(&n.Call, before, after) - case *ast.DeferStmt: - walkBeforeAfter(&n.Call, before, after) - case *ast.ReturnStmt: - walkBeforeAfter(&n.Results, before, after) - case *ast.BranchStmt: - case *ast.BlockStmt: - walkBeforeAfter(&n.List, before, after) - case *ast.IfStmt: - walkBeforeAfter(&n.Init, before, after) - walkBeforeAfter(&n.Cond, before, after) - walkBeforeAfter(&n.Body, before, after) - walkBeforeAfter(&n.Else, before, after) - case *ast.CaseClause: - walkBeforeAfter(&n.List, before, after) - walkBeforeAfter(&n.Body, before, after) - case *ast.SwitchStmt: - walkBeforeAfter(&n.Init, before, after) - walkBeforeAfter(&n.Tag, before, after) - walkBeforeAfter(&n.Body, before, after) - case *ast.TypeSwitchStmt: - walkBeforeAfter(&n.Init, before, after) - walkBeforeAfter(&n.Assign, before, after) - walkBeforeAfter(&n.Body, before, after) - case *ast.CommClause: - walkBeforeAfter(&n.Comm, before, after) - walkBeforeAfter(&n.Body, before, after) - case *ast.SelectStmt: - walkBeforeAfter(&n.Body, before, after) - case *ast.ForStmt: - walkBeforeAfter(&n.Init, before, after) - walkBeforeAfter(&n.Cond, before, after) - walkBeforeAfter(&n.Post, before, after) - walkBeforeAfter(&n.Body, before, after) - case *ast.RangeStmt: - walkBeforeAfter(&n.Key, before, after) - walkBeforeAfter(&n.Value, before, after) - walkBeforeAfter(&n.X, before, after) - walkBeforeAfter(&n.Body, before, after) - - case *ast.ImportSpec: - case *ast.ValueSpec: - walkBeforeAfter(&n.Type, before, after) - walkBeforeAfter(&n.Values, before, after) - walkBeforeAfter(&n.Names, before, after) - case *ast.TypeSpec: - if n.TypeParams != nil { - walkBeforeAfter(&n.TypeParams, before, after) - } - walkBeforeAfter(&n.Type, before, after) - - case *ast.BadDecl: - case *ast.GenDecl: - walkBeforeAfter(&n.Specs, before, after) - case *ast.FuncDecl: - if n.Recv != nil { - 
walkBeforeAfter(&n.Recv, before, after) - } - walkBeforeAfter(&n.Type, before, after) - if n.Body != nil { - walkBeforeAfter(&n.Body, before, after) - } - - case *ast.File: - walkBeforeAfter(&n.Decls, before, after) - - case *ast.Package: - walkBeforeAfter(&n.Files, before, after) - - case []*ast.File: - for i := range n { - walkBeforeAfter(&n[i], before, after) - } - case []ast.Decl: - for i := range n { - walkBeforeAfter(&n[i], before, after) - } - case []ast.Expr: - for i := range n { - walkBeforeAfter(&n[i], before, after) - } - case []*ast.Ident: - for i := range n { - walkBeforeAfter(&n[i], before, after) - } - case []ast.Stmt: - for i := range n { - walkBeforeAfter(&n[i], before, after) - } - case []ast.Spec: - for i := range n { - walkBeforeAfter(&n[i], before, after) - } - } - after(x) -} - -// imports reports whether f imports path. -func imports(f *ast.File, path string) bool { - return importSpec(f, path) != nil -} - -// importSpec returns the import spec if f imports path, -// or nil otherwise. -func importSpec(f *ast.File, path string) *ast.ImportSpec { - for _, s := range f.Imports { - if importPath(s) == path { - return s - } - } - return nil -} - -// importPath returns the unquoted import path of s, -// or "" if the path is not properly quoted. -func importPath(s *ast.ImportSpec) string { - t, err := strconv.Unquote(s.Path.Value) - if err == nil { - return t - } - return "" -} - -// declImports reports whether gen contains an import of path. -func declImports(gen *ast.GenDecl, path string) bool { - if gen.Tok != token.IMPORT { - return false - } - for _, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - if importPath(impspec) == path { - return true - } - } - return false -} - -// isTopName reports whether n is a top-level unresolved identifier with the given name. 
-func isTopName(n ast.Expr, name string) bool { - id, ok := n.(*ast.Ident) - return ok && id.Name == name && id.Obj == nil -} - -// renameTop renames all references to the top-level name old. -// It reports whether it makes any changes. -func renameTop(f *ast.File, old, new string) bool { - var fixed bool - - // Rename any conflicting imports - // (assuming package name is last element of path). - for _, s := range f.Imports { - if s.Name != nil { - if s.Name.Name == old { - s.Name.Name = new - fixed = true - } - } else { - _, thisName := path.Split(importPath(s)) - if thisName == old { - s.Name = ast.NewIdent(new) - fixed = true - } - } - } - - // Rename any top-level declarations. - for _, d := range f.Decls { - switch d := d.(type) { - case *ast.FuncDecl: - if d.Recv == nil && d.Name.Name == old { - d.Name.Name = new - d.Name.Obj.Name = new - fixed = true - } - case *ast.GenDecl: - for _, s := range d.Specs { - switch s := s.(type) { - case *ast.TypeSpec: - if s.Name.Name == old { - s.Name.Name = new - s.Name.Obj.Name = new - fixed = true - } - case *ast.ValueSpec: - for _, n := range s.Names { - if n.Name == old { - n.Name = new - n.Obj.Name = new - fixed = true - } - } - } - } - } - } - - // Rename top-level old to new, both unresolved names - // (probably defined in another file) and names that resolve - // to a declaration we renamed. - walk(f, func(n any) { - id, ok := n.(*ast.Ident) - if ok && isTopName(id, old) { - id.Name = new - fixed = true - } - if ok && id.Obj != nil && id.Name == old && id.Obj.Name == new { - id.Name = id.Obj.Name - fixed = true - } - }) - - return fixed -} - -// matchLen returns the length of the longest prefix shared by x and y. -func matchLen(x, y string) int { - i := 0 - for i < len(x) && i < len(y) && x[i] == y[i] { - i++ - } - return i -} - -// addImport adds the import path to the file f, if absent. 
-func addImport(f *ast.File, ipath string) (added bool) { - if imports(f, ipath) { - return false - } - - // Determine name of import. - // Assume added imports follow convention of using last element. - _, name := path.Split(ipath) - - // Rename any conflicting top-level references from name to name_. - renameTop(f, name, name+"_") - - newImport := &ast.ImportSpec{ - Path: &ast.BasicLit{ - Kind: token.STRING, - Value: strconv.Quote(ipath), - }, - } - - // Find an import decl to add to. - var ( - bestMatch = -1 - lastImport = -1 - impDecl *ast.GenDecl - impIndex = -1 - ) - for i, decl := range f.Decls { - gen, ok := decl.(*ast.GenDecl) - if ok && gen.Tok == token.IMPORT { - lastImport = i - // Do not add to import "C", to avoid disrupting the - // association with its doc comment, breaking cgo. - if declImports(gen, "C") { - continue - } - - // Compute longest shared prefix with imports in this block. - for j, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - n := matchLen(importPath(impspec), ipath) - if n > bestMatch { - bestMatch = n - impDecl = gen - impIndex = j - } - } - } - } - - // If no import decl found, add one after the last import. - if impDecl == nil { - impDecl = &ast.GenDecl{ - Tok: token.IMPORT, - } - f.Decls = append(f.Decls, nil) - copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) - f.Decls[lastImport+1] = impDecl - } - - // Ensure the import decl has parentheses, if needed. - if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() { - impDecl.Lparen = impDecl.Pos() - } - - insertAt := impIndex + 1 - if insertAt == 0 { - insertAt = len(impDecl.Specs) - } - impDecl.Specs = append(impDecl.Specs, nil) - copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) - impDecl.Specs[insertAt] = newImport - if insertAt > 0 { - // Assign same position as the previous import, - // so that the sorter sees it as being in the same block. 
- prev := impDecl.Specs[insertAt-1] - newImport.Path.ValuePos = prev.Pos() - newImport.EndPos = prev.Pos() - } - - f.Imports = append(f.Imports, newImport) - return true -} - -// deleteImport deletes the import path from the file f, if present. -func deleteImport(f *ast.File, path string) (deleted bool) { - oldImport := importSpec(f, path) - - // Find the import node that imports path, if any. - for i, decl := range f.Decls { - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT { - continue - } - for j, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - if oldImport != impspec { - continue - } - - // We found an import spec that imports path. - // Delete it. - deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] - - // If this was the last import spec in this decl, - // delete the decl, too. - if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] - } else if len(gen.Specs) == 1 { - gen.Lparen = token.NoPos // drop parens - } - if j > 0 { - // We deleted an entry but now there will be - // a blank line-sized hole where the import was. - // Close the hole by making the previous - // import appear to "end" where this one did. - gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End() - } - break - } - } - - // Delete it from f.Imports. - for i, imp := range f.Imports { - if imp == oldImport { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - break - } - } - - return -} - -// rewriteImport rewrites any import of path oldPath to path newPath. -func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) { - for _, imp := range f.Imports { - if importPath(imp) == oldPath { - rewrote = true - // record old End, because the default is to compute - // it using the length of imp.Path.Value. 
- imp.EndPos = imp.End() - imp.Path.Value = strconv.Quote(newPath) - } - } - return -} diff --git a/src/cmd/fix/gotypes.go b/src/cmd/fix/gotypes.go deleted file mode 100644 index 987dab5d02..0000000000 --- a/src/cmd/fix/gotypes.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -func init() { - register(gotypesFix) -} - -var gotypesFix = fix{ - name: "gotypes", - date: "2015-07-16", - f: noop, - desc: `Change imports of golang.org/x/tools/go/{exact,types} to go/{constant,types} (removed)`, -} diff --git a/src/cmd/fix/import_test.go b/src/cmd/fix/import_test.go deleted file mode 100644 index 8644e28f85..0000000000 --- a/src/cmd/fix/import_test.go +++ /dev/null @@ -1,458 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import "go/ast" - -func init() { - addTestCases(importTests, nil) -} - -var importTests = []testCase{ - { - Name: "import.0", - Fn: addImportFn("os"), - In: `package main - -import ( - "os" -) -`, - Out: `package main - -import ( - "os" -) -`, - }, - { - Name: "import.1", - Fn: addImportFn("os"), - In: `package main -`, - Out: `package main - -import "os" -`, - }, - { - Name: "import.2", - Fn: addImportFn("os"), - In: `package main - -// Comment -import "C" -`, - Out: `package main - -// Comment -import "C" -import "os" -`, - }, - { - Name: "import.3", - Fn: addImportFn("os"), - In: `package main - -// Comment -import "C" - -import ( - "io" - "utf8" -) -`, - Out: `package main - -// Comment -import "C" - -import ( - "io" - "os" - "utf8" -) -`, - }, - { - Name: "import.4", - Fn: deleteImportFn("os"), - In: `package main - -import ( - "os" -) -`, - Out: `package main -`, - }, - { - Name: "import.5", - Fn: deleteImportFn("os"), - In: `package main - -// Comment -import "C" -import "os" -`, - Out: `package main - -// Comment -import "C" -`, - }, - { - Name: "import.6", - Fn: deleteImportFn("os"), - In: `package main - -// Comment -import "C" - -import ( - "io" - "os" - "utf8" -) -`, - Out: `package main - -// Comment -import "C" - -import ( - "io" - "utf8" -) -`, - }, - { - Name: "import.7", - Fn: deleteImportFn("io"), - In: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - Out: `package main - -import ( - // a - "os" // b - "utf8" // c -) -`, - }, - { - Name: "import.8", - Fn: deleteImportFn("os"), - In: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - Out: `package main - -import ( - "io" // a - // b - "utf8" // c -) -`, - }, - { - Name: "import.9", - Fn: deleteImportFn("utf8"), - In: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - Out: `package main - -import ( - "io" // a - "os" // b - // c -) -`, - }, - { - Name: "import.10", - Fn: deleteImportFn("io"), - In: `package main - 
-import ( - "io" - "os" - "utf8" -) -`, - Out: `package main - -import ( - "os" - "utf8" -) -`, - }, - { - Name: "import.11", - Fn: deleteImportFn("os"), - In: `package main - -import ( - "io" - "os" - "utf8" -) -`, - Out: `package main - -import ( - "io" - "utf8" -) -`, - }, - { - Name: "import.12", - Fn: deleteImportFn("utf8"), - In: `package main - -import ( - "io" - "os" - "utf8" -) -`, - Out: `package main - -import ( - "io" - "os" -) -`, - }, - { - Name: "import.13", - Fn: rewriteImportFn("utf8", "encoding/utf8"), - In: `package main - -import ( - "io" - "os" - "utf8" // thanks ken -) -`, - Out: `package main - -import ( - "encoding/utf8" // thanks ken - "io" - "os" -) -`, - }, - { - Name: "import.14", - Fn: rewriteImportFn("asn1", "encoding/asn1"), - In: `package main - -import ( - "asn1" - "crypto" - "crypto/rsa" - _ "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "time" -) - -var x = 1 -`, - Out: `package main - -import ( - "crypto" - "crypto/rsa" - _ "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "time" -) - -var x = 1 -`, - }, - { - Name: "import.15", - Fn: rewriteImportFn("url", "net/url"), - In: `package main - -import ( - "bufio" - "net" - "path" - "url" -) - -var x = 1 // comment on x, not on url -`, - Out: `package main - -import ( - "bufio" - "net" - "net/url" - "path" -) - -var x = 1 // comment on x, not on url -`, - }, - { - Name: "import.16", - Fn: rewriteImportFn("http", "net/http", "template", "text/template"), - In: `package main - -import ( - "flag" - "http" - "log" - "template" -) - -var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 -`, - Out: `package main - -import ( - "flag" - "log" - "net/http" - "text/template" -) - -var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 -`, - }, - { - Name: "import.17", - Fn: addImportFn("x/y/z", "x/a/c"), - In: `package main - -// Comment -import "C" - -import ( - "a" - "b" - - "x/w" - - "d/f" -) -`, - Out: `package main 
- -// Comment -import "C" - -import ( - "a" - "b" - - "x/a/c" - "x/w" - "x/y/z" - - "d/f" -) -`, - }, - { - Name: "import.18", - Fn: addDelImportFn("e", "o"), - In: `package main - -import ( - "f" - "o" - "z" -) -`, - Out: `package main - -import ( - "e" - "f" - "z" -) -`, - }, -} - -func addImportFn(path ...string) func(*ast.File) bool { - return func(f *ast.File) bool { - fixed := false - for _, p := range path { - if !imports(f, p) { - addImport(f, p) - fixed = true - } - } - return fixed - } -} - -func deleteImportFn(path string) func(*ast.File) bool { - return func(f *ast.File) bool { - if imports(f, path) { - deleteImport(f, path) - return true - } - return false - } -} - -func addDelImportFn(p1 string, p2 string) func(*ast.File) bool { - return func(f *ast.File) bool { - fixed := false - if !imports(f, p1) { - addImport(f, p1) - fixed = true - } - if imports(f, p2) { - deleteImport(f, p2) - fixed = true - } - return fixed - } -} - -func rewriteImportFn(oldnew ...string) func(*ast.File) bool { - return func(f *ast.File) bool { - fixed := false - for i := 0; i < len(oldnew); i += 2 { - if imports(f, oldnew[i]) { - rewriteImport(f, oldnew[i], oldnew[i+1]) - fixed = true - } - } - return fixed - } -} diff --git a/src/cmd/fix/jnitype.go b/src/cmd/fix/jnitype.go deleted file mode 100644 index bee38e6720..0000000000 --- a/src/cmd/fix/jnitype.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -func init() { - register(jniFix) -} - -var jniFix = fix{ - name: "jni", - date: "2017-12-04", - f: noop, - desc: `Fixes initializers of JNI's jobject and subtypes (removed)`, - disabled: false, -} diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go index 933c32bcd9..87cc0d6414 100644 --- a/src/cmd/fix/main.go +++ b/src/cmd/fix/main.go @@ -5,261 +5,27 @@ package main import ( - "bytes" "flag" "fmt" - "go/ast" - "go/format" - "go/parser" - "go/scanner" - "go/token" - "go/version" - "internal/diff" - "io" - "io/fs" "os" - "path/filepath" - "slices" - "strings" - - "cmd/internal/telemetry/counter" ) var ( - fset = token.NewFileSet() - exitCode = 0 -) - -var allowedRewrites = flag.String("r", "", - "restrict the rewrites to this comma-separated list") - -var forceRewrites = flag.String("force", "", - "force these fixes to run even if the code looks updated") - -var allowed, force map[string]bool - -var ( - doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files") - goVersion = flag.String("go", "", "go language version for files") + _ = flag.Bool("diff", false, "obsolete, no effect") + _ = flag.String("go", "", "obsolete, no effect") + _ = flag.String("r", "", "obsolete, no effect") + _ = flag.String("force", "", "obsolete, no effect") ) -// enable for debugging fix failures -const debug = false // display incorrectly reformatted source and exit - func usage() { - fmt.Fprintf(os.Stderr, "usage: go tool fix [-diff] [-r fixname,...] [-force fixname,...] 
[path ...]\n") + fmt.Fprintf(os.Stderr, "usage: go tool fix [-diff] [-r ignored] [-force ignored] ...\n") flag.PrintDefaults() - fmt.Fprintf(os.Stderr, "\nAvailable rewrites are:\n") - slices.SortFunc(fixes, func(a, b fix) int { - return strings.Compare(a.name, b.name) - }) - for _, f := range fixes { - if f.disabled { - fmt.Fprintf(os.Stderr, "\n%s (disabled)\n", f.name) - } else { - fmt.Fprintf(os.Stderr, "\n%s\n", f.name) - } - desc := strings.TrimSpace(f.desc) - desc = strings.ReplaceAll(desc, "\n", "\n\t") - fmt.Fprintf(os.Stderr, "\t%s\n", desc) - } os.Exit(2) } func main() { - counter.Open() flag.Usage = usage flag.Parse() - counter.Inc("fix/invocations") - counter.CountFlags("fix/flag:", *flag.CommandLine) - - if !version.IsValid(*goVersion) { - report(fmt.Errorf("invalid -go=%s", *goVersion)) - os.Exit(exitCode) - } - - slices.SortFunc(fixes, func(a, b fix) int { - return strings.Compare(a.date, b.date) - }) - - if *allowedRewrites != "" { - allowed = make(map[string]bool) - for f := range strings.SplitSeq(*allowedRewrites, ",") { - allowed[f] = true - } - } - - if *forceRewrites != "" { - force = make(map[string]bool) - for f := range strings.SplitSeq(*forceRewrites, ",") { - force[f] = true - } - } - - if flag.NArg() == 0 { - if err := processFile("standard input", true); err != nil { - report(err) - } - os.Exit(exitCode) - } - - for i := 0; i < flag.NArg(); i++ { - path := flag.Arg(i) - switch dir, err := os.Stat(path); { - case err != nil: - report(err) - case dir.IsDir(): - walkDir(path) - default: - if err := processFile(path, false); err != nil { - report(err) - } - } - } - - os.Exit(exitCode) -} - -const parserMode = parser.ParseComments - -func gofmtFile(f *ast.File) ([]byte, error) { - var buf bytes.Buffer - if err := format.Node(&buf, fset, f); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func processFile(filename string, useStdin bool) error { - var f *os.File - var err error - var fixlog strings.Builder - - if useStdin { 
- f = os.Stdin - } else { - f, err = os.Open(filename) - if err != nil { - return err - } - defer f.Close() - } - - src, err := io.ReadAll(f) - if err != nil { - return err - } - - file, err := parser.ParseFile(fset, filename, src, parserMode) - if err != nil { - return err - } - - // Make sure file is in canonical format. - // This "fmt" pseudo-fix cannot be disabled. - newSrc, err := gofmtFile(file) - if err != nil { - return err - } - if !bytes.Equal(newSrc, src) { - newFile, err := parser.ParseFile(fset, filename, newSrc, parserMode) - if err != nil { - return err - } - file = newFile - fmt.Fprintf(&fixlog, " fmt") - } - - // Apply all fixes to file. - newFile := file - fixed := false - for _, fix := range fixes { - if allowed != nil && !allowed[fix.name] { - continue - } - if fix.disabled && !force[fix.name] { - continue - } - if fix.f(newFile) { - fixed = true - fmt.Fprintf(&fixlog, " %s", fix.name) - - // AST changed. - // Print and parse, to update any missing scoping - // or position information for subsequent fixers. - newSrc, err := gofmtFile(newFile) - if err != nil { - return err - } - newFile, err = parser.ParseFile(fset, filename, newSrc, parserMode) - if err != nil { - if debug { - fmt.Printf("%s", newSrc) - report(err) - os.Exit(exitCode) - } - return err - } - } - } - if !fixed { - return nil - } - fmt.Fprintf(os.Stderr, "%s: fixed %s\n", filename, fixlog.String()[1:]) - - // Print AST. We did that after each fix, so this appears - // redundant, but it is necessary to generate gofmt-compatible - // source code in a few cases. The official gofmt style is the - // output of the printer run on a standard AST generated by the parser, - // but the source we generated inside the loop above is the - // output of the printer run on a mangled AST generated by a fixer. 
- newSrc, err = gofmtFile(newFile) - if err != nil { - return err - } - - if *doDiff { - os.Stdout.Write(diff.Diff(filename, src, "fixed/"+filename, newSrc)) - return nil - } - - if useStdin { - os.Stdout.Write(newSrc) - return nil - } - - return os.WriteFile(f.Name(), newSrc, 0) -} - -func gofmt(n any) string { - var gofmtBuf strings.Builder - if err := format.Node(&gofmtBuf, fset, n); err != nil { - return "<" + err.Error() + ">" - } - return gofmtBuf.String() -} - -func report(err error) { - scanner.PrintError(os.Stderr, err) - exitCode = 2 -} - -func walkDir(path string) { - filepath.WalkDir(path, visitFile) -} - -func visitFile(path string, f fs.DirEntry, err error) error { - if err == nil && isGoFile(f) { - err = processFile(path, false) - } - if err != nil { - report(err) - } - return nil -} -func isGoFile(f fs.DirEntry) bool { - // ignore non-Go files - name := f.Name() - return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") + os.Exit(0) } diff --git a/src/cmd/fix/main_test.go b/src/cmd/fix/main_test.go deleted file mode 100644 index 8d841b101f..0000000000 --- a/src/cmd/fix/main_test.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/ast" - "go/parser" - "internal/diff" - "internal/testenv" - "strings" - "testing" -) - -type testCase struct { - Name string - Fn func(*ast.File) bool - Version string - In string - Out string -} - -var testCases []testCase - -func addTestCases(t []testCase, fn func(*ast.File) bool) { - // Fill in fn to avoid repetition in definitions. - if fn != nil { - for i := range t { - if t[i].Fn == nil { - t[i].Fn = fn - } - } - } - testCases = append(testCases, t...) 
-} - -func fnop(*ast.File) bool { return false } - -func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustBeGofmt bool) (out string, fixed, ok bool) { - file, err := parser.ParseFile(fset, desc, in, parserMode) - if err != nil { - t.Errorf("parsing: %v", err) - return - } - - outb, err := gofmtFile(file) - if err != nil { - t.Errorf("printing: %v", err) - return - } - if s := string(outb); in != s && mustBeGofmt { - t.Errorf("not gofmt-formatted.\n--- %s\n%s\n--- %s | gofmt\n%s", - desc, in, desc, s) - tdiff(t, "want", in, "have", s) - return - } - - if fn == nil { - for _, fix := range fixes { - if fix.f(file) { - fixed = true - } - } - } else { - fixed = fn(file) - } - - outb, err = gofmtFile(file) - if err != nil { - t.Errorf("printing: %v", err) - return - } - - return string(outb), fixed, true -} - -func TestRewrite(t *testing.T) { - // If cgo is enabled, enforce that cgo commands invoked by cmd/fix - // do not fail during testing. - if testenv.HasCGO() { - testenv.MustHaveGoBuild(t) // Really just 'go tool cgo', but close enough. - - // The reportCgoError hook is global, so we can't set it per-test - // if we want to be able to run those tests in parallel. - // Instead, simply set it to panic on error: the goroutine dump - // from the panic should help us determine which test failed. - prevReportCgoError := reportCgoError - reportCgoError = func(err error) { - panic(fmt.Sprintf("unexpected cgo error: %v", err)) - } - t.Cleanup(func() { reportCgoError = prevReportCgoError }) - } - - for _, tt := range testCases { - tt := tt - t.Run(tt.Name, func(t *testing.T) { - if tt.Version == "" { - if testing.Verbose() { - // Don't run in parallel: cmd/fix sometimes writes directly to stderr, - // and since -v prints which test is currently running we want that - // information to accurately correlate with the stderr output. 
- } else { - t.Parallel() - } - } else { - old := *goVersion - *goVersion = tt.Version - defer func() { - *goVersion = old - }() - } - - // Apply fix: should get tt.Out. - out, fixed, ok := parseFixPrint(t, tt.Fn, tt.Name, tt.In, true) - if !ok { - return - } - - // reformat to get printing right - out, _, ok = parseFixPrint(t, fnop, tt.Name, out, false) - if !ok { - return - } - - if tt.Out == "" { - tt.Out = tt.In - } - if out != tt.Out { - t.Errorf("incorrect output.\n") - if !strings.HasPrefix(tt.Name, "testdata/") { - t.Errorf("--- have\n%s\n--- want\n%s", out, tt.Out) - } - tdiff(t, "have", out, "want", tt.Out) - return - } - - if changed := out != tt.In; changed != fixed { - t.Errorf("changed=%v != fixed=%v", changed, fixed) - return - } - - // Should not change if run again. - out2, fixed2, ok := parseFixPrint(t, tt.Fn, tt.Name+" output", out, true) - if !ok { - return - } - - if fixed2 { - t.Errorf("applied fixes during second round") - return - } - - if out2 != out { - t.Errorf("changed output after second round of fixes.\n--- output after first round\n%s\n--- output after second round\n%s", - out, out2) - tdiff(t, "first", out, "second", out2) - } - }) - } -} - -func tdiff(t *testing.T, aname, a, bname, b string) { - t.Errorf("%s", diff.Diff(aname, []byte(a), bname, []byte(b))) -} diff --git a/src/cmd/fix/netipv6zone.go b/src/cmd/fix/netipv6zone.go deleted file mode 100644 index 75d2150e43..0000000000 --- a/src/cmd/fix/netipv6zone.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -func init() { - register(netipv6zoneFix) -} - -var netipv6zoneFix = fix{ - name: "netipv6zone", - date: "2012-11-26", - f: noop, - desc: `Adapt element key to IPAddr, UDPAddr or TCPAddr composite literals (removed). 
- -https://codereview.appspot.com/6849045/ -`, -} diff --git a/src/cmd/fix/printerconfig.go b/src/cmd/fix/printerconfig.go deleted file mode 100644 index f9e49d7c0b..0000000000 --- a/src/cmd/fix/printerconfig.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -func init() { - register(printerconfigFix) -} - -var printerconfigFix = fix{ - name: "printerconfig", - date: "2012-12-11", - f: noop, - desc: `Add element keys to Config composite literals (removed).`, -} diff --git a/src/cmd/fix/typecheck.go b/src/cmd/fix/typecheck.go deleted file mode 100644 index be21582fce..0000000000 --- a/src/cmd/fix/typecheck.go +++ /dev/null @@ -1,814 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "maps" - "os" - "os/exec" - "path/filepath" - "reflect" - "runtime" - "strings" -) - -// Partial type checker. -// -// The fact that it is partial is very important: the input is -// an AST and a description of some type information to -// assume about one or more packages, but not all the -// packages that the program imports. The checker is -// expected to do as much as it can with what it has been -// given. There is not enough information supplied to do -// a full type check, but the type checker is expected to -// apply information that can be derived from variable -// declarations, function and method returns, and type switches -// as far as it can, so that the caller can still tell the types -// of expression relevant to a particular fix. -// -// TODO(rsc,gri): Replace with go/typechecker. 
-// Doing that could be an interesting test case for go/typechecker: -// the constraints about working with partial information will -// likely exercise it in interesting ways. The ideal interface would -// be to pass typecheck a map from importpath to package API text -// (Go source code), but for now we use data structures (TypeConfig, Type). -// -// The strings mostly use gofmt form. -// -// A Field or FieldList has as its type a comma-separated list -// of the types of the fields. For example, the field list -// x, y, z int -// has type "int, int, int". - -// The prefix "type " is the type of a type. -// For example, given -// var x int -// type T int -// x's type is "int" but T's type is "type int". -// mkType inserts the "type " prefix. -// getType removes it. -// isType tests for it. - -func mkType(t string) string { - return "type " + t -} - -func getType(t string) string { - if !isType(t) { - return "" - } - return t[len("type "):] -} - -func isType(t string) bool { - return strings.HasPrefix(t, "type ") -} - -// TypeConfig describes the universe of relevant types. -// For ease of creation, the types are all referred to by string -// name (e.g., "reflect.Value"). TypeByName is the only place -// where the strings are resolved. - -type TypeConfig struct { - Type map[string]*Type - Var map[string]string - Func map[string]string - - // External maps from a name to its type. - // It provides additional typings not present in the Go source itself. - // For now, the only additional typings are those generated by cgo. - External map[string]string -} - -// typeof returns the type of the given name, which may be of -// the form "x" or "p.X". -func (cfg *TypeConfig) typeof(name string) string { - if cfg.Var != nil { - if t := cfg.Var[name]; t != "" { - return t - } - } - if cfg.Func != nil { - if t := cfg.Func[name]; t != "" { - return "func()" + t - } - } - return "" -} - -// Type describes the Fields and Methods of a type. 
-// If the field or method cannot be found there, it is next -// looked for in the Embed list. -type Type struct { - Field map[string]string // map field name to type - Method map[string]string // map method name to comma-separated return types (should start with "func ") - Embed []string // list of types this type embeds (for extra methods) - Def string // definition of named type -} - -// dot returns the type of "typ.name", making its decision -// using the type information in cfg. -func (typ *Type) dot(cfg *TypeConfig, name string) string { - if typ.Field != nil { - if t := typ.Field[name]; t != "" { - return t - } - } - if typ.Method != nil { - if t := typ.Method[name]; t != "" { - return t - } - } - - for _, e := range typ.Embed { - etyp := cfg.Type[e] - if etyp != nil { - if t := etyp.dot(cfg, name); t != "" { - return t - } - } - } - - return "" -} - -// typecheck type checks the AST f assuming the information in cfg. -// It returns two maps with type information: -// typeof maps AST nodes to type information in gofmt string form. -// assign maps type strings to lists of expressions that were assigned -// to values of another type that were assigned to that type. -func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[any]string, assign map[string][]any) { - typeof = make(map[any]string) - assign = make(map[string][]any) - cfg1 := &TypeConfig{} - *cfg1 = *cfg // make copy so we can add locally - copied := false - - // If we import "C", add types of cgo objects. - cfg.External = map[string]string{} - cfg1.External = cfg.External - if imports(f, "C") { - // Run cgo on gofmtFile(f) - // Parse, extract decls from _cgo_gotypes.go - // Map _Ctype_* types to C.* types. 
- err := func() error { - txt, err := gofmtFile(f) - if err != nil { - return err - } - dir, err := os.MkdirTemp(os.TempDir(), "fix_cgo_typecheck") - if err != nil { - return err - } - defer os.RemoveAll(dir) - err = os.WriteFile(filepath.Join(dir, "in.go"), txt, 0600) - if err != nil { - return err - } - goCmd := "go" - if goroot := runtime.GOROOT(); goroot != "" { - goCmd = filepath.Join(goroot, "bin", "go") - } - cmd := exec.Command(goCmd, "tool", "cgo", "-objdir", dir, "-srcdir", dir, "in.go") - if reportCgoError != nil { - // Since cgo command errors will be reported, also forward the error - // output from the command for debugging. - cmd.Stderr = os.Stderr - } - err = cmd.Run() - if err != nil { - return err - } - out, err := os.ReadFile(filepath.Join(dir, "_cgo_gotypes.go")) - if err != nil { - return err - } - cgo, err := parser.ParseFile(token.NewFileSet(), "cgo.go", out, 0) - if err != nil { - return err - } - for _, decl := range cgo.Decls { - fn, ok := decl.(*ast.FuncDecl) - if !ok { - continue - } - if strings.HasPrefix(fn.Name.Name, "_Cfunc_") { - var params, results []string - for _, p := range fn.Type.Params.List { - t := gofmt(p.Type) - t = strings.ReplaceAll(t, "_Ctype_", "C.") - params = append(params, t) - } - for _, r := range fn.Type.Results.List { - t := gofmt(r.Type) - t = strings.ReplaceAll(t, "_Ctype_", "C.") - results = append(results, t) - } - cfg.External["C."+fn.Name.Name[7:]] = joinFunc(params, results) - } - } - return nil - }() - if err != nil { - if reportCgoError == nil { - fmt.Fprintf(os.Stderr, "go fix: warning: no cgo types: %s\n", err) - } else { - reportCgoError(err) - } - } - } - - // gather function declarations - for _, decl := range f.Decls { - fn, ok := decl.(*ast.FuncDecl) - if !ok { - continue - } - typecheck1(cfg, fn.Type, typeof, assign) - t := typeof[fn.Type] - if fn.Recv != nil { - // The receiver must be a type. 
- rcvr := typeof[fn.Recv] - if !isType(rcvr) { - if len(fn.Recv.List) != 1 { - continue - } - rcvr = mkType(gofmt(fn.Recv.List[0].Type)) - typeof[fn.Recv.List[0].Type] = rcvr - } - rcvr = getType(rcvr) - if rcvr != "" && rcvr[0] == '*' { - rcvr = rcvr[1:] - } - typeof[rcvr+"."+fn.Name.Name] = t - } else { - if isType(t) { - t = getType(t) - } else { - t = gofmt(fn.Type) - } - typeof[fn.Name] = t - - // Record typeof[fn.Name.Obj] for future references to fn.Name. - typeof[fn.Name.Obj] = t - } - } - - // gather struct declarations - for _, decl := range f.Decls { - d, ok := decl.(*ast.GenDecl) - if ok { - for _, s := range d.Specs { - switch s := s.(type) { - case *ast.TypeSpec: - if cfg1.Type[s.Name.Name] != nil { - break - } - if !copied { - copied = true - // Copy map lazily: it's time. - cfg1.Type = maps.Clone(cfg.Type) - if cfg1.Type == nil { - cfg1.Type = make(map[string]*Type) - } - } - t := &Type{Field: map[string]string{}} - cfg1.Type[s.Name.Name] = t - switch st := s.Type.(type) { - case *ast.StructType: - for _, f := range st.Fields.List { - for _, n := range f.Names { - t.Field[n.Name] = gofmt(f.Type) - } - } - case *ast.ArrayType, *ast.StarExpr, *ast.MapType: - t.Def = gofmt(st) - } - } - } - } - } - - typecheck1(cfg1, f, typeof, assign) - return typeof, assign -} - -// reportCgoError, if non-nil, reports a non-nil error from running the "cgo" -// tool. (Set to a non-nil hook during testing if cgo is expected to work.) -var reportCgoError func(err error) - -func makeExprList(a []*ast.Ident) []ast.Expr { - var b []ast.Expr - for _, x := range a { - b = append(b, x) - } - return b -} - -// typecheck1 is the recursive form of typecheck. -// It is like typecheck but adds to the information in typeof -// instead of allocating a new map. -func typecheck1(cfg *TypeConfig, f any, typeof map[any]string, assign map[string][]any) { - // set sets the type of n to typ. - // If isDecl is true, n is being declared. 
- set := func(n ast.Expr, typ string, isDecl bool) { - if typeof[n] != "" || typ == "" { - if typeof[n] != typ { - assign[typ] = append(assign[typ], n) - } - return - } - typeof[n] = typ - - // If we obtained typ from the declaration of x - // propagate the type to all the uses. - // The !isDecl case is a cheat here, but it makes - // up in some cases for not paying attention to - // struct fields. The real type checker will be - // more accurate so we won't need the cheat. - if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") { - typeof[id.Obj] = typ - } - } - - // Type-check an assignment lhs = rhs. - // If isDecl is true, this is := so we can update - // the types of the objects that lhs refers to. - typecheckAssign := func(lhs, rhs []ast.Expr, isDecl bool) { - if len(lhs) > 1 && len(rhs) == 1 { - if _, ok := rhs[0].(*ast.CallExpr); ok { - t := split(typeof[rhs[0]]) - // Lists should have same length but may not; pair what can be paired. - for i := 0; i < len(lhs) && i < len(t); i++ { - set(lhs[i], t[i], isDecl) - } - return - } - } - if len(lhs) == 1 && len(rhs) == 2 { - // x = y, ok - rhs = rhs[:1] - } else if len(lhs) == 2 && len(rhs) == 1 { - // x, ok = y - lhs = lhs[:1] - } - - // Match as much as we can. - for i := 0; i < len(lhs) && i < len(rhs); i++ { - x, y := lhs[i], rhs[i] - if typeof[y] != "" { - set(x, typeof[y], isDecl) - } else { - set(y, typeof[x], false) - } - } - } - - expand := func(s string) string { - typ := cfg.Type[s] - if typ != nil && typ.Def != "" { - return typ.Def - } - return s - } - - // The main type check is a recursive algorithm implemented - // by walkBeforeAfter(n, before, after). - // Most of it is bottom-up, but in a few places we need - // to know the type of the function we are checking. - // The before function records that information on - // the curfn stack. 
- var curfn []*ast.FuncType - - before := func(n any) { - // push function type on stack - switch n := n.(type) { - case *ast.FuncDecl: - curfn = append(curfn, n.Type) - case *ast.FuncLit: - curfn = append(curfn, n.Type) - } - } - - // After is the real type checker. - after := func(n any) { - if n == nil { - return - } - if false && reflect.TypeOf(n).Kind() == reflect.Pointer { // debugging trace - defer func() { - if t := typeof[n]; t != "" { - pos := fset.Position(n.(ast.Node).Pos()) - fmt.Fprintf(os.Stderr, "%s: typeof[%s] = %s\n", pos, gofmt(n), t) - } - }() - } - - switch n := n.(type) { - case *ast.FuncDecl, *ast.FuncLit: - // pop function type off stack - curfn = curfn[:len(curfn)-1] - - case *ast.FuncType: - typeof[n] = mkType(joinFunc(split(typeof[n.Params]), split(typeof[n.Results]))) - - case *ast.FieldList: - // Field list is concatenation of sub-lists. - t := "" - for _, field := range n.List { - if t != "" { - t += ", " - } - t += typeof[field] - } - typeof[n] = t - - case *ast.Field: - // Field is one instance of the type per name. - all := "" - t := typeof[n.Type] - if !isType(t) { - // Create a type, because it is typically *T or *p.T - // and we might care about that type. - t = mkType(gofmt(n.Type)) - typeof[n.Type] = t - } - t = getType(t) - if len(n.Names) == 0 { - all = t - } else { - for _, id := range n.Names { - if all != "" { - all += ", " - } - all += t - typeof[id.Obj] = t - typeof[id] = t - } - } - typeof[n] = all - - case *ast.ValueSpec: - // var declaration. Use type if present. - if n.Type != nil { - t := typeof[n.Type] - if !isType(t) { - t = mkType(gofmt(n.Type)) - typeof[n.Type] = t - } - t = getType(t) - for _, id := range n.Names { - set(id, t, true) - } - } - // Now treat same as assignment. - typecheckAssign(makeExprList(n.Names), n.Values, true) - - case *ast.AssignStmt: - typecheckAssign(n.Lhs, n.Rhs, n.Tok == token.DEFINE) - - case *ast.Ident: - // Identifier can take its type from underlying object. 
- if t := typeof[n.Obj]; t != "" { - typeof[n] = t - } - - case *ast.SelectorExpr: - // Field or method. - name := n.Sel.Name - if t := typeof[n.X]; t != "" { - t = strings.TrimPrefix(t, "*") // implicit * - if typ := cfg.Type[t]; typ != nil { - if t := typ.dot(cfg, name); t != "" { - typeof[n] = t - return - } - } - tt := typeof[t+"."+name] - if isType(tt) { - typeof[n] = getType(tt) - return - } - } - // Package selector. - if x, ok := n.X.(*ast.Ident); ok && x.Obj == nil { - str := x.Name + "." + name - if cfg.Type[str] != nil { - typeof[n] = mkType(str) - return - } - if t := cfg.typeof(x.Name + "." + name); t != "" { - typeof[n] = t - return - } - } - - case *ast.CallExpr: - // make(T) has type T. - if isTopName(n.Fun, "make") && len(n.Args) >= 1 { - typeof[n] = gofmt(n.Args[0]) - return - } - // new(T) has type *T - if isTopName(n.Fun, "new") && len(n.Args) == 1 { - typeof[n] = "*" + gofmt(n.Args[0]) - return - } - // Otherwise, use type of function to determine arguments. - t := typeof[n.Fun] - if t == "" { - t = cfg.External[gofmt(n.Fun)] - } - in, out := splitFunc(t) - if in == nil && out == nil { - return - } - typeof[n] = join(out) - for i, arg := range n.Args { - if i >= len(in) { - break - } - if typeof[arg] == "" { - typeof[arg] = in[i] - } - } - - case *ast.TypeAssertExpr: - // x.(type) has type of x. - if n.Type == nil { - typeof[n] = typeof[n.X] - return - } - // x.(T) has type T. - if t := typeof[n.Type]; isType(t) { - typeof[n] = getType(t) - } else { - typeof[n] = gofmt(n.Type) - } - - case *ast.SliceExpr: - // x[i:j] has type of x. - typeof[n] = typeof[n.X] - - case *ast.IndexExpr: - // x[i] has key type of x's type. - t := expand(typeof[n.X]) - if strings.HasPrefix(t, "[") || strings.HasPrefix(t, "map[") { - // Lazy: assume there are no nested [] in the array - // length or map key type. - if _, elem, ok := strings.Cut(t, "]"); ok { - typeof[n] = elem - } - } - - case *ast.StarExpr: - // *x for x of type *T has type T when x is an expr. 
- // We don't use the result when *x is a type, but - // compute it anyway. - t := expand(typeof[n.X]) - if isType(t) { - typeof[n] = "type *" + getType(t) - } else if strings.HasPrefix(t, "*") { - typeof[n] = t[len("*"):] - } - - case *ast.UnaryExpr: - // &x for x of type T has type *T. - t := typeof[n.X] - if t != "" && n.Op == token.AND { - typeof[n] = "*" + t - } - - case *ast.CompositeLit: - // T{...} has type T. - typeof[n] = gofmt(n.Type) - - // Propagate types down to values used in the composite literal. - t := expand(typeof[n]) - if strings.HasPrefix(t, "[") { // array or slice - // Lazy: assume there are no nested [] in the array length. - if _, et, ok := strings.Cut(t, "]"); ok { - for _, e := range n.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - e = kv.Value - } - if typeof[e] == "" { - typeof[e] = et - } - } - } - } - if strings.HasPrefix(t, "map[") { // map - // Lazy: assume there are no nested [] in the map key type. - if kt, vt, ok := strings.Cut(t[len("map["):], "]"); ok { - for _, e := range n.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - if typeof[kv.Key] == "" { - typeof[kv.Key] = kt - } - if typeof[kv.Value] == "" { - typeof[kv.Value] = vt - } - } - } - } - } - if typ := cfg.Type[t]; typ != nil && len(typ.Field) > 0 { // struct - for _, e := range n.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - if ft := typ.Field[fmt.Sprintf("%s", kv.Key)]; ft != "" { - if typeof[kv.Value] == "" { - typeof[kv.Value] = ft - } - } - } - } - } - - case *ast.ParenExpr: - // (x) has type of x. 
- typeof[n] = typeof[n.X] - - case *ast.RangeStmt: - t := expand(typeof[n.X]) - if t == "" { - return - } - var key, value string - if t == "string" { - key, value = "int", "rune" - } else if strings.HasPrefix(t, "[") { - key = "int" - _, value, _ = strings.Cut(t, "]") - } else if strings.HasPrefix(t, "map[") { - if k, v, ok := strings.Cut(t[len("map["):], "]"); ok { - key, value = k, v - } - } - changed := false - if n.Key != nil && key != "" { - changed = true - set(n.Key, key, n.Tok == token.DEFINE) - } - if n.Value != nil && value != "" { - changed = true - set(n.Value, value, n.Tok == token.DEFINE) - } - // Ugly failure of vision: already type-checked body. - // Do it again now that we have that type info. - if changed { - typecheck1(cfg, n.Body, typeof, assign) - } - - case *ast.TypeSwitchStmt: - // Type of variable changes for each case in type switch, - // but go/parser generates just one variable. - // Repeat type check for each case with more precise - // type information. - as, ok := n.Assign.(*ast.AssignStmt) - if !ok { - return - } - varx, ok := as.Lhs[0].(*ast.Ident) - if !ok { - return - } - t := typeof[varx] - for _, cas := range n.Body.List { - cas := cas.(*ast.CaseClause) - if len(cas.List) == 1 { - // Variable has specific type only when there is - // exactly one type in the case list. - if tt := typeof[cas.List[0]]; isType(tt) { - tt = getType(tt) - typeof[varx] = tt - typeof[varx.Obj] = tt - typecheck1(cfg, cas.Body, typeof, assign) - } - } - } - // Restore t. - typeof[varx] = t - typeof[varx.Obj] = t - - case *ast.ReturnStmt: - if len(curfn) == 0 { - // Probably can't happen. - return - } - f := curfn[len(curfn)-1] - res := n.Results - if f.Results != nil { - t := split(typeof[f.Results]) - for i := 0; i < len(res) && i < len(t); i++ { - set(res[i], t[i], false) - } - } - - case *ast.BinaryExpr: - // Propagate types across binary ops that require two args of the same type. - switch n.Op { - case token.EQL, token.NEQ: // TODO: more cases. 
This is enough for the cftype fix. - if typeof[n.X] != "" && typeof[n.Y] == "" { - typeof[n.Y] = typeof[n.X] - } - if typeof[n.X] == "" && typeof[n.Y] != "" { - typeof[n.X] = typeof[n.Y] - } - } - } - } - walkBeforeAfter(f, before, after) -} - -// Convert between function type strings and lists of types. -// Using strings makes this a little harder, but it makes -// a lot of the rest of the code easier. This will all go away -// when we can use go/typechecker directly. - -// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"]. -func splitFunc(s string) (in, out []string) { - if !strings.HasPrefix(s, "func(") { - return nil, nil - } - - i := len("func(") // index of beginning of 'in' arguments - nparen := 0 - for j := i; j < len(s); j++ { - switch s[j] { - case '(': - nparen++ - case ')': - nparen-- - if nparen < 0 { - // found end of parameter list - out := strings.TrimSpace(s[j+1:]) - if len(out) >= 2 && out[0] == '(' && out[len(out)-1] == ')' { - out = out[1 : len(out)-1] - } - return split(s[i:j]), split(out) - } - } - } - return nil, nil -} - -// joinFunc is the inverse of splitFunc. -func joinFunc(in, out []string) string { - outs := "" - if len(out) == 1 { - outs = " " + out[0] - } else if len(out) > 1 { - outs = " (" + join(out) + ")" - } - return "func(" + join(in) + ")" + outs -} - -// split splits "int, float" into ["int", "float"] and splits "" into []. -func split(s string) []string { - out := []string{} - i := 0 // current type being scanned is s[i:j]. - nparen := 0 - for j := 0; j < len(s); j++ { - switch s[j] { - case ' ': - if i == j { - i++ - } - case '(': - nparen++ - case ')': - nparen-- - if nparen < 0 { - // probably can't happen - return nil - } - case ',': - if nparen == 0 { - if i < j { - out = append(out, s[i:j]) - } - i = j + 1 - } - } - } - if nparen != 0 { - // probably can't happen - return nil - } - if i < len(s) { - out = append(out, s[i:]) - } - return out -} - -// join is the inverse of split. 
-func join(x []string) string { - return strings.Join(x, ", ") -} -- cgit v1.3-5-g9baa From d7abfe4f0dc91568648a66495b9f5d7ebc0f22b5 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Fri, 30 May 2025 17:05:41 -0400 Subject: runtime: acquire/release C TSAN lock when calling cgo symbolizer/tracebacker When calling into C via cmd/cgo, the generated code calls _cgo_tsan_acquire / _cgo_tsan_release around the C call to report a dummy lock to the C/C++ TSAN runtime. This is necessary because the C/C++ TSAN runtime does not understand synchronization within Go and would otherwise report false positive race reports. See the comment in cmd/cgo/out.go for more details. Various C functions in runtime/cgo also contain manual calls to _cgo_tsan_acquire/release where necessary to suppress race reports. However, the cgo symbolizer and cgo traceback functions called from callCgoSymbolizer and cgoContextPCs, respectively, do not have any instrumentation [1]. They call directly into user C functions with no TSAN instrumentation. This means they have an opportunity to report false race conditions. The most direct way is via their argument. Both are passed a pointer to a struct stored on the Go stack, and both write to fields of the struct. If two calls are passed the same pointer from different threads, the C TSAN runtime will think this is a race. This is simple to achieve for the cgo symbolizer function, which the new regression test does. callCgoSymbolizer is called on the standard goroutine stack, so the argument is a pointer into the goroutine stack. If the goroutine moves Ms between two calls, it will look like a race. On the other hand, cgoContextPCs is called on the system stack. Each M has a unique system stack, so for it to pass the same argument pointer on different threads would require the first M to exit, free its stack, and the same region of address space to be used as the stack for a new M. Theoretically possible, but quite unlikely. 
Both of these are addressed by providing a C wrapper in runtime/cgo that calls _cgo_tsan_acquire/_cgo_tsan_release around calls to the symbolizer and traceback functions. There is a lot of room for future cleanup here. Most runtime/cgo functions have manual instrumentation in their C implementation. That could be removed in favor of instrumentation in the runtime. We could even theoretically remove the instrumentation from cmd/cgo and move it to cgocall. None of these are necessary, but may make things more consistent and easier to follow. [1] Note that the cgo traceback function called from the signal handler via x_cgo_callers _does_ have manual instrumentation. Fixes #73949. Cq-Include-Trybots: luci.golang.try:gotip-freebsd-amd64,gotip-linux-amd64-longtest,gotip-windows-amd64-longtest Change-Id: I6a6a636c9daa38f7fd00694af76b75cb93ba1886 Reviewed-on: https://go-review.googlesource.com/c/go/+/677955 Reviewed-by: Michael Knyszek Auto-Submit: Michael Pratt Reviewed-by: Ian Lance Taylor LUCI-TryBot-Result: Go LUCI --- .../testdata/tsan_tracebackctxt/main.go | 78 +++++++++++++++++++++ .../testdata/tsan_tracebackctxt/tracebackctxt_c.c | 70 +++++++++++++++++++ src/cmd/cgo/internal/testsanitizers/tsan_test.go | 3 +- src/runtime/cgo.go | 8 ++- src/runtime/cgo/callbacks.go | 31 +++++++-- src/runtime/cgo/gcc_context.c | 4 +- src/runtime/cgo/gcc_libinit.c | 79 ++++++++++++++++++--- src/runtime/cgo/gcc_libinit_windows.c | 80 +++++++++++++++++++--- src/runtime/cgo/libcgo.h | 52 +++++++++++--- src/runtime/symtab.go | 4 +- src/runtime/testdata/testprog/setcgotraceback.go | 45 ++++++++++++ src/runtime/traceback.go | 47 +++++++++---- src/runtime/traceback_test.go | 15 ++++ 13 files changed, 462 insertions(+), 54 deletions(-) create mode 100644 src/cmd/cgo/internal/testsanitizers/testdata/tsan_tracebackctxt/main.go create mode 100644 src/cmd/cgo/internal/testsanitizers/testdata/tsan_tracebackctxt/tracebackctxt_c.c create mode 100644 src/runtime/testdata/testprog/setcgotraceback.go 
(limited to 'src/cmd') diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/tsan_tracebackctxt/main.go b/src/cmd/cgo/internal/testsanitizers/testdata/tsan_tracebackctxt/main.go new file mode 100644 index 0000000000..998a08ca53 --- /dev/null +++ b/src/cmd/cgo/internal/testsanitizers/testdata/tsan_tracebackctxt/main.go @@ -0,0 +1,78 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +// Defined in tracebackctxt_c.c. +extern void C1(void); +extern void C2(void); +extern void tcContext(void*); +extern void tcTraceback(void*); +extern void tcSymbolizer(void*); +*/ +import "C" + +import ( + "fmt" + "runtime" + "sync" + "unsafe" +) + +// Regression test for https://go.dev/issue/73949. TSAN should not report races +// on writes to the argument passed to the symbolizer function. +// +// Triggering this race requires calls to the symbolizer function with the same +// argument pointer on multiple threads. The runtime passes a stack variable to +// this function, so that means we need to get a single goroutine to execute on +// two threads, calling the symbolizer function on each. +// +// runtime.CallersFrames / Next will call the symbolizer function (if there are +// C frames). So the approach here is, with GOMAXPROCS=2, have 2 goroutines +// that use CallersFrames over and over, both frequently calling Gosched in an +// attempt to get picked up by the other P. 
+ +var tracebackOK bool + +func main() { + runtime.GOMAXPROCS(2) + runtime.SetCgoTraceback(0, unsafe.Pointer(C.tcTraceback), unsafe.Pointer(C.tcContext), unsafe.Pointer(C.tcSymbolizer)) + C.C1() + if tracebackOK { + fmt.Println("OK") + } +} + +//export G1 +func G1() { + C.C2() +} + +//export G2 +func G2() { + pc := make([]uintptr, 32) + n := runtime.Callers(0, pc) + + var wg sync.WaitGroup + for range 2 { + wg.Go(func() { + for range 1000 { + cf := runtime.CallersFrames(pc[:n]) + var frames []runtime.Frame + for { + frame, more := cf.Next() + frames = append(frames, frame) + if !more { + break + } + } + runtime.Gosched() + } + }) + } + wg.Wait() + + tracebackOK = true +} diff --git a/src/cmd/cgo/internal/testsanitizers/testdata/tsan_tracebackctxt/tracebackctxt_c.c b/src/cmd/cgo/internal/testsanitizers/testdata/tsan_tracebackctxt/tracebackctxt_c.c new file mode 100644 index 0000000000..9ddaa4aaf2 --- /dev/null +++ b/src/cmd/cgo/internal/testsanitizers/testdata/tsan_tracebackctxt/tracebackctxt_c.c @@ -0,0 +1,70 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The C definitions for tracebackctxt.go. That file uses //export so +// it can't put function definitions in the "C" import comment. + +#include +#include + +// Functions exported from Go. 
+extern void G1(void); +extern void G2(void); + +void C1() { + G1(); +} + +void C2() { + G2(); +} + +struct cgoContextArg { + uintptr_t context; +}; + +struct cgoTracebackArg { + uintptr_t context; + uintptr_t sigContext; + uintptr_t* buf; + uintptr_t max; +}; + +struct cgoSymbolizerArg { + uintptr_t pc; + const char* file; + uintptr_t lineno; + const char* func; + uintptr_t entry; + uintptr_t more; + uintptr_t data; +}; + +void tcContext(void* parg) { + struct cgoContextArg* arg = (struct cgoContextArg*)(parg); + if (arg->context == 0) { + arg->context = 1; + } +} + +void tcTraceback(void* parg) { + int base, i; + struct cgoTracebackArg* arg = (struct cgoTracebackArg*)(parg); + if (arg->max < 1) { + return; + } + arg->buf[0] = 6; // Chosen by fair dice roll. +} + +void tcSymbolizer(void *parg) { + struct cgoSymbolizerArg* arg = (struct cgoSymbolizerArg*)(parg); + if (arg->pc == 0) { + return; + } + // Report two lines per PC returned by traceback, to test more handling. + arg->more = arg->file == NULL; + arg->file = "tracebackctxt.go"; + arg->func = "cFunction"; + arg->lineno = arg->pc + (arg->more << 16); +} diff --git a/src/cmd/cgo/internal/testsanitizers/tsan_test.go b/src/cmd/cgo/internal/testsanitizers/tsan_test.go index 265c5e3605..589db2e6bc 100644 --- a/src/cmd/cgo/internal/testsanitizers/tsan_test.go +++ b/src/cmd/cgo/internal/testsanitizers/tsan_test.go @@ -56,6 +56,7 @@ func TestTSAN(t *testing.T) { {src: "tsan13.go", needsRuntime: true}, {src: "tsan14.go", needsRuntime: true}, {src: "tsan15.go", needsRuntime: true}, + {src: "tsan_tracebackctxt", needsRuntime: true}, // Subdirectory } for _, tc := range cases { tc := tc @@ -67,7 +68,7 @@ func TestTSAN(t *testing.T) { defer dir.RemoveAll(t) outPath := dir.Join(name) - mustRun(t, config.goCmd("build", "-o", outPath, srcPath(tc.src))) + mustRun(t, config.goCmd("build", "-o", outPath, "./"+srcPath(tc.src))) cmdArgs := []string{outPath} if goos == "linux" { diff --git a/src/runtime/cgo.go 
b/src/runtime/cgo.go index eca905bad9..60f2403d73 100644 --- a/src/runtime/cgo.go +++ b/src/runtime/cgo.go @@ -15,7 +15,9 @@ import "unsafe" //go:linkname _cgo_sys_thread_create _cgo_sys_thread_create //go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done //go:linkname _cgo_callers _cgo_callers -//go:linkname _cgo_set_context_function _cgo_set_context_function +//go:linkname _cgo_set_traceback_functions _cgo_set_traceback_functions +//go:linkname _cgo_call_traceback_function _cgo_call_traceback_function +//go:linkname _cgo_call_symbolizer_function _cgo_call_symbolizer_function //go:linkname _cgo_yield _cgo_yield //go:linkname _cgo_pthread_key_created _cgo_pthread_key_created //go:linkname _cgo_bindm _cgo_bindm @@ -27,7 +29,9 @@ var ( _cgo_sys_thread_create unsafe.Pointer _cgo_notify_runtime_init_done unsafe.Pointer _cgo_callers unsafe.Pointer - _cgo_set_context_function unsafe.Pointer + _cgo_set_traceback_functions unsafe.Pointer + _cgo_call_traceback_function unsafe.Pointer + _cgo_call_symbolizer_function unsafe.Pointer _cgo_yield unsafe.Pointer _cgo_pthread_key_created unsafe.Pointer _cgo_bindm unsafe.Pointer diff --git a/src/runtime/cgo/callbacks.go b/src/runtime/cgo/callbacks.go index 3c246a88b6..986f61914f 100644 --- a/src/runtime/cgo/callbacks.go +++ b/src/runtime/cgo/callbacks.go @@ -121,13 +121,30 @@ var _cgo_bindm = &x_cgo_bindm var x_cgo_notify_runtime_init_done byte var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done -// Sets the traceback context function. See runtime.SetCgoTraceback. - -//go:cgo_import_static x_cgo_set_context_function -//go:linkname x_cgo_set_context_function x_cgo_set_context_function -//go:linkname _cgo_set_context_function _cgo_set_context_function -var x_cgo_set_context_function byte -var _cgo_set_context_function = &x_cgo_set_context_function +// Sets the traceback, context, and symbolizer functions. See +// runtime.SetCgoTraceback. 
+ +//go:cgo_import_static x_cgo_set_traceback_functions +//go:linkname x_cgo_set_traceback_functions x_cgo_set_traceback_functions +//go:linkname _cgo_set_traceback_functions _cgo_set_traceback_functions +var x_cgo_set_traceback_functions byte +var _cgo_set_traceback_functions = &x_cgo_set_traceback_functions + +// Call the traceback function registered with x_cgo_set_traceback_functions. + +//go:cgo_import_static x_cgo_call_traceback_function +//go:linkname x_cgo_call_traceback_function x_cgo_call_traceback_function +//go:linkname _cgo_call_traceback_function _cgo_call_traceback_function +var x_cgo_call_traceback_function byte +var _cgo_call_traceback_function = &x_cgo_call_traceback_function + +// Call the symbolizer function registered with x_cgo_set_symbolizer_functions. + +//go:cgo_import_static x_cgo_call_symbolizer_function +//go:linkname x_cgo_call_symbolizer_function x_cgo_call_symbolizer_function +//go:linkname _cgo_call_symbolizer_function _cgo_call_symbolizer_function +var x_cgo_call_symbolizer_function byte +var _cgo_call_symbolizer_function = &x_cgo_call_symbolizer_function // Calls a libc function to execute background work injected via libc // interceptors, such as processing pending signals under the thread diff --git a/src/runtime/cgo/gcc_context.c b/src/runtime/cgo/gcc_context.c index ad58692821..b647c99a98 100644 --- a/src/runtime/cgo/gcc_context.c +++ b/src/runtime/cgo/gcc_context.c @@ -8,11 +8,11 @@ // Releases the cgo traceback context. 
void _cgo_release_context(uintptr_t ctxt) { - void (*pfn)(struct context_arg*); + void (*pfn)(struct cgoContextArg*); pfn = _cgo_get_context_function(); if (ctxt != 0 && pfn != nil) { - struct context_arg arg; + struct cgoContextArg arg; arg.Context = ctxt; (*pfn)(&arg); diff --git a/src/runtime/cgo/gcc_libinit.c b/src/runtime/cgo/gcc_libinit.c index e9b0a3f769..05998fadf8 100644 --- a/src/runtime/cgo/gcc_libinit.c +++ b/src/runtime/cgo/gcc_libinit.c @@ -32,8 +32,14 @@ static void pthread_key_destructor(void* g); uintptr_t x_cgo_pthread_key_created; void (*x_crosscall2_ptr)(void (*fn)(void *), void *, int, size_t); +// The traceback function, used when tracing C calls. +static void (*cgo_traceback_function)(struct cgoTracebackArg*); + // The context function, used when tracing back C calls into Go. -static void (*cgo_context_function)(struct context_arg*); +static void (*cgo_context_function)(struct cgoContextArg*); + +// The symbolizer function, used when symbolizing C frames. +static void (*cgo_symbolizer_function)(struct cgoSymbolizerArg*); void x_cgo_sys_thread_create(void* (*func)(void*), void* arg) { @@ -52,7 +58,7 @@ x_cgo_sys_thread_create(void* (*func)(void*), void* arg) { uintptr_t _cgo_wait_runtime_init_done(void) { - void (*pfn)(struct context_arg*); + void (*pfn)(struct cgoContextArg*); int done; pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME); @@ -70,7 +76,6 @@ _cgo_wait_runtime_init_done(void) { x_cgo_pthread_key_created = 1; } - // TODO(iant): For the case of a new C thread calling into Go, such // as when using -buildmode=c-archive, we know that Go runtime // initialization is complete but we do not know that all Go init @@ -87,7 +92,7 @@ _cgo_wait_runtime_init_done(void) { } if (pfn != nil) { - struct context_arg arg; + struct cgoContextArg arg; arg.Context = 0; (*pfn)(&arg); @@ -138,17 +143,71 @@ x_cgo_notify_runtime_init_done(void* dummy __attribute__ ((unused))) { pthread_mutex_unlock(&runtime_init_mu); } -// Sets the context 
function to call to record the traceback context -// when calling a Go function from C code. Called from runtime.SetCgoTraceback. -void x_cgo_set_context_function(void (*context)(struct context_arg*)) { - __atomic_store_n(&cgo_context_function, context, __ATOMIC_RELEASE); +// Sets the traceback, context, and symbolizer functions. Called from +// runtime.SetCgoTraceback. +void x_cgo_set_traceback_functions(struct cgoSetTracebackFunctionsArg* arg) { + __atomic_store_n(&cgo_traceback_function, arg->Traceback, __ATOMIC_RELEASE); + __atomic_store_n(&cgo_context_function, arg->Context, __ATOMIC_RELEASE); + __atomic_store_n(&cgo_symbolizer_function, arg->Symbolizer, __ATOMIC_RELEASE); +} + +// Gets the traceback function to call to trace C calls. +void (*(_cgo_get_traceback_function(void)))(struct cgoTracebackArg*) { + return __atomic_load_n(&cgo_traceback_function, __ATOMIC_CONSUME); +} + +// Call the traceback function registered with x_cgo_set_traceback_functions. +// +// The traceback function is an arbitrary user C function which may be built +// with TSAN, and thus must be wrapped with TSAN acquire/release calls. For +// normal cgo calls, cmd/cgo automatically inserts TSAN acquire/release calls. +// Since the traceback, context, and symbolizer functions are registered at +// startup and called via the runtime, they do not get automatic TSAN +// acquire/release calls. +// +// The only purpose of this wrapper is to perform TSAN acquire/release. +// Alternatively, if the runtime arranged to safely call TSAN acquire/release, +// it could perform the call directly. +void x_cgo_call_traceback_function(struct cgoTracebackArg* arg) { + void (*pfn)(struct cgoTracebackArg*); + + pfn = _cgo_get_traceback_function(); + if (pfn == nil) { + return; + } + + _cgo_tsan_acquire(); + (*pfn)(arg); + _cgo_tsan_release(); } -// Gets the context function. 
-void (*(_cgo_get_context_function(void)))(struct context_arg*) { +// Gets the context function to call to record the traceback context +// when calling a Go function from C code. +void (*(_cgo_get_context_function(void)))(struct cgoContextArg*) { return __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME); } +// Gets the symbolizer function to call to symbolize C frames. +void (*(_cgo_get_symbolizer_function(void)))(struct cgoSymbolizerArg*) { + return __atomic_load_n(&cgo_symbolizer_function, __ATOMIC_CONSUME); +} + +// Call the symbolizer function registered with x_cgo_set_traceback_functions. +// +// See comment on x_cgo_call_traceback_function. +void x_cgo_call_symbolizer_function(struct cgoSymbolizerArg* arg) { + void (*pfn)(struct cgoSymbolizerArg*); + + pfn = _cgo_get_symbolizer_function(); + if (pfn == nil) { + return; + } + + _cgo_tsan_acquire(); + (*pfn)(arg); + _cgo_tsan_release(); +} + // _cgo_try_pthread_create retries pthread_create if it fails with // EAGAIN. int diff --git a/src/runtime/cgo/gcc_libinit_windows.c b/src/runtime/cgo/gcc_libinit_windows.c index 9275185d6e..926f916843 100644 --- a/src/runtime/cgo/gcc_libinit_windows.c +++ b/src/runtime/cgo/gcc_libinit_windows.c @@ -32,6 +32,7 @@ static CRITICAL_SECTION runtime_init_cs; static HANDLE runtime_init_wait; static int runtime_init_done; +// No pthreads on Windows, these are always zero. 
uintptr_t x_cgo_pthread_key_created; void (*x_crosscall2_ptr)(void (*fn)(void *), void *, int, size_t); @@ -81,7 +82,7 @@ _cgo_is_runtime_initialized() { uintptr_t _cgo_wait_runtime_init_done(void) { - void (*pfn)(struct context_arg*); + void (*pfn)(struct cgoContextArg*); _cgo_maybe_run_preinit(); while (!_cgo_is_runtime_initialized()) { @@ -89,7 +90,7 @@ _cgo_wait_runtime_init_done(void) { } pfn = _cgo_get_context_function(); if (pfn != nil) { - struct context_arg arg; + struct cgoContextArg arg; arg.Context = 0; (*pfn)(&arg); @@ -118,20 +119,54 @@ x_cgo_notify_runtime_init_done(void* dummy) { } } +// The traceback function, used when tracing C calls. +static void (*cgo_traceback_function)(struct cgoTracebackArg*); + // The context function, used when tracing back C calls into Go. -static void (*cgo_context_function)(struct context_arg*); +static void (*cgo_context_function)(struct cgoContextArg*); + +// The symbolizer function, used when symbolizing C frames. +static void (*cgo_symbolizer_function)(struct cgoSymbolizerArg*); + +// Sets the traceback, context, and symbolizer functions. Called from +// runtime.SetCgoTraceback. +void x_cgo_set_traceback_functions(struct cgoSetTracebackFunctionsArg* arg) { + EnterCriticalSection(&runtime_init_cs); + cgo_traceback_function = arg->Traceback; + cgo_context_function = arg->Context; + cgo_symbolizer_function = arg->Symbolizer; + LeaveCriticalSection(&runtime_init_cs); +} + +// Gets the traceback function to call to trace C calls. +void (*(_cgo_get_traceback_function(void)))(struct cgoTracebackArg*) { + void (*ret)(struct cgoTracebackArg*); -// Sets the context function to call to record the traceback context -// when calling a Go function from C code. Called from runtime.SetCgoTraceback. 
-void x_cgo_set_context_function(void (*context)(struct context_arg*)) { EnterCriticalSection(&runtime_init_cs); - cgo_context_function = context; + ret = cgo_traceback_function; LeaveCriticalSection(&runtime_init_cs); + return ret; +} + +// Call the traceback function registered with x_cgo_set_traceback_functions. +// +// On other platforms, this coordinates with C/C++ TSAN. On Windows, there is +// no C/C++ TSAN. +void x_cgo_call_traceback_function(struct cgoTracebackArg* arg) { + void (*pfn)(struct cgoTracebackArg*); + + pfn = _cgo_get_traceback_function(); + if (pfn == nil) { + return; + } + + (*pfn)(arg); } -// Gets the context function. -void (*(_cgo_get_context_function(void)))(struct context_arg*) { - void (*ret)(struct context_arg*); +// Gets the context function to call to record the traceback context +// when calling a Go function from C code. +void (*(_cgo_get_context_function(void)))(struct cgoContextArg*) { + void (*ret)(struct cgoContextArg*); EnterCriticalSection(&runtime_init_cs); ret = cgo_context_function; @@ -139,6 +174,31 @@ void (*(_cgo_get_context_function(void)))(struct context_arg*) { return ret; } +// Gets the symbolizer function to call to symbolize C frames. +void (*(_cgo_get_symbolizer_function(void)))(struct cgoSymbolizerArg*) { + void (*ret)(struct cgoSymbolizerArg*); + + EnterCriticalSection(&runtime_init_cs); + ret = cgo_symbolizer_function; + LeaveCriticalSection(&runtime_init_cs); + return ret; +} + +// Call the symbolizer function registered with x_cgo_set_symbolizer_functions. +// +// On other platforms, this coordinates with C/C++ TSAN. On Windows, there is +// no C/C++ TSAN. 
+void x_cgo_call_symbolizer_function(struct cgoSymbolizerArg* arg) { + void (*pfn)(struct cgoSymbolizerArg*); + + pfn = _cgo_get_symbolizer_function(); + if (pfn == nil) { + return; + } + + (*pfn)(arg); +} + void _cgo_beginthread(unsigned long (__stdcall *func)(void*), void* arg) { int tries; HANDLE thandle; diff --git a/src/runtime/cgo/libcgo.h b/src/runtime/cgo/libcgo.h index 26da68fadb..aa0b57d6d7 100644 --- a/src/runtime/cgo/libcgo.h +++ b/src/runtime/cgo/libcgo.h @@ -89,15 +89,7 @@ void darwin_arm_init_thread_exception_port(void); void darwin_arm_init_mach_exception_handler(void); /* - * The cgo context function. See runtime.SetCgoTraceback. - */ -struct context_arg { - uintptr_t Context; -}; -extern void (*(_cgo_get_context_function(void)))(struct context_arg*); - -/* - * The argument for the cgo traceback callback. See runtime.SetCgoTraceback. + * The cgo traceback callback. See runtime.SetCgoTraceback. */ struct cgoTracebackArg { uintptr_t Context; @@ -105,6 +97,38 @@ struct cgoTracebackArg { uintptr_t* Buf; uintptr_t Max; }; +extern void (*(_cgo_get_traceback_function(void)))(struct cgoTracebackArg*); + +/* + * The cgo context callback. See runtime.SetCgoTraceback. + */ +struct cgoContextArg { + uintptr_t Context; +}; +extern void (*(_cgo_get_context_function(void)))(struct cgoContextArg*); + +/* + * The argument for the cgo symbolizer callback. See runtime.SetCgoTraceback. + */ +struct cgoSymbolizerArg { + uintptr_t PC; + const char* File; + uintptr_t Lineno; + const char* Func; + uintptr_t Entry; + uintptr_t More; + uintptr_t Data; +}; +extern void (*(_cgo_get_symbolizer_function(void)))(struct cgoSymbolizerArg*); + +/* + * The argument for x_cgo_set_traceback_functions. See runtime.SetCgoTraceback. + */ +struct cgoSetTracebackFunctionsArg { + void (*Traceback)(struct cgoTracebackArg*); + void (*Context)(struct cgoContextArg*); + void (*Symbolizer)(struct cgoSymbolizerArg*); +}; /* * TSAN support. 
This is only useful when building with @@ -121,11 +145,21 @@ struct cgoTracebackArg { #ifdef CGO_TSAN +// _cgo_tsan_acquire tells C/C++ TSAN that we are acquiring a dummy lock. We +// call this when calling from Go to C. This is necessary because TSAN cannot +// see the synchronization in Go. Note that C/C++ code built with TSAN is not +// the same as the Go race detector. +// +// cmd/cgo generates calls to _cgo_tsan_acquire and _cgo_tsan_release. For +// other cgo calls, manual calls are required. +// // These must match the definitions in yesTsanProlog in cmd/cgo/out.go. // In general we should call _cgo_tsan_acquire when we enter C code, // and call _cgo_tsan_release when we return to Go code. +// // This is only necessary when calling code that might be instrumented // by TSAN, which mostly means system library calls that TSAN intercepts. +// // See the comment in cmd/cgo/out.go for more details. long long _cgo_sync __attribute__ ((common)); diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index 56f2a00d76..62ad8d1361 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -108,7 +108,7 @@ func (ci *Frames) Next() (frame Frame, more bool) { } funcInfo := findfunc(pc) if !funcInfo.valid() { - if cgoSymbolizer != nil { + if cgoSymbolizerAvailable() { // Pre-expand cgo frames. We could do this // incrementally, too, but there's no way to // avoid allocation in this case anyway. @@ -295,6 +295,8 @@ func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr { // expandCgoFrames expands frame information for pc, known to be // a non-Go function, using the cgoSymbolizer hook. expandCgoFrames // returns nil if pc could not be expanded. +// +// Preconditions: cgoSymbolizerAvailable returns true. 
func expandCgoFrames(pc uintptr) []Frame { arg := cgoSymbolizerArg{pc: pc} callCgoSymbolizer(&arg) diff --git a/src/runtime/testdata/testprog/setcgotraceback.go b/src/runtime/testdata/testprog/setcgotraceback.go new file mode 100644 index 0000000000..de005027ec --- /dev/null +++ b/src/runtime/testdata/testprog/setcgotraceback.go @@ -0,0 +1,45 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "internal/abi" + "runtime" + "unsafe" +) + +func init() { + register("SetCgoTracebackNoCgo", SetCgoTracebackNoCgo) +} + +func cgoTraceback() { + panic("unexpectedly reached cgo traceback function") +} + +func cgoContext() { + panic("unexpectedly reached cgo context function") +} + +func cgoSymbolizer() { + panic("unexpectedly reached cgo symbolizer function") +} + +// SetCgoTraceback is a no-op in non-cgo binaries. +func SetCgoTracebackNoCgo() { + traceback := unsafe.Pointer(abi.FuncPCABIInternal(cgoTraceback)) + context := unsafe.Pointer(abi.FuncPCABIInternal(cgoContext)) + symbolizer := unsafe.Pointer(abi.FuncPCABIInternal(cgoSymbolizer)) + runtime.SetCgoTraceback(0, traceback, context, symbolizer) + + // In a cgo binary, runtime.(*Frames).Next calls the cgo symbolizer for + // any non-Go frames. Pass in a bogus frame to verify that Next does + // not attempt to call the cgo symbolizer, which would crash in a + // non-cgo binary like this one. + frames := runtime.CallersFrames([]uintptr{0x12345678}) + frames.Next() + + fmt.Println("OK") +} diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 00eac59201..949d48c79a 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -591,7 +591,7 @@ func (u *unwinder) symPC() uintptr { // If the current frame is not a cgo frame or if there's no registered cgo // unwinder, it returns 0. 
func (u *unwinder) cgoCallers(pcBuf []uintptr) int { - if cgoTraceback == nil || u.frame.fn.funcID != abi.FuncID_cgocallback || u.cgoCtxt < 0 { + if !cgoTracebackAvailable() || u.frame.fn.funcID != abi.FuncID_cgocallback || u.cgoCtxt < 0 { // We don't have a cgo unwinder (typical case), or we do but we're not // in a cgo frame or we're out of cgo context. return 0 @@ -1014,7 +1014,7 @@ func traceback2(u *unwinder, showRuntime bool, skip, max int) (n, lastN int) { anySymbolized := false stop := false for _, pc := range cgoBuf[:cgoN] { - if cgoSymbolizer == nil { + if !cgoSymbolizerAvailable() { if pr, stop := commitFrame(); stop { break } else if pr { @@ -1573,10 +1573,18 @@ func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) cgoContext = context cgoSymbolizer = symbolizer - // The context function is called when a C function calls a Go - // function. As such it is only called by C code in runtime/cgo. - if _cgo_set_context_function != nil { - cgocall(_cgo_set_context_function, context) + if _cgo_set_traceback_functions != nil { + type cgoSetTracebackFunctionsArg struct { + traceback unsafe.Pointer + context unsafe.Pointer + symbolizer unsafe.Pointer + } + arg := cgoSetTracebackFunctionsArg{ + traceback: traceback, + context: context, + symbolizer: symbolizer, + } + cgocall(_cgo_set_traceback_functions, noescape(unsafe.Pointer(&arg))) } } @@ -1584,6 +1592,18 @@ var cgoTraceback unsafe.Pointer var cgoContext unsafe.Pointer var cgoSymbolizer unsafe.Pointer +func cgoTracebackAvailable() bool { + // - The traceback function must be registered via SetCgoTraceback. + // - This must be a cgo binary (providing _cgo_call_traceback_function). + return cgoTraceback != nil && _cgo_call_traceback_function != nil +} + +func cgoSymbolizerAvailable() bool { + // - The symbolizer function must be registered via SetCgoTraceback. + // - This must be a cgo binary (providing _cgo_call_symbolizer_function). 
+ return cgoSymbolizer != nil && _cgo_call_symbolizer_function != nil +} + // cgoTracebackArg is the type passed to cgoTraceback. type cgoTracebackArg struct { context uintptr @@ -1610,7 +1630,7 @@ type cgoSymbolizerArg struct { // printCgoTraceback prints a traceback of callers. func printCgoTraceback(callers *cgoCallers) { - if cgoSymbolizer == nil { + if !cgoSymbolizerAvailable() { for _, c := range callers { if c == 0 { break @@ -1635,6 +1655,8 @@ func printCgoTraceback(callers *cgoCallers) { // printOneCgoTraceback prints the traceback of a single cgo caller. // This can print more than one line because of inlining. // It returns the "stop" result of commitFrame. +// +// Preconditions: cgoSymbolizerAvailable returns true. func printOneCgoTraceback(pc uintptr, commitFrame func() (pr, stop bool), arg *cgoSymbolizerArg) bool { arg.pc = pc for { @@ -1665,6 +1687,8 @@ func printOneCgoTraceback(pc uintptr, commitFrame func() (pr, stop bool), arg *c } // callCgoSymbolizer calls the cgoSymbolizer function. +// +// Preconditions: cgoSymbolizerAvailable returns true. func callCgoSymbolizer(arg *cgoSymbolizerArg) { call := cgocall if panicking.Load() > 0 || getg().m.curg != getg() { @@ -1678,14 +1702,13 @@ func callCgoSymbolizer(arg *cgoSymbolizerArg) { if asanenabled { asanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{})) } - call(cgoSymbolizer, noescape(unsafe.Pointer(arg))) + call(_cgo_call_symbolizer_function, noescape(unsafe.Pointer(arg))) } // cgoContextPCs gets the PC values from a cgo traceback. +// +// Preconditions: cgoTracebackAvailable returns true. 
func cgoContextPCs(ctxt uintptr, buf []uintptr) { - if cgoTraceback == nil { - return - } call := cgocall if panicking.Load() > 0 || getg().m.curg != getg() { // We do not want to call into the scheduler when panicking @@ -1703,5 +1726,5 @@ func cgoContextPCs(ctxt uintptr, buf []uintptr) { if asanenabled { asanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg)) } - call(cgoTraceback, noescape(unsafe.Pointer(&arg))) + call(_cgo_call_traceback_function, noescape(unsafe.Pointer(&arg))) } diff --git a/src/runtime/traceback_test.go b/src/runtime/traceback_test.go index 8cbccac673..1dac91311c 100644 --- a/src/runtime/traceback_test.go +++ b/src/runtime/traceback_test.go @@ -8,6 +8,9 @@ import ( "bytes" "fmt" "internal/abi" + "internal/asan" + "internal/msan" + "internal/race" "internal/testenv" "regexp" "runtime" @@ -867,3 +870,15 @@ func TestTracebackGeneric(t *testing.T) { } } } + +func TestSetCgoTracebackNoCgo(t *testing.T) { + if asan.Enabled || msan.Enabled || race.Enabled { + t.Skip("skipped test: sanitizer builds use cgo") + } + + output := runTestProg(t, "testprog", "SetCgoTracebackNoCgo") + want := "OK\n" + if output != want { + t.Fatalf("want %s, got %s\n", want, output) + } +} -- cgit v1.3-5-g9baa