From a27261c42fcebf601587725714b9ef53c47b06b3 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 17 Sep 2025 22:17:41 -0400 Subject: go/types,types2: allow new(expr) For #45624 Change-Id: I6d77a2a1d6095cac0edc36060cbf98c72b749404 Reviewed-on: https://go-review.googlesource.com/c/go/+/704935 Auto-Submit: Alan Donovan Reviewed-by: Robert Findley LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/types2/builtins.go | 27 +++++++++++++++++++++++---- src/cmd/compile/internal/types2/version.go | 1 + 2 files changed, 24 insertions(+), 4 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go index 4bb2135755..3de2857ed4 100644 --- a/src/cmd/compile/internal/types2/builtins.go +++ b/src/cmd/compile/internal/types2/builtins.go @@ -636,11 +636,30 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( } case _New: - // new(T) + // new(T) or new(expr) // (no argument evaluated yet) - T := check.varType(argList[0]) - if !isValid(T) { - return + arg := argList[0] + check.exprOrType(x, arg, true) + var T Type + switch x.mode { + case builtin: + check.errorf(x, UncalledBuiltin, "%s must be called", x) + x.mode = invalid + case typexpr: + // new(T) + T = x.typ + if !isValid(T) { + return + } + default: + // new(expr) + check.verifyVersionf(call.Fun, go1_26, "new(expr)") + T = Default(x.typ) + if T != x.typ { + // untyped constant: check for overflow. + check.assignment(x, T, "argument to new") + } + check.validVarType(arg, T) } x.mode = value diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go index b555f398da..765b0f7e9a 100644 --- a/src/cmd/compile/internal/types2/version.go +++ b/src/cmd/compile/internal/types2/version.go @@ -43,6 +43,7 @@ var ( go1_21 = asGoVersion("go1.21") go1_22 = asGoVersion("go1.22") go1_23 = asGoVersion("go1.23") + go1_26 = asGoVersion("go1.26") // current (deployed) Go version go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version)) -- cgit v1.3-5-g9baa From a5866ebe40207c4c64f0522721825b10887356e0 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Mon, 15 Sep 2025 17:31:46 +0700 Subject: cmd/compile: prevent shapifying of pointer shape type CL 641955 changed the Unified IR reader to skip shapifying when reading a reshaping expression, to avoid losing the original type. That was an oversight: the main problem isn't shaping during the reshaping process itself, but the specific case of shaping a pointer shape type. The bug occurs when instantiating a generic function within another generic function with a pointer shape type as the type parameter, which converts `*[]go.shape.T` to `*go.shape.uint8` and so loses the original expression's type. This commit changes the Unified IR reader to avoid pointer shaping for `*[]go.shape.T`, ensuring that the original type is preserved when processing reshaping expressions.
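A minimal sketch of the triggering pattern (hypothetical code, distilled from the issue75461 test case added below): an outer generic function instantiates an inner generic function with a pointer type built from its own type parameter, so the inner call's type argument becomes a pointer to a shape-carrying type such as *[]go.shape.T.

	// Hypothetical reduction, not the original reproducer: inner is
	// instantiated from inside outer with *[]T as its type argument.
	func inner[P any](p P) P { return p }

	func outer[T any](s []T) *[]T {
		// In outer's shape-instantiated body this type argument is
		// roughly *[]go.shape.T; re-shapifying it to *go.shape.uint8
		// would lose the original type.
		return inner[*[]T](&s)
	}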
Updates #71184 Updates #73947 Fixes #74260 Fixes #75461 Change-Id: Icede6b73247d0d367bb485619f2dafb60ad66806 Reviewed-on: https://go-review.googlesource.com/c/go/+/704095 Auto-Submit: Cuong Manh Le LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Junyang Shao --- src/cmd/compile/internal/noder/reader.go | 46 +++++---------- src/cmd/compile/testdata/script/issue75461.txt | 78 ++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 33 deletions(-) create mode 100644 src/cmd/compile/testdata/script/issue75461.txt (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 3cbc7989a7..45e2bfd727 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -49,9 +49,6 @@ type pkgReader struct { // but bitwise inverted so we can detect if we're missing the entry // or not. newindex []index - - // indicates whether the data is reading during reshaping. - reshaping bool } func newPkgReader(pr pkgbits.PkgDecoder) *pkgReader { @@ -119,10 +116,6 @@ type reader struct { // find parameters/results. funarghack bool - // reshaping is used during reading exprReshape code, preventing - // the reader from shapifying the re-shaped type. - reshaping bool - // methodSym is the name of method's name, if reading a method. // It's nil if reading a normal function or closure body. methodSym *types.Sym @@ -937,8 +930,19 @@ func shapify(targ *types.Type, basic bool) *types.Type { // types, and discarding struct field names and tags. However, we'll // need to start tracking how type parameters are actually used to // implement some of these optimizations. + pointerShaping := basic && targ.IsPtr() && !targ.Elem().NotInHeap() + // The exception is when the type parameter is a pointer to a type + // which `Type.HasShape()` returns true, but `Type.IsShape()` returns + // false, like `*[]go.shape.T`. This is because the type parameter is + // used to instantiate a generic function inside another generic function. + // In this case, we want to keep the targ as-is, otherwise, we may lose the + // original type after `*[]go.shape.T` is shapified to `*go.shape.uint8`. + // See issue #54535, #71184. + if pointerShaping && !targ.Elem().IsShape() && targ.Elem().HasShape() { + return targ + } under := targ.Underlying() - if basic && targ.IsPtr() && !targ.Elem().NotInHeap() { + if pointerShaping { under = types.NewPtr(types.Types[types.TUINT8]) } @@ -1014,25 +1018,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx index, implicits, explicits // arguments. for i, targ := range dict.targs { basic := r.Bool() - isPointerShape := basic && targ.IsPtr() && !targ.Elem().NotInHeap() - // We should not do shapify during the reshaping process, see #71184. - // However, this only matters for shapify a pointer type, which will - // lose the original underlying type. - // - // Example with a pointer type: - // - // - First, shapifying *[]T -> *uint8 - // - During the reshaping process, *uint8 is shapified to *go.shape.uint8 - // - This ends up with a different type with the original *[]T - // - // For a non-pointer type: - // - // - int -> go.shape.int - // - go.shape.int -> go.shape.int - // - // We always end up with the identical type. 
- canShapify := !pr.reshaping || !isPointerShape - if dict.shaped && canShapify { + if dict.shaped { dict.targs[i] = shapify(targ, basic) } } @@ -2470,10 +2456,7 @@ func (r *reader) expr() (res ir.Node) { case exprReshape: typ := r.typ() - old := r.reshaping - r.reshaping = true x := r.expr() - r.reshaping = old if types.IdenticalStrict(x.Type(), typ) { return x @@ -2596,10 +2579,7 @@ func (r *reader) funcInst(pos src.XPos) (wrapperFn, baseFn, dictPtr ir.Node) { info := r.dict.subdicts[idx] explicits := r.p.typListIdx(info.explicits, r.dict) - old := r.p.reshaping - r.p.reshaping = r.reshaping baseFn = r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name) - r.p.reshaping = old // TODO(mdempsky): Is there a more robust way to get the // dictionary pointer type here? diff --git a/src/cmd/compile/testdata/script/issue75461.txt b/src/cmd/compile/testdata/script/issue75461.txt new file mode 100644 index 0000000000..05f0fd4cfa --- /dev/null +++ b/src/cmd/compile/testdata/script/issue75461.txt @@ -0,0 +1,78 @@ +go build main.go +! stdout . +! stderr . + +-- main.go -- +package main + +import ( + "demo/registry" +) + +func main() { + _ = registry.NewUserRegistry() +} + +-- go.mod -- +module demo + +go 1.24 + +-- model/user.go -- +package model + +type User struct { + ID int +} + +func (c *User) String() string { + return "" +} + +-- ordered/map.go -- +package ordered + +type OrderedMap[K comparable, V any] struct { + m map[K]V +} + +func New[K comparable, V any](options ...any) *OrderedMap[K, V] { + orderedMap := &OrderedMap[K, V]{} + return orderedMap +} + +-- registry/user.go -- +package registry + +import ( + "demo/model" + "demo/ordered" +) + +type baseRegistry = Registry[model.User, *model.User] + +type UserRegistry struct { + *baseRegistry +} + +type Registry[T any, P PStringer[T]] struct { + m *ordered.OrderedMap[string, P] +} + +type PStringer[T any] interface { + *T + String() string +} + +func NewRegistry[T any, P PStringer[T]]() *Registry[T, P] { + r := &Registry[T, P]{ + m: ordered.New[string, P](), + } + return r +} + +func NewUserRegistry() *UserRegistry { + return &UserRegistry{ + baseRegistry: NewRegistry[model.User](), + } +} -- cgit v1.3-5-g9baa From 3df27cd21aab3d3bcdc8ac56e7653ab023dc1112 Mon Sep 17 00:00:00 2001 From: mohanson Date: Sat, 20 Sep 2025 19:32:29 +0800 Subject: cmd/compile: fix typo in comment Fix typo for omitted. Change-Id: Ia633abe7f3d28f15f1f538425cdce9e6d9ef48c0 Reviewed-on: https://go-review.googlesource.com/c/go/+/705735 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Keith Randall Reviewed-by: Keith Randall Auto-Submit: Keith Randall --- src/cmd/compile/internal/noder/doc.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/noder/doc.go b/src/cmd/compile/internal/noder/doc.go index a5d5533168..8eb67e92f0 100644 --- a/src/cmd/compile/internal/noder/doc.go +++ b/src/cmd/compile/internal/noder/doc.go @@ -87,7 +87,7 @@ constant for file bases and hence not encoded. [ Sync ] StringRef // the (absolute) file name for the base Bool // true if a file base, else a line base - // The below is ommitted for file bases. + // The below is omitted for file bases. [ Pos Uint64 // line Uint64 ] // column @@ -99,7 +99,7 @@ without a PosBase have no line or column. Pos = [ Sync ] Bool // true if the position has a base - // The below is ommitted if the position has no base. + // The below is omitted if the position has no base. 
[ Ref[PosBase] Uint64 // line Uint64 ] // column @@ -125,7 +125,7 @@ packages. The below package paths have special meaning. Pkg = RefTable [ Sync ] StringRef // path - // The below is ommitted for the special package paths + // The below is omitted for the special package paths // "builtin" and "unsafe". [ StringRef // name Imports ] -- cgit v1.3-5-g9baa From 7bc1935db55c9d182617aba074f048f9c7573680 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 18 Sep 2025 14:04:43 -0400 Subject: cmd/compile/internal: support new(expr) This CL adds compiler support for new(expr), a feature of go1.26 that allows the user to specify the initial value of the variable instead of its type. Also, a basic test of dynamic behavior. See CL 704737 for spec change and CL 704935 for type-checker changes. For #45624 Change-Id: I65d27de1ee3aabb819b57cce8ea77f3073447757 Reviewed-on: https://go-review.googlesource.com/c/go/+/705157 Reviewed-by: Keith Randall Reviewed-by: Mateusz Poliwczak Auto-Submit: Alan Donovan LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/internal/ir/node.go | 2 +- src/cmd/compile/internal/ir/type.go | 4 ++++ src/cmd/compile/internal/noder/reader.go | 13 +++++++++++-- src/cmd/compile/internal/noder/writer.go | 8 +++++++- test/newexpr.go | 32 ++++++++++++++++++++++++++++++++ test/used.go | 3 +-- 6 files changed, 56 insertions(+), 6 deletions(-) create mode 100644 test/newexpr.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 003ec15de1..8c61bb6ed5 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -215,7 +215,7 @@ const ( ORSH // X >> Y OAND // X & Y OANDNOT // X &^ Y - ONEW // new(X); corresponds to calls to new in source code + ONEW // new(X); corresponds to calls to new(T) in source code ONOT // !X OBITNOT // ^X OPLUS // +X diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 6daca856a6..0f44cf8d04 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -42,6 +42,10 @@ func TypeNode(t *types.Type) Node { // A DynamicType represents a type expression whose exact type must be // computed dynamically. +// +// TODO(adonovan): I think "dynamic" is a misnomer here; it's really a +// type with free type parameters that needs to be instantiated to obtain +// a ground type for which an rtype can exist. type DynamicType struct { miniExpr diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 45e2bfd727..ca7c6bf151 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -2431,8 +2431,16 @@ func (r *reader) expr() (res ir.Node) { case exprNew: pos := r.pos() - typ := r.exprType() - return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, typ)) + if r.Bool() { + // new(expr) -> tmp := expr; &tmp + x := r.expr() + var init ir.Nodes + addr := ir.NewAddrExpr(pos, r.tempCopy(pos, x, &init)) + addr.SetInit(init) + return typecheck.Expr(addr) + } + // new(T) + return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, r.exprType())) case exprSizeof: return ir.NewUintptr(r.pos(), r.typ().Size()) @@ -3239,6 +3247,7 @@ func (r *reader) exprType() ir.Node { var rtype, itab ir.Node if r.Bool() { + // non-empty interface typ, rtype, _, _, itab = r.itab(pos) if !typ.IsInterface() { rtype = nil // TODO(mdempsky): Leave set? 
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go index 54e5f1ea5f..9c90d221c2 100644 --- a/src/cmd/compile/internal/noder/writer.go +++ b/src/cmd/compile/internal/noder/writer.go @@ -2035,10 +2035,16 @@ func (w *writer) expr(expr syntax.Expr) { case "new": assert(len(expr.ArgList) == 1) assert(!expr.HasDots) + arg := expr.ArgList[0] w.Code(exprNew) w.pos(expr) - w.exprType(nil, expr.ArgList[0]) + tv := w.p.typeAndValue(arg) + if w.Bool(!tv.IsType()) { + w.expr(arg) // new(expr), go1.26 + } else { + w.exprType(nil, arg) // new(T) + } return case "Sizeof": diff --git a/test/newexpr.go b/test/newexpr.go new file mode 100644 index 0000000000..7deffae38f --- /dev/null +++ b/test/newexpr.go @@ -0,0 +1,32 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Issue #45624 is the proposal to accept new(expr) in go1.26. +// Here we test its run-time behavior. +func main() { + { + p := new(123) // untyped constant expr + if *p != 123 { + panic("wrong value") + } + } + { + x := 42 + p := new(x) // non-constant expr + if *p != x { + panic("wrong value") + } + } + { + x := [2]int{123, 456} + p := new(x) // composite value + if *p != x { + panic("wrong value") + } + } +} diff --git a/test/used.go b/test/used.go index 516f5968a8..33e1140cef 100644 --- a/test/used.go +++ b/test/used.go @@ -140,6 +140,5 @@ func _() { _ = int // ERROR "type int is not an expression|not an expression" (x) // ERROR "x .* not used|not used" _ = new(len) // ERROR "len.*must be called" - // Disabled due to issue #43125. - // _ = new(1 + 1) // DISABLED "1 \+ 1 is not a type" + _ = new(1 + 1) // ok } -- cgit v1.3-5-g9baa From 411c250d64304033181c46413a6e9381e8fe9b82 Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Mon, 17 Mar 2025 11:45:52 -0400 Subject: runtime: add specialized malloc functions for sizes up to 512 bytes This CL adds a generator function in runtime/_mkmalloc to generate specialized mallocgc functions for sizes up through 512 bytes. (That's the limit where it's possible to end up in the no-header case when there are scan bits, and where the benefits of the specialized functions significantly diminish according to microbenchmarks.) If the specializedmalloc GOEXPERIMENT is turned on, mallocgc will call one of these functions in the no-header case. malloc_generated.go is the generated file containing the specialized malloc functions. malloc_stubs.go contains the templates that will be stamped to create the specialized malloc functions. malloc_tables_generated.go contains the tables that mallocgc will use to select the specialized function to call. I've had to update the two stdlib_test.go files to account for the new submodule that mkmalloc is in. mprof_test accounts for the changes in the stacks, since different functions can be called in some cases. I still need to investigate heapsampling.go.
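A rough, self-contained sketch of the dispatch idea (hypothetical names and rounding rule; the real tables and specialized functions are generated into malloc_tables_generated.go and malloc_generated.go below): each small size indexes a table whose entries are functions specialized for that size's class, so the allocator reaches the right code with a single table-indexed call instead of a size-class lookup.

	// Toy illustration only; not the runtime's actual code or signatures.
	package main

	import "fmt"

	var allocBySize [513]func() []byte // one entry per size, 0..512

	func init() {
		for s := range allocBySize {
			class := (s + 7) / 8 * 8 // stand-in for the real size classes
			allocBySize[s] = func() []byte { return make([]byte, class) }
		}
	}

	func main() {
		p := allocBySize[24]() // single table-indexed call
		fmt.Println(len(p))    // 24
	}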
Change-Id: Ia0f68dccdf1c6a200554ae88657cf4d686ace819 Reviewed-on: https://go-review.googlesource.com/c/go/+/665835 Reviewed-by: Michael Knyszek Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/types2/stdlib_test.go | 1 + src/go/types/stdlib_test.go | 1 + src/internal/runtime/gc/sizeclasses.go | 2 + src/runtime/_mkmalloc/constants.go | 29 + src/runtime/_mkmalloc/go.mod | 5 + src/runtime/_mkmalloc/go.sum | 2 + src/runtime/_mkmalloc/mkmalloc.go | 605 ++ src/runtime/_mkmalloc/mkmalloc_test.go | 36 + src/runtime/_mkmalloc/mksizeclasses.go | 59 +- src/runtime/malloc.go | 63 +- src/runtime/malloc_generated.go | 8468 ++++++++++++++++++++++++ src/runtime/malloc_stubs.go | 586 ++ src/runtime/malloc_tables_generated.go | 1038 +++ src/runtime/malloc_tables_plan9.go | 14 + src/runtime/malloc_test.go | 10 + 15 files changed, 10860 insertions(+), 59 deletions(-) create mode 100644 src/runtime/_mkmalloc/constants.go create mode 100644 src/runtime/_mkmalloc/go.mod create mode 100644 src/runtime/_mkmalloc/go.sum create mode 100644 src/runtime/_mkmalloc/mkmalloc.go create mode 100644 src/runtime/_mkmalloc/mkmalloc_test.go create mode 100644 src/runtime/malloc_generated.go create mode 100644 src/runtime/malloc_stubs.go create mode 100644 src/runtime/malloc_tables_generated.go create mode 100644 src/runtime/malloc_tables_plan9.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go index 365bc97462..a579c8184e 100644 --- a/src/cmd/compile/internal/types2/stdlib_test.go +++ b/src/cmd/compile/internal/types2/stdlib_test.go @@ -360,6 +360,7 @@ func TestStdKen(t *testing.T) { var excluded = map[string]bool{ "builtin": true, "cmd/compile/internal/ssa/_gen": true, + "runtime/_mkmalloc": true, } // printPackageMu synchronizes the printing of type-checked package files in diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 79ccbc6fcf..eb838b2c88 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -362,6 +362,7 @@ func TestStdKen(t *testing.T) { var excluded = map[string]bool{ "builtin": true, "cmd/compile/internal/ssa/_gen": true, + "runtime/_mkmalloc": true, } // printPackageMu synchronizes the printing of type-checked package files in diff --git a/src/internal/runtime/gc/sizeclasses.go b/src/internal/runtime/gc/sizeclasses.go index 3ef13834e4..befba425cc 100644 --- a/src/internal/runtime/gc/sizeclasses.go +++ b/src/internal/runtime/gc/sizeclasses.go @@ -91,6 +91,8 @@ const ( PageShift = 13 MaxObjsPerSpan = 1024 MaxSizeClassNPages = 10 + TinySize = 16 + TinySizeClass = 2 ) var SizeClassToSize = [NumSizeClasses]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768} diff --git a/src/runtime/_mkmalloc/constants.go b/src/runtime/_mkmalloc/constants.go new file mode 100644 index 0000000000..ad20c7b52b --- /dev/null +++ b/src/runtime/_mkmalloc/constants.go @@ -0,0 +1,29 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +const ( + // Constants that we use and will transfer to the runtime. 
+ minHeapAlign = 8 + maxSmallSize = 32 << 10 + smallSizeDiv = 8 + smallSizeMax = 1024 + largeSizeDiv = 128 + pageShift = 13 + tinySize = 16 + + // Derived constants. + pageSize = 1 << pageShift +) + +const ( + maxPtrSize = max(4, 8) + maxPtrBits = 8 * maxPtrSize + + // Maximum size smallScanNoHeader would be called for, which is the + // maximum value gc.MinSizeForMallocHeader can have on any platform. + // gc.MinSizeForMallocHeader is defined as goarch.PtrSize * goarch.PtrBits. + smallScanNoHeaderMax = maxPtrSize * maxPtrBits +) diff --git a/src/runtime/_mkmalloc/go.mod b/src/runtime/_mkmalloc/go.mod new file mode 100644 index 0000000000..623c341769 --- /dev/null +++ b/src/runtime/_mkmalloc/go.mod @@ -0,0 +1,5 @@ +module runtime/_mkmalloc + +go 1.24 + +require golang.org/x/tools v0.33.0 diff --git a/src/runtime/_mkmalloc/go.sum b/src/runtime/_mkmalloc/go.sum new file mode 100644 index 0000000000..bead5223ca --- /dev/null +++ b/src/runtime/_mkmalloc/go.sum @@ -0,0 +1,2 @@ +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= diff --git a/src/runtime/_mkmalloc/mkmalloc.go b/src/runtime/_mkmalloc/mkmalloc.go new file mode 100644 index 0000000000..986b0aa9f8 --- /dev/null +++ b/src/runtime/_mkmalloc/mkmalloc.go @@ -0,0 +1,605 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "log" + "os" + "strings" + + "golang.org/x/tools/go/ast/astutil" + + internalastutil "runtime/_mkmalloc/astutil" +) + +var stdout = flag.Bool("stdout", false, "write sizeclasses source to stdout instead of sizeclasses.go") + +func makeSizeToSizeClass(classes []class) []uint8 { + sc := uint8(0) + ret := make([]uint8, smallScanNoHeaderMax+1) + for i := range ret { + if i > classes[sc].size { + sc++ + } + ret[i] = sc + } + return ret +} + +func main() { + log.SetFlags(0) + log.SetPrefix("mkmalloc: ") + + classes := makeClasses() + sizeToSizeClass := makeSizeToSizeClass(classes) + + if *stdout { + if _, err := os.Stdout.Write(mustFormat(generateSizeClasses(classes))); err != nil { + log.Fatal(err) + } + return + } + + sizeclasesesfile := "../../internal/runtime/gc/sizeclasses.go" + if err := os.WriteFile(sizeclasesesfile, mustFormat(generateSizeClasses(classes)), 0666); err != nil { + log.Fatal(err) + } + + outfile := "../malloc_generated.go" + if err := os.WriteFile(outfile, mustFormat(inline(specializedMallocConfig(classes, sizeToSizeClass))), 0666); err != nil { + log.Fatal(err) + } + + tablefile := "../malloc_tables_generated.go" + if err := os.WriteFile(tablefile, mustFormat(generateTable(sizeToSizeClass)), 0666); err != nil { + log.Fatal(err) + } +} + +// withLineNumbers returns b with line numbers added to help debugging. +func withLineNumbers(b []byte) []byte { + var buf bytes.Buffer + i := 1 + for line := range bytes.Lines(b) { + fmt.Fprintf(&buf, "%d: %s", i, line) + i++ + } + return buf.Bytes() +} + +// mustFormat formats the input source, or exits if there's an error. +func mustFormat(b []byte) []byte { + formatted, err := format.Source(b) + if err != nil { + log.Fatalf("error formatting source: %v\nsource:\n%s\n", err, withLineNumbers(b)) + } + return formatted +} + +// generatorConfig is the configuration for the generator. 
It uses the given file to find +// its templates, and generates each of the functions specified by specs. +type generatorConfig struct { + file string + specs []spec +} + +// spec is the specification for a function for the inliner to produce. The function gets +// the given name, and is produced by starting with the function with the name given by +// templateFunc and applying each of the ops. +type spec struct { + name string + templateFunc string + ops []op +} + +// replacementKind specifies the operation to ben done by a op. +type replacementKind int + +const ( + inlineFunc = replacementKind(iota) + subBasicLit +) + +// op is a single inlining operation for the inliner. Any calls to the function +// from are replaced with the inlined body of to. For non-functions, uses of from are +// replaced with the basic literal expression given by to. +type op struct { + kind replacementKind + from string + to string +} + +func smallScanNoHeaderSCFuncName(sc, scMax uint8) string { + if sc == 0 || sc > scMax { + return "mallocPanic" + } + return fmt.Sprintf("mallocgcSmallScanNoHeaderSC%d", sc) +} + +func tinyFuncName(size uintptr) string { + if size == 0 || size > smallScanNoHeaderMax { + return "mallocPanic" + } + return fmt.Sprintf("mallocTiny%d", size) +} + +func smallNoScanSCFuncName(sc, scMax uint8) string { + if sc < 2 || sc > scMax { + return "mallocPanic" + } + return fmt.Sprintf("mallocgcSmallNoScanSC%d", sc) +} + +// specializedMallocConfig produces an inlining config to stamp out the definitions of the size-specialized +// malloc functions to be written by mkmalloc. +func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generatorConfig { + config := generatorConfig{file: "../malloc_stubs.go"} + + // Only generate specialized functions for sizes that don't have + // a header on 64-bit platforms. 
(They may have a header on 32-bit, but + // we will fall back to the non-specialized versions in that case) + scMax := sizeToSizeClass[smallScanNoHeaderMax] + + str := fmt.Sprint + + // allocations with pointer bits + { + const noscan = 0 + for sc := uint8(0); sc <= scMax; sc++ { + if sc == 0 { + continue + } + name := smallScanNoHeaderSCFuncName(sc, scMax) + elemsize := classes[sc].size + config.specs = append(config.specs, spec{ + templateFunc: "mallocStub", + name: name, + ops: []op{ + {inlineFunc, "inlinedMalloc", "smallScanNoHeaderStub"}, + {inlineFunc, "heapSetTypeNoHeaderStub", "heapSetTypeNoHeaderStub"}, + {inlineFunc, "nextFreeFastStub", "nextFreeFastStub"}, + {inlineFunc, "writeHeapBitsSmallStub", "writeHeapBitsSmallStub"}, + {subBasicLit, "elemsize_", str(elemsize)}, + {subBasicLit, "sizeclass_", str(sc)}, + {subBasicLit, "noscanint_", str(noscan)}, + }, + }) + } + } + + // allocations without pointer bits + { + const noscan = 1 + + // tiny + tinySizeClass := sizeToSizeClass[tinySize] + for s := range uintptr(16) { + if s == 0 { + continue + } + name := tinyFuncName(s) + elemsize := classes[tinySizeClass].size + config.specs = append(config.specs, spec{ + templateFunc: "mallocStub", + name: name, + ops: []op{ + {inlineFunc, "inlinedMalloc", "tinyStub"}, + {inlineFunc, "nextFreeFastTiny", "nextFreeFastTiny"}, + {subBasicLit, "elemsize_", str(elemsize)}, + {subBasicLit, "sizeclass_", str(tinySizeClass)}, + {subBasicLit, "size_", str(s)}, + {subBasicLit, "noscanint_", str(noscan)}, + }, + }) + } + + // non-tiny + for sc := uint8(tinySizeClass); sc <= scMax; sc++ { + name := smallNoScanSCFuncName(sc, scMax) + elemsize := classes[sc].size + config.specs = append(config.specs, spec{ + templateFunc: "mallocStub", + name: name, + ops: []op{ + {inlineFunc, "inlinedMalloc", "smallNoScanStub"}, + {inlineFunc, "nextFreeFastStub", "nextFreeFastStub"}, + {subBasicLit, "elemsize_", str(elemsize)}, + {subBasicLit, "sizeclass_", str(sc)}, + {subBasicLit, "noscanint_", str(noscan)}, + }, + }) + } + } + + return config +} + +// inline applies the inlining operations given by the config. +func inline(config generatorConfig) []byte { + var out bytes.Buffer + + // Read the template file in. + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, config.file, nil, 0) + if err != nil { + log.Fatalf("parsing %s: %v", config.file, err) + } + + // Collect the function and import declarations. The function + // declarations in the template file provide both the templates + // that will be stamped out, and the functions that will be inlined + // into them. The imports from the template file will be copied + // straight to the output. + funcDecls := map[string]*ast.FuncDecl{} + importDecls := []*ast.GenDecl{} + for _, decl := range f.Decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + funcDecls[decl.Name.Name] = decl + case *ast.GenDecl: + if decl.Tok.String() == "import" { + importDecls = append(importDecls, decl) + continue + } + } + } + + // Write out the package and import declarations. + out.WriteString("// Code generated by mkmalloc.go; DO NOT EDIT.\n\n") + out.WriteString("package " + f.Name.Name + "\n\n") + for _, importDecl := range importDecls { + out.Write(mustFormatNode(fset, importDecl)) + out.WriteString("\n\n") + } + + // Produce each of the inlined functions specified by specs. + for _, spec := range config.specs { + // Start with a renamed copy of the template function. 
+ containingFuncCopy := internalastutil.CloneNode(funcDecls[spec.templateFunc]) + if containingFuncCopy == nil { + log.Fatal("did not find", spec.templateFunc) + } + containingFuncCopy.Name.Name = spec.name + + // Apply each of the ops given by the specs + stamped := ast.Node(containingFuncCopy) + for _, repl := range spec.ops { + if toDecl, ok := funcDecls[repl.to]; ok { + stamped = inlineFunction(stamped, repl.from, toDecl) + } else { + stamped = substituteWithBasicLit(stamped, repl.from, repl.to) + } + } + + out.Write(mustFormatNode(fset, stamped)) + out.WriteString("\n\n") + } + + return out.Bytes() +} + +// substituteWithBasicLit recursively renames identifiers in the provided AST +// according to 'from' and 'to'. +func substituteWithBasicLit(node ast.Node, from, to string) ast.Node { + // The op is a substitution of an identifier with an basic literal. + toExpr, err := parser.ParseExpr(to) + if err != nil { + log.Fatalf("parsing expr %q: %v", to, err) + } + if _, ok := toExpr.(*ast.BasicLit); !ok { + log.Fatalf("op 'to' expr %q is not a basic literal", to) + } + return astutil.Apply(node, func(cursor *astutil.Cursor) bool { + if isIdentWithName(cursor.Node(), from) { + cursor.Replace(toExpr) + } + return true + }, nil) +} + +// inlineFunction recursively replaces calls to the function 'from' with the body of the function +// 'toDecl'. All calls to 'from' must appear in assignment statements. +// The replacement is very simple: it doesn't substitute the arguments for the parameters, so the +// arguments to the function call must be the same identifier as the parameters to the function +// declared by 'toDecl'. If there are any calls to from where that's not the case there will be a fatal error. +func inlineFunction(node ast.Node, from string, toDecl *ast.FuncDecl) ast.Node { + return astutil.Apply(node, func(cursor *astutil.Cursor) bool { + switch node := cursor.Node().(type) { + case *ast.AssignStmt: + // TODO(matloob) CHECK function args have same name + // as parameters (or parameter is "_"). + if len(node.Rhs) == 1 && isCallTo(node.Rhs[0], from) { + args := node.Rhs[0].(*ast.CallExpr).Args + if !argsMatchParameters(args, toDecl.Type.Params) { + log.Fatalf("applying op: arguments to %v don't match parameter names of %v: %v", from, toDecl.Name, debugPrint(args...)) + } + replaceAssignment(cursor, node, toDecl) + } + return false + case *ast.CallExpr: + // double check that all calls to from appear within an assignment + if isCallTo(node, from) { + if _, ok := cursor.Parent().(*ast.AssignStmt); !ok { + log.Fatalf("applying op: all calls to function %q being replaced must appear in an assignment statement, appears in %T", from, cursor.Parent()) + } + } + } + return true + }, nil) +} + +// argsMatchParameters reports whether the arguments given by args are all identifiers +// whose names are the same as the corresponding parameters in params. +func argsMatchParameters(args []ast.Expr, params *ast.FieldList) bool { + var paramIdents []*ast.Ident + for _, f := range params.List { + paramIdents = append(paramIdents, f.Names...) + } + + if len(args) != len(paramIdents) { + return false + } + + for i := range args { + if !isIdentWithName(args[i], paramIdents[i].Name) { + return false + } + } + + return true +} + +// isIdentWithName reports whether the expression is an identifier with the given name. 
+func isIdentWithName(expr ast.Node, name string) bool { + ident, ok := expr.(*ast.Ident) + if !ok { + return false + } + return ident.Name == name +} + +// isCallTo reports whether the expression is a call expression to the function with the given name. +func isCallTo(expr ast.Expr, name string) bool { + callexpr, ok := expr.(*ast.CallExpr) + if !ok { + return false + } + return isIdentWithName(callexpr.Fun, name) +} + +// replaceAssignment replaces an assignment statement where the right hand side is a function call +// whose arguments have the same names as the parameters to funcdecl with the body of funcdecl. +// It sets the left hand side of the assignment to the return values of the function. +func replaceAssignment(cursor *astutil.Cursor, assign *ast.AssignStmt, funcdecl *ast.FuncDecl) { + if !hasTerminatingReturn(funcdecl.Body) { + log.Fatal("function being inlined must have a return at the end") + } + + body := internalastutil.CloneNode(funcdecl.Body) + if hasTerminatingAndNonterminatingReturn(funcdecl.Body) { + // The function has multiple return points. Add the code that we'd continue with in the caller + // after each of the return points. The calling function must have a terminating return + // so we don't continue execution in the replaced function after we finish executing the + // continue block that we add. + body = addContinues(cursor, assign, body, everythingFollowingInParent(cursor)).(*ast.BlockStmt) + } + + if len(body.List) < 1 { + log.Fatal("replacing with empty bodied function") + } + + // The op happens in two steps: first we insert the body of the function being inlined (except for + // the final return) before the assignment, and then we change the assignment statement to replace the function call + // with the expressions being returned. + + // Determine the expressions being returned. + beforeReturn, ret := body.List[:len(body.List)-1], body.List[len(body.List)-1] + returnStmt, ok := ret.(*ast.ReturnStmt) + if !ok { + log.Fatal("last stmt in function we're replacing with should be a return") + } + results := returnStmt.Results + + // Insert the body up to the final return. + for _, stmt := range beforeReturn { + cursor.InsertBefore(stmt) + } + + // Rewrite the assignment statement. + replaceWithAssignment(cursor, assign.Lhs, results, assign.Tok) +} + +// hasTerminatingReturn reparts whether the block ends in a return statement. +func hasTerminatingReturn(block *ast.BlockStmt) bool { + _, ok := block.List[len(block.List)-1].(*ast.ReturnStmt) + return ok +} + +// hasTerminatingAndNonterminatingReturn reports whether the block ends in a return +// statement, and also has a return elsewhere in it. +func hasTerminatingAndNonterminatingReturn(block *ast.BlockStmt) bool { + if !hasTerminatingReturn(block) { + return false + } + var ret bool + for i := range block.List[:len(block.List)-1] { + ast.Inspect(block.List[i], func(node ast.Node) bool { + _, ok := node.(*ast.ReturnStmt) + if ok { + ret = true + return false + } + return true + }) + } + return ret +} + +// everythingFollowingInParent returns a block with everything in the parent block node of the cursor after +// the cursor itself. The cursor must point to an element in a block node's list. 
+func everythingFollowingInParent(cursor *astutil.Cursor) *ast.BlockStmt { + parent := cursor.Parent() + block, ok := parent.(*ast.BlockStmt) + if !ok { + log.Fatal("internal error: in everythingFollowingInParent, cursor doesn't point to element in block list") + } + + blockcopy := internalastutil.CloneNode(block) // get a clean copy + blockcopy.List = blockcopy.List[cursor.Index()+1:] // and remove everything before and including stmt + + if _, ok := blockcopy.List[len(blockcopy.List)-1].(*ast.ReturnStmt); !ok { + log.Printf("%s", mustFormatNode(token.NewFileSet(), blockcopy)) + log.Fatal("internal error: parent doesn't end in a return") + } + return blockcopy +} + +// in the case that there's a return in the body being inlined (toBlock), addContinues +// replaces those returns that are not at the end of the function with the code in the +// caller after the function call that execution would continue with after the return. +// The block being added must end in a return. +func addContinues(cursor *astutil.Cursor, assignNode *ast.AssignStmt, toBlock *ast.BlockStmt, continueBlock *ast.BlockStmt) ast.Node { + if !hasTerminatingReturn(continueBlock) { + log.Fatal("the block being continued to in addContinues must end in a return") + } + applyFunc := func(cursor *astutil.Cursor) bool { + ret, ok := cursor.Node().(*ast.ReturnStmt) + if !ok { + return true + } + + if cursor.Parent() == toBlock && cursor.Index() == len(toBlock.List)-1 { + return false + } + + // This is the opposite of replacing a function call with the body. First + // we replace the return statement with the assignment from the caller, and + // then add the code we continue with. + replaceWithAssignment(cursor, assignNode.Lhs, ret.Results, assignNode.Tok) + cursor.InsertAfter(internalastutil.CloneNode(continueBlock)) + + return false + } + return astutil.Apply(toBlock, applyFunc, nil) +} + +// debugPrint prints out the expressions given by nodes for debugging. +func debugPrint(nodes ...ast.Expr) string { + var b strings.Builder + for i, node := range nodes { + b.Write(mustFormatNode(token.NewFileSet(), node)) + if i != len(nodes)-1 { + b.WriteString(", ") + } + } + return b.String() +} + +// mustFormatNode produces the formatted Go code for the given node. +func mustFormatNode(fset *token.FileSet, node any) []byte { + var buf bytes.Buffer + format.Node(&buf, fset, node) + return buf.Bytes() +} + +// mustMatchExprs makes sure that the expression lists have the same length, +// and returns the lists of the expressions on the lhs and rhs where the +// identifiers are not the same. These are used to produce assignment statements +// where the expressions on the right are assigned to the identifiers on the left. +func mustMatchExprs(lhs []ast.Expr, rhs []ast.Expr) ([]ast.Expr, []ast.Expr) { + if len(lhs) != len(rhs) { + log.Fatal("exprs don't match", debugPrint(lhs...), debugPrint(rhs...)) + } + + var newLhs, newRhs []ast.Expr + for i := range lhs { + lhsIdent, ok1 := lhs[i].(*ast.Ident) + rhsIdent, ok2 := rhs[i].(*ast.Ident) + if ok1 && ok2 && lhsIdent.Name == rhsIdent.Name { + continue + } + newLhs = append(newLhs, lhs[i]) + newRhs = append(newRhs, rhs[i]) + } + + return newLhs, newRhs +} + +// replaceWithAssignment replaces the node pointed to by the cursor with an assignment of the +// left hand side to the righthand side, removing any redundant assignments of a variable to itself, +// and replacing an assignment to a single basic literal with a constant declaration. 
+func replaceWithAssignment(cursor *astutil.Cursor, lhs, rhs []ast.Expr, tok token.Token) { + newLhs, newRhs := mustMatchExprs(lhs, rhs) + if len(newLhs) == 0 { + cursor.Delete() + return + } + if len(newRhs) == 1 { + if lit, ok := newRhs[0].(*ast.BasicLit); ok { + constDecl := &ast.DeclStmt{ + Decl: &ast.GenDecl{ + Tok: token.CONST, + Specs: []ast.Spec{ + &ast.ValueSpec{ + Names: []*ast.Ident{newLhs[0].(*ast.Ident)}, + Values: []ast.Expr{lit}, + }, + }, + }, + } + cursor.Replace(constDecl) + return + } + } + newAssignment := &ast.AssignStmt{ + Lhs: newLhs, + Rhs: newRhs, + Tok: tok, + } + cursor.Replace(newAssignment) +} + +// generateTable generates the file with the jump tables for the specialized malloc functions. +func generateTable(sizeToSizeClass []uint8) []byte { + scMax := sizeToSizeClass[smallScanNoHeaderMax] + + var b bytes.Buffer + fmt.Fprintln(&b, `// Code generated by mkmalloc.go; DO NOT EDIT. +//go:build !plan9 + +package runtime + +import "unsafe" + +var mallocScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{`) + + for i := range uintptr(smallScanNoHeaderMax + 1) { + fmt.Fprintf(&b, "%s,\n", smallScanNoHeaderSCFuncName(sizeToSizeClass[i], scMax)) + } + + fmt.Fprintln(&b, ` +} + +var mallocNoScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{`) + for i := range uintptr(smallScanNoHeaderMax + 1) { + if i < 16 { + fmt.Fprintf(&b, "%s,\n", tinyFuncName(i)) + } else { + fmt.Fprintf(&b, "%s,\n", smallNoScanSCFuncName(sizeToSizeClass[i], scMax)) + } + } + + fmt.Fprintln(&b, ` +}`) + + return b.Bytes() +} diff --git a/src/runtime/_mkmalloc/mkmalloc_test.go b/src/runtime/_mkmalloc/mkmalloc_test.go new file mode 100644 index 0000000000..bd15c3226a --- /dev/null +++ b/src/runtime/_mkmalloc/mkmalloc_test.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "os" + "testing" +) + +func TestNoChange(t *testing.T) { + classes := makeClasses() + sizeToSizeClass := makeSizeToSizeClass(classes) + + outfile := "../malloc_generated.go" + want, err := os.ReadFile(outfile) + if err != nil { + t.Fatal(err) + } + got := mustFormat(inline(specializedMallocConfig(classes, sizeToSizeClass))) + if !bytes.Equal(want, got) { + t.Fatalf("want:\n%s\ngot:\n%s\n", withLineNumbers(want), withLineNumbers(got)) + } + + tablefile := "../malloc_tables_generated.go" + wanttable, err := os.ReadFile(tablefile) + if err != nil { + t.Fatal(err) + } + gotTable := mustFormat(generateTable(sizeToSizeClass)) + if !bytes.Equal(wanttable, gotTable) { + t.Fatalf("want:\n%s\ngot:\n%s\n", withLineNumbers(wanttable), withLineNumbers(gotTable)) + } +} diff --git a/src/runtime/_mkmalloc/mksizeclasses.go b/src/runtime/_mkmalloc/mksizeclasses.go index a8d2d2db1e..2c39617c6b 100644 --- a/src/runtime/_mkmalloc/mksizeclasses.go +++ b/src/runtime/_mkmalloc/mksizeclasses.go @@ -31,19 +31,14 @@ import ( "bytes" "flag" "fmt" - "go/format" "io" - "log" "math" "math/bits" - "os" ) // Generate internal/runtime/gc/msize.go -var stdout = flag.Bool("stdout", false, "write to stdout instead of sizeclasses.go") - -func main() { +func generateSizeClasses(classes []class) []byte { flag.Parse() var b bytes.Buffer @@ -51,39 +46,14 @@ func main() { fmt.Fprintln(&b, "//go:generate go -C ../../../runtime/_mkmalloc run mksizeclasses.go") fmt.Fprintln(&b) fmt.Fprintln(&b, "package gc") - classes := makeClasses() printComment(&b, classes) printClasses(&b, classes) - out, err := format.Source(b.Bytes()) - if err != nil { - log.Fatal(err) - } - if *stdout { - _, err = os.Stdout.Write(out) - } else { - err = os.WriteFile("../../internal/runtime/gc/sizeclasses.go", out, 0666) - } - if err != nil { - log.Fatal(err) - } + return b.Bytes() } -const ( - // Constants that we use and will transfer to the runtime. - minHeapAlign = 8 - maxSmallSize = 32 << 10 - smallSizeDiv = 8 - smallSizeMax = 1024 - largeSizeDiv = 128 - pageShift = 13 - - // Derived constants. 
- pageSize = 1 << pageShift -) - type class struct { size int // max size npages int // number of pages @@ -294,6 +264,15 @@ func maxNPages(classes []class) int { } func printClasses(w io.Writer, classes []class) { + sizeToSizeClass := func(size int) int { + for j, c := range classes { + if c.size >= size { + return j + } + } + panic("unreachable") + } + fmt.Fprintln(w, "const (") fmt.Fprintf(w, "MinHeapAlign = %d\n", minHeapAlign) fmt.Fprintf(w, "MaxSmallSize = %d\n", maxSmallSize) @@ -304,6 +283,8 @@ func printClasses(w io.Writer, classes []class) { fmt.Fprintf(w, "PageShift = %d\n", pageShift) fmt.Fprintf(w, "MaxObjsPerSpan = %d\n", maxObjsPerSpan(classes)) fmt.Fprintf(w, "MaxSizeClassNPages = %d\n", maxNPages(classes)) + fmt.Fprintf(w, "TinySize = %d\n", tinySize) + fmt.Fprintf(w, "TinySizeClass = %d\n", sizeToSizeClass(tinySize)) fmt.Fprintln(w, ")") fmt.Fprint(w, "var SizeClassToSize = [NumSizeClasses]uint16 {") @@ -332,12 +313,7 @@ func printClasses(w io.Writer, classes []class) { sc := make([]int, smallSizeMax/smallSizeDiv+1) for i := range sc { size := i * smallSizeDiv - for j, c := range classes { - if c.size >= size { - sc[i] = j - break - } - } + sc[i] = sizeToSizeClass(size) } fmt.Fprint(w, "var SizeToSizeClass8 = [SmallSizeMax/SmallSizeDiv+1]uint8 {") for _, v := range sc { @@ -349,12 +325,7 @@ func printClasses(w io.Writer, classes []class) { sc = make([]int, (maxSmallSize-smallSizeMax)/largeSizeDiv+1) for i := range sc { size := smallSizeMax + i*largeSizeDiv - for j, c := range classes { - if c.size >= size { - sc[i] = j - break - } - } + sc[i] = sizeToSizeClass(size) } fmt.Fprint(w, "var SizeToSizeClass128 = [(MaxSmallSize-SmallSizeMax)/LargeSizeDiv+1]uint8 {") for _, v := range sc { diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 5b5a633d9a..db91e89359 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -127,8 +127,8 @@ const ( _64bit = 1 << (^uintptr(0) >> 63) / 2 // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go. - _TinySize = 16 - _TinySizeClass = int8(2) + _TinySize = gc.TinySize + _TinySizeClass = int8(gc.TinySizeClass) _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc @@ -1080,6 +1080,12 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger // at scale. const doubleCheckMalloc = false +// sizeSpecializedMallocEnabled is the set of conditions where we enable the size-specialized +// mallocgc implementation: the experiment must be enabled, and none of the sanitizers should +// be enabled. The tables used to select the size-specialized malloc function do not compile +// properly on plan9, so size-specialized malloc is also disabled on plan9. +const sizeSpecializedMallocEnabled = goexperiment.SizeSpecializedMalloc && GOOS != "plan9" && !asanenabled && !raceenabled && !msanenabled && !valgrindenabled + // Allocate an object of size bytes. // Small objects are allocated from the per-P cache's free lists. // Large objects (> 32 kB) are allocated straight from the heap. @@ -1110,6 +1116,17 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { return unsafe.Pointer(&zerobase) } + if sizeSpecializedMallocEnabled && heapBitsInSpan(size) { + if typ == nil || !typ.Pointers() { + return mallocNoScanTable[size](size, typ, needzero) + } else { + if !needzero { + throw("objects with pointers must be zeroed") + } + return mallocScanTable[size](size, typ, needzero) + } + } + // It's possible for any malloc to trigger sweeping, which may in // turn queue finalizers. 
Record this dynamic lock edge. // N.B. Compiled away if lockrank experiment is not enabled. @@ -1138,25 +1155,41 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Actually do the allocation. var x unsafe.Pointer var elemsize uintptr - if size <= maxSmallSize-gc.MallocHeaderSize { - if typ == nil || !typ.Pointers() { - if size < maxTinySize { - x, elemsize = mallocgcTiny(size, typ) - } else { + if sizeSpecializedMallocEnabled { + // we know that heapBitsInSpan is true. + if size <= maxSmallSize-gc.MallocHeaderSize { + if typ == nil || !typ.Pointers() { x, elemsize = mallocgcSmallNoscan(size, typ, needzero) - } - } else { - if !needzero { - throw("objects with pointers must be zeroed") - } - if heapBitsInSpan(size) { - x, elemsize = mallocgcSmallScanNoHeader(size, typ) } else { + if !needzero { + throw("objects with pointers must be zeroed") + } x, elemsize = mallocgcSmallScanHeader(size, typ) } + } else { + x, elemsize = mallocgcLarge(size, typ, needzero) } } else { - x, elemsize = mallocgcLarge(size, typ, needzero) + if size <= maxSmallSize-gc.MallocHeaderSize { + if typ == nil || !typ.Pointers() { + if size < maxTinySize { + x, elemsize = mallocgcTiny(size, typ) + } else { + x, elemsize = mallocgcSmallNoscan(size, typ, needzero) + } + } else { + if !needzero { + throw("objects with pointers must be zeroed") + } + if heapBitsInSpan(size) { + x, elemsize = mallocgcSmallScanNoHeader(size, typ) + } else { + x, elemsize = mallocgcSmallScanHeader(size, typ) + } + } + } else { + x, elemsize = mallocgcLarge(size, typ, needzero) + } } // Notify sanitizers, if enabled. diff --git a/src/runtime/malloc_generated.go b/src/runtime/malloc_generated.go new file mode 100644 index 0000000000..600048c675 --- /dev/null +++ b/src/runtime/malloc_generated.go @@ -0,0 +1,8468 @@ +// Code generated by mkmalloc.go; DO NOT EDIT. 
+ +package runtime + +import ( + "internal/goarch" + "internal/runtime/sys" + "unsafe" +) + +func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 1 + + const elemsize = 8 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallScanNoHeader(size, typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(0) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 8 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(8)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 8 + + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + + const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + + c.scanAlloc += 8 + } else { + dataSize := size + x := uintptr(x) + + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(16)) { + throw("tried to write heap bits, but no heap bits in span") + } + + src0 := readUintptr(getGCMask(typ)) + + const elemsize = 16 + + scanSize := typ.PtrBytes + src := src0 + if 
typ.Size_ == goarch.PtrSize {
+			src = (1 << (dataSize / goarch.PtrSize)) - 1
+		} else {
+			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+			}
+			for i := typ.Size_; i < dataSize; i += typ.Size_ {
+				src |= src0 << (i / goarch.PtrSize)
+				scanSize += typ.Size_
+			}
+		}
[… the hunk continues with the same heap-bitmap write shown in the 24-byte instance below …]
+	v := nextFreeFastResult
+	if v == 0 {
+		v, span, checkGCTrigger = c.nextFree(spc)
+	}
+	x := unsafe.Pointer(v)
+	if span.needzero != 0 {
+		memclrNoHeapPointers(x, elemsize)
+	}
+	if goarch.PtrSize == 8 && sizeclass == 1 {
+		c.scanAlloc += 8
+	} else {
+		dataSize := size
+		x := uintptr(x)
+
+		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(24)) {
+			throw("tried to write heap bits, but no heap bits in span")
+		}
+
+		src0 := readUintptr(getGCMask(typ))
+
+		const elemsize = 24
+
+		scanSize := typ.PtrBytes
+		src := src0
+		if typ.Size_ == goarch.PtrSize {
+			src = (1 << (dataSize / goarch.PtrSize)) - 1
+		} else {
+			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+			}
+			for i := typ.Size_; i < dataSize; i += typ.Size_ {
+				src |= src0 << (i / goarch.PtrSize)
+				scanSize += typ.Size_
+			}
+		}
+
+		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+		dst := unsafe.Pointer(dstBase)
+		o := (x - span.base()) / goarch.PtrSize
+		i := o / ptrBits
+		j := o % ptrBits
+		const bits uintptr = elemsize / goarch.PtrSize
+
+		const bitsIsPowerOfTwo = bits&(bits-1) == 0
+		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+			bits0 := ptrBits - j
+			bits1 := bits - bits0
+			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+		} else {
+			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+		}
[… identical allocate-and-write-heap-bits hunks follow for element sizes 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, and 512, each differing only in its elemsize constant, followed by the 16-byte tiny-allocation epilogue and the generated mallocTinyN helpers; mallocTiny2 is representative …]
+func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	const constsize = 2
+
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		const elemsize = 0
+		{
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+	}
+
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		gcmarknewobject(span, uintptr(x))
+	} else {
+		span.freeIndexForScan = span.freeindex
+	}
+
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
[… mallocTiny3 through mallocTiny10 are identical to mallocTiny2 except for constsize = 3 … 10 …]
+func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 11 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 12 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + 
c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 13 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < 
nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 14 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = 
span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const constsize = 15 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + c := getMCache(mp) + off := c.tinyoffset + + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + const elemsize = 0 + { + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + + checkGCTrigger := false + span := c.alloc[tinySpanClass] + + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / + 16, + ) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + + if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + + x = add(x, elemsize-constsize) + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { 
+ if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 2 + + const elemsize = 16 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 16 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 3 + + const elemsize = 24 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 24 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } 
+ } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 4 + + const elemsize = 32 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 32 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 5 + + const elemsize = 48 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 48 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + 
gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 6 + + const elemsize = 64 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 64 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 7 + + const elemsize = 80 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + 
span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 80 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 8 + + const elemsize = 96 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 96 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 9 + + const elemsize = 112 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + 
span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 112 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 10 + + const elemsize = 128 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 128 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, 
typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 11 + + const elemsize = 144 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 144 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 12 + + const elemsize = 160 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 160 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + 
postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 13 + + const elemsize = 176 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 176 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 14 + + const elemsize = 192 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 192 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != 
c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 15 + + const elemsize = 208 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 208 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 16 + + const elemsize = 224 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 224 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger 
= c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 17 + + const elemsize = 240 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 240 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 18 + + const elemsize = 256 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + 
uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 256 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 19 + + const elemsize = 288 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 288 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 20 + + const elemsize = 320 + + mp := 
acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 320 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 21 + + const elemsize = 352 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 352 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if 
doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 22 + + const elemsize = 384 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 384 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 23 + + const elemsize = 416 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 416 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); 
t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 24 + + const elemsize = 448 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 448 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 25 + + const elemsize = 480 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 480 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + 
if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + lockRankMayQueueFinalizer() + + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + const sizeclass = 26 + + const elemsize = 512 + + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(1) + span := c.alloc[spc] + + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)* + 512 + + span.base()) + } + } + } + v := nextFreeFastResult + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + publicationBarrier() + + if writeBarrier.enabled { + + gcmarknewobject(span, uintptr(x)) + } else { + + span.freeIndexForScan = span.freeindex + } + + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} diff --git a/src/runtime/malloc_stubs.go b/src/runtime/malloc_stubs.go new file mode 100644 index 0000000000..7fd1444189 --- /dev/null +++ b/src/runtime/malloc_stubs.go @@ -0,0 +1,586 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains stub functions that are not meant to be called directly, +// but that will be assembled together using the inlining logic in runtime/_mkmalloc +// to produce a full mallocgc function that's specialized for a span class +// or specific size in the case of the tiny allocator. +// +// To assemble a mallocgc function, the mallocStub function is cloned, and the call to +// inlinedMalloc is replaced with the inlined body of smallScanNoHeaderStub, +// smallNoScanStub or tinyStub, depending on the parameters being specialized. 
+// +// The size_ (for the tiny case) and elemsize_, sizeclass_, and noscanint_ (for all three cases) +// identifiers are replaced with the value of the parameter in the specialized case. +// The nextFreeFastStub, nextFreeFastTiny, heapSetTypeNoHeaderStub, and writeHeapBitsSmallStub +// functions are also inlined by _mkmalloc. + +package runtime + +import ( + "internal/goarch" + "internal/runtime/sys" + "unsafe" +) + +// These identifiers will all be replaced by the inliner. So their values don't +// really matter: they just need to be set so that the stub functions, which +// will never be used on their own, can compile. elemsize_ can't be set to +// zero because we divide by it in nextFreeFastTiny, and the compiler would +// complain about a division by zero. Its replaced value will always be greater +// than zero. +const elemsize_ = 8 +const sizeclass_ = 0 +const noscanint_ = 0 +const size_ = 0 + +func malloc0(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + // Short-circuit zero-sized allocation requests. + return unsafe.Pointer(&zerobase) +} + +func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + panic("not defined for sizeclass") +} + +func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + if doubleCheckMalloc { + if gcphase == _GCmarktermination { + throw("mallocgc called with gcphase == _GCmarktermination") + } + } + + // It's possible for any malloc to trigger sweeping, which may in + // turn queue finalizers. Record this dynamic lock edge. + // N.B. Compiled away if lockrank experiment is not enabled. + lockRankMayQueueFinalizer() + + // Pre-malloc debug hooks. + if debug.malloc { + if x := preMallocgcDebug(size, typ); x != nil { + return x + } + } + + // Assist the GC if needed. + if gcBlackenEnabled != 0 { + deductAssistCredit(size) + } + + // Actually do the allocation. + x, elemsize := inlinedMalloc(size, typ, needzero) + + // Adjust our GC assist debt to account for internal fragmentation. + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + // Post-malloc debug hooks. + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x +} + +// inlinedMalloc will never be called. It is defined just so that the compiler can compile +// the mallocStub function, which will also never be called, but instead used as a template +// to generate a size-specialized malloc function. The call to inlinedMalloc in mallocStub +// will be replaced with the inlined body of smallScanNoHeaderStub, smallNoScanStub, or tinyStub +// when generating the size-specialized malloc function. See the comment at the top of this +// file for more information. 
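The header comment above describes the assembly of a specialized mallocgc purely in terms of cloning mallocStub and substituting placeholder identifiers. The actual generator in runtime/_mkmalloc is not part of this diff, so the following is only an assumed sketch of what that identifier-substitution step can look like using go/ast; the specialize helper, the toy stub source, and the constants 384 and 22 are illustrative names and values, not code from the tool.

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"log"
	"strconv"
)

// specialize parses src and rewrites each placeholder identifier listed in
// vals (e.g. elemsize_, sizeclass_) to the corresponding integer literal,
// then prints the rewritten source.
func specialize(src string, vals map[string]int) (string, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "stub.go", src, parser.ParseComments)
	if err != nil {
		return "", err
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok {
			if v, ok := vals[id.Name]; ok {
				// Renaming the identifier to the literal's text is a shortcut:
				// the printer emits the name verbatim, so elemsize_ prints as 384.
				id.Name = strconv.Itoa(v)
			}
		}
		return true
	})
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, fset, f); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	const stub = `package runtime

func stub(size uintptr) uintptr {
	const elemsize = elemsize_ // placeholder, one value per size class
	const sizeclass = sizeclass_
	return size + elemsize*sizeclass
}
`
	out, err := specialize(stub, map[string]int{"elemsize_": 384, "sizeclass_": 22})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // the placeholder constants now read 384 and 22
}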
+func inlinedMalloc(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { + return unsafe.Pointer(uintptr(0)), 0 +} + +func doubleCheckSmallScanNoHeader(size uintptr, typ *_type, mp *m) { + if mp.mallocing != 0 { + throw("malloc deadlock") + } + if mp.gsignal == getg() { + throw("malloc during signal") + } + if typ == nil || !typ.Pointers() { + throw("noscan allocated in scan-only path") + } + if !heapBitsInSpan(size) { + throw("heap bits in not in span for non-header-only path") + } +} + +func smallScanNoHeaderStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { + const sizeclass = sizeclass_ + const elemsize = elemsize_ + + // Set mp.mallocing to keep from being preempted by GC. + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallScanNoHeader(size, typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(noscanint_) + span := c.alloc[spc] + v := nextFreeFastStub(span) + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + if goarch.PtrSize == 8 && sizeclass == 1 { + // initHeapBits already set the pointer bits for the 8-byte sizeclass + // on 64-bit platforms. + c.scanAlloc += 8 + } else { + dataSize := size // make the inliner happy + x := uintptr(x) + scanSize := heapSetTypeNoHeaderStub(x, dataSize, typ, span) + c.scanAlloc += scanSize + } + + // Ensure that the stores above that initialize x to + // type-safe memory and set the heap bits occur before + // the caller can make x observable to the garbage + // collector. Otherwise, on weakly ordered machines, + // the garbage collector could follow a pointer to x, + // but see uninitialized memory or stale heap bits. + publicationBarrier() + + if writeBarrier.enabled { + // Allocate black during GC. + // All slots hold nil so no scanning is needed. + // This may be racing with GC so do it atomically if there can be + // a race marking the bit. + gcmarknewobject(span, uintptr(x)) + } else { + // Track the last free index before the mark phase. This field + // is only used by the garbage collector. During the mark phase + // this is used by the conservative scanner to filter out objects + // that are both free and recently-allocated. It's safe to do that + // because we allocate-black if the GC is enabled. The conservative + // scanner produces pointers out of thin air, so without additional + // synchronization it might otherwise observe a partially-initialized + // object, which could crash the program. + span.freeIndexForScan = span.freeindex + } + + // Note cache c only valid while m acquired; see #47302 + // + // N.B. Use the full size because that matches how the GC + // will update the mem profile on the "free" side. + // + // TODO(mknyszek): We should really count the header as part + // of gc_sys or something. The code below just pretends it is + // internal fragmentation and matches the GC's accounting by + // using the whole allocation slot. 
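// nextSample is a countdown, in bytes of allocation, until the next heap
// profile sample. Subtracting elemsize and checking for a negative value
// (or for a sampling rate that has changed since the countdown was last
// reset) is what decides whether profilealloc records this allocation and
// picks a new sample distance.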
+ c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + return x, elemsize +} + +func doubleCheckSmallNoScan(typ *_type, mp *m) { + if mp.mallocing != 0 { + throw("malloc deadlock") + } + if mp.gsignal == getg() { + throw("malloc during signal") + } + if typ != nil && typ.Pointers() { + throw("expected noscan type for noscan alloc") + } +} + +func smallNoScanStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { + // TODO(matloob): Add functionality to mkmalloc to allow us to inline a non-constant + // sizeclass_ and elemsize_ value (instead just set to the expressions to look up the size class + // and elemsize. We'd also need to teach mkmalloc that values that are touched by these (specifically + // spc below) should turn into vars. This would allow us to generate mallocgcSmallNoScan itself, + // so that its code could not diverge from the generated functions. + const sizeclass = sizeclass_ + const elemsize = elemsize_ + + // Set mp.mallocing to keep from being preempted by GC. + mp := acquirem() + if doubleCheckMalloc { + doubleCheckSmallNoScan(typ, mp) + } + mp.mallocing = 1 + + checkGCTrigger := false + c := getMCache(mp) + const spc = spanClass(sizeclass<<1) | spanClass(noscanint_) + span := c.alloc[spc] + v := nextFreeFastStub(span) + if v == 0 { + v, span, checkGCTrigger = c.nextFree(spc) + } + x := unsafe.Pointer(v) + if needzero && span.needzero != 0 { + memclrNoHeapPointers(x, elemsize) + } + + // Ensure that the stores above that initialize x to + // type-safe memory and set the heap bits occur before + // the caller can make x observable to the garbage + // collector. Otherwise, on weakly ordered machines, + // the garbage collector could follow a pointer to x, + // but see uninitialized memory or stale heap bits. + publicationBarrier() + + if writeBarrier.enabled { + // Allocate black during GC. + // All slots hold nil so no scanning is needed. + // This may be racing with GC so do it atomically if there can be + // a race marking the bit. + gcmarknewobject(span, uintptr(x)) + } else { + // Track the last free index before the mark phase. This field + // is only used by the garbage collector. During the mark phase + // this is used by the conservative scanner to filter out objects + // that are both free and recently-allocated. It's safe to do that + // because we allocate-black if the GC is enabled. The conservative + // scanner produces pointers out of thin air, so without additional + // synchronization it might otherwise observe a partially-initialized + // object, which could crash the program. + span.freeIndexForScan = span.freeindex + } + + // Note cache c only valid while m acquired; see #47302 + // + // N.B. Use the full size because that matches how the GC + // will update the mem profile on the "free" side. + // + // TODO(mknyszek): We should really count the header as part + // of gc_sys or something. The code below just pretends it is + // internal fragmentation and matches the GC's accounting by + // using the whole allocation slot. 
+ c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + return x, elemsize +} + +func doubleCheckTiny(size uintptr, typ *_type, mp *m) { + if mp.mallocing != 0 { + throw("malloc deadlock") + } + if mp.gsignal == getg() { + throw("malloc during signal") + } + if typ != nil && typ.Pointers() { + throw("expected noscan for tiny alloc") + } +} + +func tinyStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { + const constsize = size_ + const elemsize = elemsize_ + + // Set mp.mallocing to keep from being preempted by GC. + mp := acquirem() + if doubleCheckMalloc { + doubleCheckTiny(constsize, typ, mp) + } + mp.mallocing = 1 + + // Tiny allocator. + // + // Tiny allocator combines several tiny allocation requests + // into a single memory block. The resulting memory block + // is freed when all subobjects are unreachable. The subobjects + // must be noscan (don't have pointers), this ensures that + // the amount of potentially wasted memory is bounded. + // + // Size of the memory block used for combining (maxTinySize) is tunable. + // Current setting is 16 bytes, which relates to 2x worst case memory + // wastage (when all but one subobjects are unreachable). + // 8 bytes would result in no wastage at all, but provides less + // opportunities for combining. + // 32 bytes provides more opportunities for combining, + // but can lead to 4x worst case wastage. + // The best case winning is 8x regardless of block size. + // + // Objects obtained from tiny allocator must not be freed explicitly. + // So when an object will be freed explicitly, we ensure that + // its size >= maxTinySize. + // + // SetFinalizer has a special case for objects potentially coming + // from tiny allocator, it such case it allows to set finalizers + // for an inner byte of a memory block. + // + // The main targets of tiny allocator are small strings and + // standalone escaping variables. On a json benchmark + // the allocator reduces number of allocations by ~12% and + // reduces heap size by ~20%. + c := getMCache(mp) + off := c.tinyoffset + // Align tiny pointer for required (conservative) alignment. + if constsize&7 == 0 { + off = alignUp(off, 8) + } else if goarch.PtrSize == 4 && constsize == 12 { + // Conservatively align 12-byte objects to 8 bytes on 32-bit + // systems so that objects whose first field is a 64-bit + // value is aligned to 8 bytes and does not cause a fault on + // atomic access. See issue 37262. + // TODO(mknyszek): Remove this workaround if/when issue 36606 + // is resolved. + off = alignUp(off, 8) + } else if constsize&3 == 0 { + off = alignUp(off, 4) + } else if constsize&1 == 0 { + off = alignUp(off, 2) + } + if off+constsize <= maxTinySize && c.tiny != 0 { + // The object fits into existing tiny block. + x := unsafe.Pointer(c.tiny + off) + c.tinyoffset = off + constsize + c.tinyAllocs++ + mp.mallocing = 0 + releasem(mp) + return x, 0 + } + // Allocate a new maxTinySize block. + checkGCTrigger := false + span := c.alloc[tinySpanClass] + v := nextFreeFastTiny(span) + if v == 0 { + v, span, checkGCTrigger = c.nextFree(tinySpanClass) + } + x := unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 // Always zero + (*[2]uint64)(x)[1] = 0 + // See if we need to replace the existing tiny block with the new one + // based on amount of remaining free space. 
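// For example, with illustrative numbers: if the old tiny block already has
// 12 of its 16 bytes used (c.tinyoffset == 12) and this request only needed
// constsize == 8 bytes of the fresh block, then 8 < 12 holds, so the fresh
// block (8 bytes still free) replaces the old one (only 4 bytes free) as c.tiny.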
+ if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { + // Note: disabled when race detector is on, see comment near end of this function. + c.tiny = uintptr(x) + c.tinyoffset = constsize + } + + // Ensure that the stores above that initialize x to + // type-safe memory and set the heap bits occur before + // the caller can make x observable to the garbage + // collector. Otherwise, on weakly ordered machines, + // the garbage collector could follow a pointer to x, + // but see uninitialized memory or stale heap bits. + publicationBarrier() + + if writeBarrier.enabled { + // Allocate black during GC. + // All slots hold nil so no scanning is needed. + // This may be racing with GC so do it atomically if there can be + // a race marking the bit. + gcmarknewobject(span, uintptr(x)) + } else { + // Track the last free index before the mark phase. This field + // is only used by the garbage collector. During the mark phase + // this is used by the conservative scanner to filter out objects + // that are both free and recently-allocated. It's safe to do that + // because we allocate-black if the GC is enabled. The conservative + // scanner produces pointers out of thin air, so without additional + // synchronization it might otherwise observe a partially-initialized + // object, which could crash the program. + span.freeIndexForScan = span.freeindex + } + + // Note cache c only valid while m acquired; see #47302 + // + // N.B. Use the full size because that matches how the GC + // will update the mem profile on the "free" side. + // + // TODO(mknyszek): We should really count the header as part + // of gc_sys or something. The code below just pretends it is + // internal fragmentation and matches the GC's accounting by + // using the whole allocation slot. + c.nextSample -= int64(elemsize) + if c.nextSample < 0 || MemProfileRate != c.memProfRate { + profilealloc(mp, x, elemsize) + } + mp.mallocing = 0 + releasem(mp) + + if checkGCTrigger { + if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { + gcStart(t) + } + } + + if raceenabled { + // Pad tinysize allocations so they are aligned with the end + // of the tinyalloc region. This ensures that any arithmetic + // that goes off the top end of the object will be detectable + // by checkptr (issue 38872). + // Note that we disable tinyalloc when raceenabled for this to work. + // TODO: This padding is only performed when the race detector + // is enabled. It would be nice to enable it if any package + // was compiled with checkptr, but there's no easy way to + // detect that (especially at compile time). + // TODO: enable this padding for all allocations, not just + // tinyalloc ones. It's tricky because of pointer maps. + // Maybe just all noscan objects? + x = add(x, elemsize-constsize) + } + return x, elemsize +} + +// TODO(matloob): Should we let the go compiler inline this instead of using mkmalloc? +// We won't be able to use elemsize_ but that's probably ok. +func nextFreeFastTiny(span *mspan) gclinkptr { + const nbytes = 8192 + const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / elemsize_) + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) // Is there a free object in the allocCache? 
+ result := span.freeindex + uint16(theBit) + if result < nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)*elemsize_ + span.base()) + } + } + } + return nextFreeFastResult +} + +func nextFreeFastStub(span *mspan) gclinkptr { + var nextFreeFastResult gclinkptr + if span.allocCache != 0 { + theBit := sys.TrailingZeros64(span.allocCache) // Is there a free object in the allocCache? + result := span.freeindex + uint16(theBit) + if result < span.nelems { + freeidx := result + 1 + if !(freeidx%64 == 0 && freeidx != span.nelems) { + span.allocCache >>= uint(theBit + 1) + span.freeindex = freeidx + span.allocCount++ + nextFreeFastResult = gclinkptr(uintptr(result)*elemsize_ + span.base()) + } + } + } + return nextFreeFastResult +} + +func heapSetTypeNoHeaderStub(x, dataSize uintptr, typ *_type, span *mspan) uintptr { + if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(elemsize_)) { + throw("tried to write heap bits, but no heap bits in span") + } + scanSize := writeHeapBitsSmallStub(span, x, dataSize, typ) + if doubleCheckHeapSetType { + doubleCheckHeapType(x, dataSize, typ, nil, span) + } + return scanSize +} + +// writeHeapBitsSmallStub writes the heap bits for small objects whose ptr/scalar data is +// stored as a bitmap at the end of the span. +// +// Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span. +// heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_. +// +//go:nosplit +func writeHeapBitsSmallStub(span *mspan, x, dataSize uintptr, typ *_type) uintptr { + // The objects here are always really small, so a single load is sufficient. + src0 := readUintptr(getGCMask(typ)) + + const elemsize = elemsize_ + + // Create repetitions of the bitmap if we have a small slice backing store. + scanSize := typ.PtrBytes + src := src0 + if typ.Size_ == goarch.PtrSize { + src = (1 << (dataSize / goarch.PtrSize)) - 1 + } else { + // N.B. We rely on dataSize being an exact multiple of the type size. + // The alternative is to be defensive and mask out src to the length + // of dataSize. The purpose is to save on one additional masking operation. + if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { + throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") + } + for i := typ.Size_; i < dataSize; i += typ.Size_ { + src |= src0 << (i / goarch.PtrSize) + scanSize += typ.Size_ + } + } + + // Since we're never writing more than one uintptr's worth of bits, we're either going + // to do one or two writes. + dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) + dst := unsafe.Pointer(dstBase) + o := (x - span.base()) / goarch.PtrSize + i := o / ptrBits + j := o % ptrBits + const bits uintptr = elemsize / goarch.PtrSize + // In the if statement below, we have to do two uintptr writes if the bits + // we need to write straddle across two different memory locations. But if + // the number of bits we're writing divides evenly into the number of bits + // in the uintptr we're writing, this can never happen. Since bitsIsPowerOfTwo + // is a compile-time constant in the generated code, in the case where the size is + // a power of two less than or equal to ptrBits, the compiler can remove the + // 'two writes' branch of the if statement and always do only one write without + // the check. 
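// Worked example with illustrative numbers: on a 64-bit system ptrBits is 64
// and bits = elemsize/8. For elemsize = 48, bits = 6 is not a power of two,
// so an object whose word offset gives j = 60 straddles two bitmap words
// (j+bits = 66 > 64) and needs both writes. For elemsize = 64, bits = 8
// always divides evenly into a word, j+bits never exceeds 64, and the
// two-write branch is statically dead. The masking follows the pattern of
// the runtime's writeHeapBitsSmall: the second of the two writes is
//   *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
// and the single-write case is
//   *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)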
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0 + if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { + // Two writes. + bits0 := ptrBits - j + bits1 := bits - bits0 + dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) + dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) + *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) + *dst1 = (*dst1)&^((1<> bits0) + } else { + // One write. + dst := (*uintptr)(add(dst, i*goarch.PtrSize)) + *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)< ptrbits we always take the other branch + } + + const doubleCheck = false + if doubleCheck { + writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) + } + return scanSize +} + +func writeHeapBitsDoubleCheck(span *mspan, x, dataSize, src, src0, i, j, bits uintptr, typ *_type) { + srcRead := span.heapBitsSmallForAddr(x) + if srcRead != src { + print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n") + print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n") + print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n") + throw("bad pointer bits written for small object") + } +} diff --git a/src/runtime/malloc_tables_generated.go b/src/runtime/malloc_tables_generated.go new file mode 100644 index 0000000000..36650881fe --- /dev/null +++ b/src/runtime/malloc_tables_generated.go @@ -0,0 +1,1038 @@ +// Code generated by mkmalloc.go; DO NOT EDIT. +//go:build !plan9 + +package runtime + +import "unsafe" + +var mallocScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{ + mallocPanic, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC1, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC2, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC3, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC4, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC5, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + 
mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC6, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC7, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC8, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC9, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC10, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC11, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC12, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + 
mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC13, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC14, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC15, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC16, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC17, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC18, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + 
mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC19, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC20, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC21, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC22, + 
mallocgcSmallScanNoHeaderSC22, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC23, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC24, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC25, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + 
mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, + mallocgcSmallScanNoHeaderSC26, +} + +var mallocNoScanTable = [513]func(size uintptr, typ *_type, needzero bool) unsafe.Pointer{ + mallocPanic, + mallocTiny1, + mallocTiny2, + mallocTiny3, + mallocTiny4, + mallocTiny5, + mallocTiny6, + mallocTiny7, + mallocTiny8, + mallocTiny9, + mallocTiny10, + mallocTiny11, + mallocTiny12, + mallocTiny13, + mallocTiny14, + mallocTiny15, + mallocgcSmallNoScanSC2, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC3, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC4, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC5, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC6, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC7, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC8, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + 
mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC9, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC10, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC11, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC12, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC13, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC14, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC15, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC16, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC17, + mallocgcSmallNoScanSC18, + 
mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC18, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC19, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC20, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC21, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + 
mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC22, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC23, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC24, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC25, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + 
mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, + mallocgcSmallNoScanSC26, +} diff --git a/src/runtime/malloc_tables_plan9.go b/src/runtime/malloc_tables_plan9.go new file mode 100644 index 0000000000..4d2740bbb2 --- /dev/null +++ b/src/runtime/malloc_tables_plan9.go @@ -0,0 +1,14 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 + +package runtime + +import "unsafe" + +var ( + mallocScanTable []func(size uintptr, typ *_type, needzero bool) unsafe.Pointer + mallocNoScanTable []func(size uintptr, typ *_type, needzero bool) unsafe.Pointer +) diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go index 6cd525d5e9..bf58947bbc 100644 --- a/src/runtime/malloc_test.go +++ b/src/runtime/malloc_test.go @@ -452,3 +452,13 @@ func BenchmarkGoroutineIdle(b *testing.B) { close(quit) time.Sleep(10 * time.Millisecond) } + +func TestMkmalloc(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveExternalNetwork(t) // To download the golang.org/x/tools dependency. + output, err := exec.Command("go", "-C", "_mkmalloc", "test").CombinedOutput() + t.Logf("test output:\n%s", output) + if err != nil { + t.Errorf("_mkmalloc tests failed: %v", err) + } +} -- cgit v1.3-5-g9baa From f15cd63ec4860c4f2c23cc992843546e0265c332 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 23 Sep 2025 16:31:26 -0700 Subject: cmd/compile: don't rely on loop info when there are irreducible loops Loop information is sketchy when there are irreducible loops. Sometimes blocks inside 2 loops can be recorded as only being part of the outer loop. That causes tighten to move values that want to move into such a block to move out of the loop altogether, breaking the invariant that operations have to be scheduled after their args. Fixes #75569 Change-Id: Idd80e6d2268094b8ae6387563081fdc1e211856a Reviewed-on: https://go-review.googlesource.com/c/go/+/706355 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/tighten.go | 27 +++++++----- test/fixedbugs/issue75569.go | 77 +++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 12 deletions(-) create mode 100644 test/fixedbugs/issue75569.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go index 48efdb5609..b1f787e03b 100644 --- a/src/cmd/compile/internal/ssa/tighten.go +++ b/src/cmd/compile/internal/ssa/tighten.go @@ -123,18 +123,21 @@ func tighten(f *Func) { // If the target location is inside a loop, // move the target location up to just before the loop head. - for _, b := range f.Blocks { - origloop := loops.b2l[b.ID] - for _, v := range b.Values { - t := target[v.ID] - if t == nil { - continue - } - targetloop := loops.b2l[t.ID] - for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) { - t = idom[targetloop.header.ID] - target[v.ID] = t - targetloop = loops.b2l[t.ID] + if !loops.hasIrreducible { + // Loop info might not be correct for irreducible loops. See issue 75569. 
+ for _, b := range f.Blocks { + origloop := loops.b2l[b.ID] + for _, v := range b.Values { + t := target[v.ID] + if t == nil { + continue + } + targetloop := loops.b2l[t.ID] + for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) { + t = idom[targetloop.header.ID] + target[v.ID] = t + targetloop = loops.b2l[t.ID] + } } } } diff --git a/test/fixedbugs/issue75569.go b/test/fixedbugs/issue75569.go new file mode 100644 index 0000000000..8420641db2 --- /dev/null +++ b/test/fixedbugs/issue75569.go @@ -0,0 +1,77 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func fff(a []int, b bool, p, q *int) { +outer: + n := a[0] + a = a[1:] + switch n { + case 1: + goto one + case 2: + goto two + case 3: + goto three + case 4: + goto four + } + +one: + goto inner +two: + goto outer +three: + goto inner +four: + goto innerSideEntry + +inner: + n = a[0] + a = a[1:] + switch n { + case 1: + goto outer + case 2: + goto inner + case 3: + goto innerSideEntry + default: + return + } +innerSideEntry: + n = a[0] + a = a[1:] + switch n { + case 1: + goto outer + case 2: + goto inner + case 3: + goto inner + } + ggg(p, q) + goto inner +} + +var b bool + +func ggg(p, q *int) { + n := *p + 5 // this +5 ends up in the entry block, well before the *p load + if b { + *q = 0 + } + *p = n +} + +func main() { + var x, y int + fff([]int{4, 4, 4}, false, &x, &y) + if x != 5 { + panic(x) + } +} -- cgit v1.3-5-g9baa From 6b32c613ca2e69449b66ed552b93562e6be70577 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 18 Jul 2025 14:19:26 -0400 Subject: go/types: make typeset return an iterator typeset(t) now returns a func equivalent to iter.Seq2[Type, Type] for the sequence over (type, underlying) pairs in the typeset of t. underIs was modified to take advantage of the underlying iteration primitive, all, which computes the desired boolean conjunction directly. 
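As a rough, self-contained illustration of the callback-to-iterator pattern described above (this is not code from the CL; the names pairs and allTrue and the []string domain are invented for the sketch, which only assumes Go 1.23+ for range-over-func and the iter package):

	package main

	import (
		"fmt"
		"iter"
	)

	// pairs returns an iterator over the (index, value) pairs of s.
	// Returning an iter.Seq2 lets callers use a plain for-range loop
	// and stop early, instead of threading a callback through.
	func pairs(s []string) iter.Seq2[int, string] {
		return func(yield func(int, string) bool) {
			for i, v := range s {
				if !yield(i, v) {
					return
				}
			}
		}
	}

	// allTrue reports whether f is true for every pair; it plays the
	// same role as the boolean-conjunction helper mentioned above.
	func allTrue(s []string, f func(int, string) bool) bool {
		for i, v := range pairs(s) {
			if !f(i, v) {
				return false
			}
		}
		return true
	}

	func main() {
		for i, w := range pairs([]string{"type", "underlying"}) {
			fmt.Println(i, w)
		}
		fmt.Println(allTrue([]string{"type", "underlying"}, func(_ int, w string) bool { return w != "" }))
	}
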
Change-Id: I3e17d5970fd2908c5dca0754db3e251bf1200af2 Reviewed-on: https://go-review.googlesource.com/c/go/+/688876 Auto-Submit: Alan Donovan LUCI-TryBot-Result: Go LUCI Reviewed-by: Robert Findley --- src/cmd/compile/internal/types2/builtins.go | 47 ++++++++++------------- src/cmd/compile/internal/types2/index.go | 12 +++--- src/cmd/compile/internal/types2/signature.go | 7 ++-- src/cmd/compile/internal/types2/typeparam.go | 10 ++--- src/cmd/compile/internal/types2/typeset.go | 16 ++++---- src/cmd/compile/internal/types2/under.go | 57 ++++++++++++---------------- src/go/types/builtins.go | 47 ++++++++++------------- src/go/types/index.go | 3 +- src/go/types/signature.go | 7 ++-- src/go/types/typeparam.go | 10 ++--- src/go/types/typeset.go | 16 ++++---- src/go/types/under.go | 57 ++++++++++++---------------- 12 files changed, 129 insertions(+), 160 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go index 3de2857ed4..df207a2746 100644 --- a/src/cmd/compile/internal/types2/builtins.go +++ b/src/cmd/compile/internal/types2/builtins.go @@ -98,17 +98,17 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { y := args[1] hasString := false - typeset(y.typ, func(_, u Type) bool { + for _, u := range typeset(y.typ) { if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) { - return true - } - if isString(u) { + // typeset ⊇ {[]byte} + } else if isString(u) { + // typeset ⊇ {string} hasString = true - return true + } else { + y = nil + break } - y = nil - return false - }) + } if y != nil && hasString { // setting the signature also signals that we're done sig = makeSig(x.typ, x.typ, y.typ) @@ -368,16 +368,16 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( var special bool if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { special = true - typeset(y.typ, func(_, u Type) bool { + for _, u := range typeset(y.typ) { if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) { - return true - } - if isString(u) { - return true + // typeset ⊇ {[]byte} + } else if isString(u) { + // typeset ⊇ {string} + } else { + special = false + break } - special = false - return false - }) + } } // general case @@ -980,29 +980,22 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // or a type error if x is not a slice (or a type set of slices). func sliceElem(x *operand) (Type, *typeError) { var E Type - var err *typeError - typeset(x.typ, func(_, u Type) bool { + for _, u := range typeset(x.typ) { s, _ := u.(*Slice) if s == nil { if x.isNil() { // Printing x in this case would just print "nil". // Special case this so we can emphasize "untyped". 
- err = typeErrorf("argument must be a slice; have untyped nil") + return nil, typeErrorf("argument must be a slice; have untyped nil") } else { - err = typeErrorf("argument must be a slice; have %s", x) + return nil, typeErrorf("argument must be a slice; have %s", x) } - return false } if E == nil { E = s.elem } else if !Identical(E, s.elem) { - err = typeErrorf("mismatched slice element types %s and %s in %s", E, s.elem, x) - return false + return nil, typeErrorf("mismatched slice element types %s and %s in %s", E, s.elem, x) } - return true - }) - if err != nil { - return nil, err } return E, nil } diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go index 80e8514168..7e16a87332 100644 --- a/src/cmd/compile/internal/types2/index.go +++ b/src/cmd/compile/internal/types2/index.go @@ -216,11 +216,11 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { // determine common underlying type cu var ct, cu Type // type and respective common underlying type var hasString bool - typeset(x.typ, func(t, u Type) bool { + for t, u := range typeset(x.typ) { if u == nil { check.errorf(x, NonSliceableOperand, "cannot slice %s: no specific type in %s", x, x.typ) cu = nil - return false + break } // Treat strings like byte slices but remember that we saw a string. @@ -232,18 +232,16 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { // If this is the first type we're seeing, we're done. if cu == nil { ct, cu = t, u - return true + continue } // Otherwise, the current type must have the same underlying type as all previous types. if !Identical(cu, u) { check.errorf(x, NonSliceableOperand, "cannot slice %s: %s and %s have different underlying types", x, ct, t) cu = nil - return false + break } - - return true - }) + } if hasString { // If we saw a string, proceed with string type, // but don't go from untyped string to string. diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go index eaecb77af5..ea1cfd88cc 100644 --- a/src/cmd/compile/internal/types2/signature.go +++ b/src/cmd/compile/internal/types2/signature.go @@ -49,7 +49,7 @@ func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params } last := params.At(n - 1).typ var S *Slice - typeset(last, func(t, _ Type) bool { + for t := range typeset(last) { var s *Slice if isString(t) { s = NewSlice(universeByte) @@ -60,10 +60,9 @@ func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params S = s } else if !Identical(S, s) { S = nil - return false + break } - return true - }) + } if S == nil { panic(fmt.Sprintf("got %s, want variadic parameter of unnamed slice or string type", last)) } diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go index a04f928908..c60b5eb417 100644 --- a/src/cmd/compile/internal/types2/typeparam.go +++ b/src/cmd/compile/internal/types2/typeparam.go @@ -155,10 +155,10 @@ func (t *TypeParam) is(f func(*term) bool) bool { return t.iface().typeSet().is(f) } -// typeset is an iterator over the (type/underlying type) pairs of the +// typeset reports whether f(t, y) is true for all (type/underlying type) pairs of the // specific type terms of t's constraint. -// If there are no specific terms, typeset calls yield with (nil, nil). -// In any case, typeset is guaranteed to call yield at least once. 
-func (t *TypeParam) typeset(yield func(t, u Type) bool) { - t.iface().typeSet().typeset(yield) +// If there are no specific terms, typeset returns f(nil, nil). +// In any case, typeset is guaranteed to call f at least once. +func (t *TypeParam) typeset(f func(t, u Type) bool) bool { + return t.iface().typeSet().all(f) } diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go index 74436952f2..ce487e74f7 100644 --- a/src/cmd/compile/internal/types2/typeset.go +++ b/src/cmd/compile/internal/types2/typeset.go @@ -104,13 +104,12 @@ func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll // subsetOf reports whether s1 ⊆ s2. func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) } -// typeset is an iterator over the (type/underlying type) pairs in s. -// If s has no specific terms, typeset calls yield with (nil, nil). -// In any case, typeset is guaranteed to call yield at least once. -func (s *_TypeSet) typeset(yield func(t, u Type) bool) { +// all reports whether f(t, u) is true for each (type/underlying type) pairs in s. +// If s has no specific terms, all calls f(nil, nil). +// In any case, all is guaranteed to call f at least once. +func (s *_TypeSet) all(f func(t, u Type) bool) bool { if !s.hasTerms() { - yield(nil, nil) - return + return f(nil, nil) } for _, t := range s.terms { @@ -123,10 +122,11 @@ func (s *_TypeSet) typeset(yield func(t, u Type) bool) { if debug { assert(Identical(u, under(u))) } - if !yield(t.typ, u) { - break + if !f(t.typ, u) { + return false } } + return true } // is calls f with the specific type terms of s and reports whether diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go index 9e5334b724..078ba9ab17 100644 --- a/src/cmd/compile/internal/types2/under.go +++ b/src/cmd/compile/internal/types2/under.go @@ -4,6 +4,8 @@ package types2 +import "iter" + // under returns the true expanded underlying type. // If it doesn't exist, the result is Typ[Invalid]. // under must only be called when a type is known @@ -18,12 +20,18 @@ func under(t Type) Type { // If typ is a type parameter, underIs returns the result of typ.underIs(f). // Otherwise, underIs returns the result of f(under(typ)). func underIs(typ Type, f func(Type) bool) bool { - var ok bool - typeset(typ, func(_, u Type) bool { - ok = f(u) - return ok + return all(typ, func(_, u Type) bool { + return f(u) }) - return ok +} + +// all reports whether f(t, u) is true for all (type/underlying type) +// pairs in the typeset of t. See [typeset] for details of sequence. +func all(t Type, f func(t, u Type) bool) bool { + if p, _ := Unalias(t).(*TypeParam); p != nil { + return p.typeset(f) + } + return f(t, under(t)) } // typeset is an iterator over the (type/underlying type) pairs of the @@ -32,12 +40,10 @@ func underIs(typ Type, f func(Type) bool) bool { // In that case, if there are no specific terms, typeset calls yield with (nil, nil). // If t is not a type parameter, the implied type set consists of just t. // In any case, typeset is guaranteed to call yield at least once. -func typeset(t Type, yield func(t, u Type) bool) { - if p, _ := Unalias(t).(*TypeParam); p != nil { - p.typeset(yield) - return +func typeset(t Type) iter.Seq2[Type, Type] { + return func(yield func(t, u Type) bool) { + _ = all(t, yield) } - yield(t, under(t)) } // A typeError describes a type error. 
@@ -80,35 +86,28 @@ func (err *typeError) format(check *Checker) string { // with the single type t in its type set. func commonUnder(t Type, cond func(t, u Type) *typeError) (Type, *typeError) { var ct, cu Type // type and respective common underlying type - var err *typeError - - bad := func(format string, args ...any) bool { - err = typeErrorf(format, args...) - return false - } - - typeset(t, func(t, u Type) bool { + for t, u := range typeset(t) { if cond != nil { - if err = cond(t, u); err != nil { - return false + if err := cond(t, u); err != nil { + return nil, err } } if u == nil { - return bad("no specific type") + return nil, typeErrorf("no specific type") } // If this is the first type we're seeing, we're done. if cu == nil { ct, cu = t, u - return true + continue } // If we've seen a channel before, and we have a channel now, they must be compatible. if chu, _ := cu.(*Chan); chu != nil { if ch, _ := u.(*Chan); ch != nil { if !Identical(chu.elem, ch.elem) { - return bad("channels %s and %s have different element types", ct, t) + return nil, typeErrorf("channels %s and %s have different element types", ct, t) } // If we have different channel directions, keep the restricted one // and complain if they conflict. @@ -118,22 +117,16 @@ func commonUnder(t Type, cond func(t, u Type) *typeError) (Type, *typeError) { case chu.dir == SendRecv: ct, cu = t, u // switch to restricted channel case ch.dir != SendRecv: - return bad("channels %s and %s have conflicting directions", ct, t) + return nil, typeErrorf("channels %s and %s have conflicting directions", ct, t) } - return true + continue } } // Otherwise, the current type must have the same underlying type as all previous types. if !Identical(cu, u) { - return bad("%s and %s have different underlying types", ct, t) + return nil, typeErrorf("%s and %s have different underlying types", ct, t) } - - return true - }) - - if err != nil { - return nil, err } return cu, nil } diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go index 1163321ecd..9b03a40cbc 100644 --- a/src/go/types/builtins.go +++ b/src/go/types/builtins.go @@ -101,17 +101,17 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { y := args[1] hasString := false - typeset(y.typ, func(_, u Type) bool { + for _, u := range typeset(y.typ) { if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) { - return true - } - if isString(u) { + // typeset ⊇ {[]byte} + } else if isString(u) { + // typeset ⊇ {string} hasString = true - return true + } else { + y = nil + break } - y = nil - return false - }) + } if y != nil && hasString { // setting the signature also signals that we're done sig = makeSig(x.typ, x.typ, y.typ) @@ -371,16 +371,16 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b var special bool if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { special = true - typeset(y.typ, func(_, u Type) bool { + for _, u := range typeset(y.typ) { if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) { - return true - } - if isString(u) { - return true + // typeset ⊇ {[]byte} + } else if isString(u) { + // typeset ⊇ {string} + } else { + special = false + break } - special = false - return false - }) + } } // general case @@ -983,29 +983,22 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // or a type error if x is not a slice (or a type set of slices). 
func sliceElem(x *operand) (Type, *typeError) { var E Type - var err *typeError - typeset(x.typ, func(_, u Type) bool { + for _, u := range typeset(x.typ) { s, _ := u.(*Slice) if s == nil { if x.isNil() { // Printing x in this case would just print "nil". // Special case this so we can emphasize "untyped". - err = typeErrorf("argument must be a slice; have untyped nil") + return nil, typeErrorf("argument must be a slice; have untyped nil") } else { - err = typeErrorf("argument must be a slice; have %s", x) + return nil, typeErrorf("argument must be a slice; have %s", x) } - return false } if E == nil { E = s.elem } else if !Identical(E, s.elem) { - err = typeErrorf("mismatched slice element types %s and %s in %s", E, s.elem, x) - return false + return nil, typeErrorf("mismatched slice element types %s and %s in %s", E, s.elem, x) } - return true - }) - if err != nil { - return nil, err } return E, nil } diff --git a/src/go/types/index.go b/src/go/types/index.go index 58c8893a8d..1d4f36dcf3 100644 --- a/src/go/types/index.go +++ b/src/go/types/index.go @@ -218,7 +218,8 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) { // determine common underlying type cu var ct, cu Type // type and respective common underlying type var hasString bool - typeset(x.typ, func(t, u Type) bool { + // TODO(adonovan): use go1.23 "range typeset()". + typeset(x.typ)(func(t, u Type) bool { if u == nil { check.errorf(x, NonSliceableOperand, "cannot slice %s: no specific type in %s", x, x.typ) cu = nil diff --git a/src/go/types/signature.go b/src/go/types/signature.go index f059ecb183..fa41c797b2 100644 --- a/src/go/types/signature.go +++ b/src/go/types/signature.go @@ -62,7 +62,7 @@ func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params } last := params.At(n - 1).typ var S *Slice - typeset(last, func(t, _ Type) bool { + for t := range typeset(last) { var s *Slice if isString(t) { s = NewSlice(universeByte) @@ -73,10 +73,9 @@ func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params S = s } else if !Identical(S, s) { S = nil - return false + break } - return true - }) + } if S == nil { panic(fmt.Sprintf("got %s, want variadic parameter of unnamed slice or string type", last)) } diff --git a/src/go/types/typeparam.go b/src/go/types/typeparam.go index cdcd552739..2ffef8f613 100644 --- a/src/go/types/typeparam.go +++ b/src/go/types/typeparam.go @@ -158,10 +158,10 @@ func (t *TypeParam) is(f func(*term) bool) bool { return t.iface().typeSet().is(f) } -// typeset is an iterator over the (type/underlying type) pairs of the +// typeset reports whether f(t, y) is true for all (type/underlying type) pairs of the // specific type terms of t's constraint. -// If there are no specific terms, typeset calls yield with (nil, nil). -// In any case, typeset is guaranteed to call yield at least once. -func (t *TypeParam) typeset(yield func(t, u Type) bool) { - t.iface().typeSet().typeset(yield) +// If there are no specific terms, typeset returns f(nil, nil). +// In any case, typeset is guaranteed to call f at least once. +func (t *TypeParam) typeset(f func(t, u Type) bool) bool { + return t.iface().typeSet().all(f) } diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go index dd384e8504..46ed5ce180 100644 --- a/src/go/types/typeset.go +++ b/src/go/types/typeset.go @@ -107,13 +107,12 @@ func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll // subsetOf reports whether s1 ⊆ s2. 
func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) } -// typeset is an iterator over the (type/underlying type) pairs in s. -// If s has no specific terms, typeset calls yield with (nil, nil). -// In any case, typeset is guaranteed to call yield at least once. -func (s *_TypeSet) typeset(yield func(t, u Type) bool) { +// all reports whether f(t, u) is true for each (type/underlying type) pairs in s. +// If s has no specific terms, all calls f(nil, nil). +// In any case, all is guaranteed to call f at least once. +func (s *_TypeSet) all(f func(t, u Type) bool) bool { if !s.hasTerms() { - yield(nil, nil) - return + return f(nil, nil) } for _, t := range s.terms { @@ -126,10 +125,11 @@ func (s *_TypeSet) typeset(yield func(t, u Type) bool) { if debug { assert(Identical(u, under(u))) } - if !yield(t.typ, u) { - break + if !f(t.typ, u) { + return false } } + return true } // is calls f with the specific type terms of s and reports whether diff --git a/src/go/types/under.go b/src/go/types/under.go index 2c09c49134..43bf0ad07c 100644 --- a/src/go/types/under.go +++ b/src/go/types/under.go @@ -7,6 +7,8 @@ package types +import "iter" + // under returns the true expanded underlying type. // If it doesn't exist, the result is Typ[Invalid]. // under must only be called when a type is known @@ -21,12 +23,18 @@ func under(t Type) Type { // If typ is a type parameter, underIs returns the result of typ.underIs(f). // Otherwise, underIs returns the result of f(under(typ)). func underIs(typ Type, f func(Type) bool) bool { - var ok bool - typeset(typ, func(_, u Type) bool { - ok = f(u) - return ok + return all(typ, func(_, u Type) bool { + return f(u) }) - return ok +} + +// all reports whether f(t, u) is true for all (type/underlying type) +// pairs in the typeset of t. See [typeset] for details of sequence. +func all(t Type, f func(t, u Type) bool) bool { + if p, _ := Unalias(t).(*TypeParam); p != nil { + return p.typeset(f) + } + return f(t, under(t)) } // typeset is an iterator over the (type/underlying type) pairs of the @@ -35,12 +43,10 @@ func underIs(typ Type, f func(Type) bool) bool { // In that case, if there are no specific terms, typeset calls yield with (nil, nil). // If t is not a type parameter, the implied type set consists of just t. // In any case, typeset is guaranteed to call yield at least once. -func typeset(t Type, yield func(t, u Type) bool) { - if p, _ := Unalias(t).(*TypeParam); p != nil { - p.typeset(yield) - return +func typeset(t Type) iter.Seq2[Type, Type] { + return func(yield func(t, u Type) bool) { + _ = all(t, yield) } - yield(t, under(t)) } // A typeError describes a type error. @@ -83,35 +89,28 @@ func (err *typeError) format(check *Checker) string { // with the single type t in its type set. func commonUnder(t Type, cond func(t, u Type) *typeError) (Type, *typeError) { var ct, cu Type // type and respective common underlying type - var err *typeError - - bad := func(format string, args ...any) bool { - err = typeErrorf(format, args...) - return false - } - - typeset(t, func(t, u Type) bool { + for t, u := range typeset(t) { if cond != nil { - if err = cond(t, u); err != nil { - return false + if err := cond(t, u); err != nil { + return nil, err } } if u == nil { - return bad("no specific type") + return nil, typeErrorf("no specific type") } // If this is the first type we're seeing, we're done. if cu == nil { ct, cu = t, u - return true + continue } // If we've seen a channel before, and we have a channel now, they must be compatible. 
if chu, _ := cu.(*Chan); chu != nil { if ch, _ := u.(*Chan); ch != nil { if !Identical(chu.elem, ch.elem) { - return bad("channels %s and %s have different element types", ct, t) + return nil, typeErrorf("channels %s and %s have different element types", ct, t) } // If we have different channel directions, keep the restricted one // and complain if they conflict. @@ -121,22 +120,16 @@ func commonUnder(t Type, cond func(t, u Type) *typeError) (Type, *typeError) { case chu.dir == SendRecv: ct, cu = t, u // switch to restricted channel case ch.dir != SendRecv: - return bad("channels %s and %s have conflicting directions", ct, t) + return nil, typeErrorf("channels %s and %s have conflicting directions", ct, t) } - return true + continue } } // Otherwise, the current type must have the same underlying type as all previous types. if !Identical(cu, u) { - return bad("%s and %s have different underlying types", ct, t) + return nil, typeErrorf("%s and %s have different underlying types", ct, t) } - - return true - }) - - if err != nil { - return nil, err } return cu, nil } -- cgit v1.3-5-g9baa
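One detail worth spelling out about the sliceElem and commonUnder rewrites above: with a range-over-func iterator, an error can be returned directly from inside the loop body, instead of being captured in an outer err variable and signalled by returning false from a callback. A minimal sketch of that shape, with invented names (elems, sumNonNegative) rather than go/types code, assuming Go 1.23+:

	package main

	import (
		"fmt"
		"iter"
	)

	// elems stands in for typeset here: an iterator over the elements of s.
	func elems(s []int) iter.Seq[int] {
		return func(yield func(int) bool) {
			for _, v := range s {
				if !yield(v) {
					return
				}
			}
		}
	}

	// sumNonNegative mirrors the "after" shape: the error is returned
	// straight from the loop body, with no captured err and no bad()
	// closure threaded through a callback.
	func sumNonNegative(s []int) (int, error) {
		sum := 0
		for v := range elems(s) {
			if v < 0 {
				return 0, fmt.Errorf("negative element %d", v)
			}
			sum += v
		}
		return sum, nil
	}

	func main() {
		fmt.Println(sumNonNegative([]int{1, 2, 3}))
		fmt.Println(sumNonNegative([]int{1, -2, 3}))
	}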