about summary refs log tree commit diff
path: root/src/runtime/malloc.go
diff options
context:
space:
mode:
author    David Chase <drchase@google.com>  2023-01-20 16:41:57 -0500
committer David Chase <drchase@google.com>  2023-05-05 14:59:28 +0000
commit    bdc6ae579aa86d21183c612c8c37916f397afaa8 (patch)
tree      cc63ff843ab5a7f3c196c8fa2c74924bfee61d93 /src/runtime/malloc.go
parent    dace96b9a12905b34af609eedaa6b43e30e7cdb1 (diff)
download  go-bdc6ae579aa86d21183c612c8c37916f397afaa8.tar.xz
internal/abi: refactor (basic) type struct into one definition
This touches a lot of files, which is bad, but it is also good, since
there's N copies of this information commoned into 1.

The new files in internal/abi are copied from the end of the stack;
ultimately this will all end up being used.

Change-Id: Ia252c0055aaa72ca569411ef9f9e96e3d610889e
Reviewed-on: https://go-review.googlesource.com/c/go/+/462995
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Carlos Amedee <carlos@golang.org>
Run-TryBot: David Chase <drchase@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
Diffstat (limited to 'src/runtime/malloc.go')
-rw-r--r--  src/runtime/malloc.go | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index b53e10a435..c3a196e496 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1019,7 +1019,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
var span *mspan
var x unsafe.Pointer
- noscan := typ == nil || typ.ptrdata == 0
+ noscan := typ == nil || typ.PtrBytes == 0
// In some cases block zeroing can profitably (for latency reduction purposes)
// be delayed till preemption is possible; delayedZeroing tracks that state.
delayedZeroing := false
@@ -1142,15 +1142,15 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if !noscan {
var scanSize uintptr
heapBitsSetType(uintptr(x), size, dataSize, typ)
- if dataSize > typ.size {
+ if dataSize > typ.Size_ {
// Array allocation. If there are any
// pointers, GC has to scan to the last
// element.
- if typ.ptrdata != 0 {
- scanSize = dataSize - typ.size + typ.ptrdata
+ if typ.PtrBytes != 0 {
+ scanSize = dataSize - typ.Size_ + typ.PtrBytes
}
} else {
- scanSize = typ.ptrdata
+ scanSize = typ.PtrBytes
}
c.scanAlloc += scanSize
}
@@ -1321,25 +1321,25 @@ func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
// compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
- return mallocgc(typ.size, typ, true)
+ return mallocgc(typ.Size_, typ, true)
}
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
- return mallocgc(typ.size, typ, true)
+ return mallocgc(typ.Size_, typ, true)
}
//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
- return mallocgc(typ.size, typ, true)
+ return mallocgc(typ.Size_, typ, true)
}
// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
if n == 1 {
- return mallocgc(typ.size, typ, true)
+ return mallocgc(typ.Size_, typ, true)
}
- mem, overflow := math.MulUintptr(typ.size, uintptr(n))
+ mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
if overflow || mem > maxAlloc || n < 0 {
panic(plainError("runtime: allocation size out of range"))
}