author    Pouriya <pouriya.jahanbakhsh@gmail.com>    2024-02-27 21:51:31 +0000
committer Gopher Robot <gobot@golang.org>            2024-03-04 17:34:30 +0000
commit    4c08c125936b4ae3daff04cecf5309dd5dd1e2c5 (patch)
tree      fac8b4deb533caf5caaf89b0c12ab777d2c07f39 /src/runtime
parent    566e08fc649cb8c65d2a9f0f362ce76d79e0fc4d (diff)
download  go-4c08c125936b4ae3daff04cecf5309dd5dd1e2c5.tar.xz
runtime: use .Pointers() instead of manual checking
Change-Id: Ib78c1513616089f4942297cd17212b1b11871fd5
GitHub-Last-Rev: f97fe5b5bffffe25dc31de7964588640cb70ec41
GitHub-Pull-Request: golang/go#65819
Reviewed-on: https://go-review.googlesource.com/c/go/+/565515
Reviewed-by: Jorropo <jorropo.pgm@gmail.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Keith Randall <khr@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
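
The change is a readability cleanup: every boolean test of typ.PtrBytes against zero becomes a call to the Pointers() accessor on the type descriptor. Below is a minimal sketch of the helper the hunks rely on, with names taken from the diff; the real declaration lives in internal/abi and the full struct has many more fields, so treat this as an illustration rather than the actual source.

package abi

// Type is a pared-down stand-in for the descriptor that the runtime's
// *_type aliases; only the field this commit touches is reproduced here.
type Type struct {
	PtrBytes uintptr // number of (prefix) bytes in the type that can contain pointers
}

// Pointers reports whether the type contains any pointers.
// The hunks below replace open-coded PtrBytes comparisons with this call.
func (t *Type) Pointers() bool {
	return t.PtrBytes != 0
}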
Diffstat (limited to 'src/runtime')
-rw-r--r--   src/runtime/arena.go          4
-rw-r--r--   src/runtime/cgocall.go        6
-rw-r--r--   src/runtime/cgocheck.go       6
-rw-r--r--   src/runtime/chan.go           2
-rw-r--r--   src/runtime/checkptr.go       2
-rw-r--r--   src/runtime/export_test.go    2
-rw-r--r--   src/runtime/heapdump.go       2
-rw-r--r--   src/runtime/malloc.go         4
-rw-r--r--   src/runtime/map.go           14
-rw-r--r--   src/runtime/map_fast32.go    10
-rw-r--r--   src/runtime/map_fast64.go    10
-rw-r--r--   src/runtime/map_faststr.go    6
-rw-r--r--   src/runtime/mbarrier.go      12
-rw-r--r--   src/runtime/mfinal.go         2
-rw-r--r--   src/runtime/slice.go          8
15 files changed, 45 insertions, 45 deletions
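
Every hunk that follows applies one of two mechanical rewrites: typ.PtrBytes != 0 becomes typ.Pointers(), and typ.PtrBytes == 0 becomes !typ.Pointers(). A small self-contained sketch of the two idioms side by side (typeInfo is a hypothetical stand-in, not a runtime API):

package main

import "fmt"

// typeInfo stands in for the runtime type descriptor; only PtrBytes and the
// Pointers accessor matter for this commit (illustrative assumption).
type typeInfo struct{ PtrBytes uintptr }

func (t *typeInfo) Pointers() bool { return t.PtrBytes != 0 }

func main() {
	ints := &typeInfo{PtrBytes: 0} // e.g. [8]int64: no pointer-bearing bytes
	ptrs := &typeInfo{PtrBytes: 8} // e.g. *byte: the first word is a pointer

	// Old idiom (the '-' side of the hunks) vs. new idiom (the '+' side):
	fmt.Println(ints.PtrBytes == 0, !ints.Pointers()) // true true  -> "noscan" allocation
	fmt.Println(ptrs.PtrBytes != 0, ptrs.Pointers())  // true true  -> GC must scan
}

Places where the byte count itself is needed, such as the scanSize computation in the malloc.go hunk (scanSize = dataSize - typ.Size_ + typ.PtrBytes), keep reading PtrBytes directly; only the boolean checks are converted.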
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index e1fae834d7..3fdd4cbdd6 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -482,7 +482,7 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
mp.mallocing = 1
var ptr unsafe.Pointer
- if typ.PtrBytes == 0 {
+ if !typ.Pointers() {
// Allocate pointer-less objects from the tail end of the chunk.
v, ok := s.userArenaChunkFree.takeFromBack(size, typ.Align_)
if ok {
@@ -504,7 +504,7 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
throw("arena chunk needs zeroing, but should already be zeroed")
}
// Set up heap bitmap and do extra accounting.
- if typ.PtrBytes != 0 {
+ if typ.Pointers() {
if cap >= 0 {
userArenaHeapBitsSetSliceType(typ, cap, ptr, s)
} else {
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index f2dd98702d..05fa47158a 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -541,7 +541,7 @@ const cgoResultFail = "cgo result is unpinned Go pointer or points to unpinned G
// level, where Go pointers are allowed. Go pointers to pinned objects are
// allowed as long as they don't reference other unpinned pointers.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
- if t.PtrBytes == 0 || p == nil {
+ if !t.Pointers() || p == nil {
// If the type has no pointers there is nothing to do.
return
}
@@ -604,7 +604,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
if !top && !isPinned(p) {
panic(errorString(msg))
}
- if st.Elem.PtrBytes == 0 {
+ if !st.Elem.Pointers() {
return
}
for i := 0; i < s.cap; i++ {
@@ -629,7 +629,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
return
}
for _, f := range st.Fields {
- if f.Typ.PtrBytes == 0 {
+ if !f.Typ.Pointers() {
continue
}
cgoCheckArg(f.Typ, add(p, f.Offset), true, top, msg)
diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index 3d6de4f855..fd87723dfc 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -90,7 +90,7 @@ func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer) {
//go:nosplit
//go:nowritebarrier
func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if typ.PtrBytes == 0 {
+ if !typ.Pointers() {
return
}
if !cgoIsGoPointer(src) {
@@ -111,7 +111,7 @@ func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
//go:nosplit
//go:nowritebarrier
func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
- if typ.PtrBytes == 0 {
+ if !typ.Pointers() {
return
}
if !cgoIsGoPointer(src) {
@@ -247,7 +247,7 @@ func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
//go:nowritebarrier
//go:systemstack
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
- if typ.PtrBytes == 0 {
+ if !typ.Pointers() {
return
}
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index c48b85f576..c793d6cef3 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -96,7 +96,7 @@ func makechan(t *chantype, size int) *hchan {
c = (*hchan)(mallocgc(hchanSize, nil, true))
// Race detector uses this location for synchronization.
c.buf = c.raceaddr()
- case elem.PtrBytes == 0:
+ case !elem.Pointers():
// Elements do not contain pointers.
// Allocate hchan and buf in one call.
c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
diff --git a/src/runtime/checkptr.go b/src/runtime/checkptr.go
index 3c49645a44..810787bff5 100644
--- a/src/runtime/checkptr.go
+++ b/src/runtime/checkptr.go
@@ -16,7 +16,7 @@ func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) {
// Note that we allow unaligned pointers if the types they point to contain
// no pointers themselves. See issue 37298.
// TODO(mdempsky): What about fieldAlign?
- if elem.PtrBytes != 0 && uintptr(p)&(uintptr(elem.Align_)-1) != 0 {
+ if elem.Pointers() && uintptr(p)&(uintptr(elem.Align_)-1) != 0 {
throw("checkptr: misaligned pointer conversion")
}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 4588240f9e..9b84e96e50 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -351,7 +351,7 @@ func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type)
// Round up the size to the size class to make the benchmark a little more
// realistic. However, validate it, to make sure this is safe.
- allocSize := roundupsize(size, t.PtrBytes == 0)
+ allocSize := roundupsize(size, !t.Pointers())
if s.npages*pageSize < allocSize {
panic("backing span not large enough for benchmark")
}
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index cca6172960..8bae8c0636 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -206,7 +206,7 @@ func dumptype(t *_type) {
dwritebyte('.')
dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
}
- dumpbool(t.Kind_&kindDirectIface == 0 || t.PtrBytes != 0)
+ dumpbool(t.Kind_&kindDirectIface == 0 || t.Pointers())
}
// dump an object.
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index e2cb2e456e..271e4c43db 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1043,7 +1043,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
var span *mspan
var header **_type
var x unsafe.Pointer
- noscan := typ == nil || typ.PtrBytes == 0
+ noscan := typ == nil || !typ.Pointers()
// In some cases block zeroing can profitably (for latency reduction purposes)
// be delayed till preemption is possible; delayedZeroing tracks that state.
delayedZeroing := false
@@ -1188,7 +1188,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Array allocation. If there are any
// pointers, GC has to scan to the last
// element.
- if typ.PtrBytes != 0 {
+ if typ.Pointers() {
scanSize = dataSize - typ.Size_ + typ.PtrBytes
}
} else {
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 98bd792d2a..bb3ac39e94 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -256,7 +256,7 @@ func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
ovf = (*bmap)(newobject(t.Bucket))
}
h.incrnoverflow()
- if t.Bucket.PtrBytes == 0 {
+ if !t.Bucket.Pointers() {
h.createOverflow()
*h.extra.overflow = append(*h.extra.overflow, ovf)
}
@@ -346,7 +346,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
// used with this value of b.
nbuckets += bucketShift(b - 4)
sz := t.Bucket.Size_ * nbuckets
- up := roundupsize(sz, t.Bucket.PtrBytes == 0)
+ up := roundupsize(sz, !t.Bucket.Pointers())
if up != sz {
nbuckets = up / t.Bucket.Size_
}
@@ -360,7 +360,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
// but may not be empty.
buckets = dirtyalloc
size := t.Bucket.Size_ * nbuckets
- if t.Bucket.PtrBytes != 0 {
+ if t.Bucket.Pointers() {
memclrHasPointers(buckets, size)
} else {
memclrNoHeapPointers(buckets, size)
@@ -741,13 +741,13 @@ search:
// Only clear key if there are pointers in it.
if t.IndirectKey() {
*(*unsafe.Pointer)(k) = nil
- } else if t.Key.PtrBytes != 0 {
+ } else if t.Key.Pointers() {
memclrHasPointers(k, t.Key.Size_)
}
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
*(*unsafe.Pointer)(e) = nil
- } else if t.Elem.PtrBytes != 0 {
+ } else if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
@@ -824,7 +824,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
// grab snapshot of bucket state
it.B = h.B
it.buckets = h.buckets
- if t.Bucket.PtrBytes == 0 {
+ if !t.Bucket.Pointers() {
// Allocate the current slice and remember pointers to both current and old.
// This preserves all relevant overflow buckets alive even if
// the table grows and/or overflow buckets are added to the table
@@ -1252,7 +1252,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index 3290321782..01a81439e3 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -302,13 +302,13 @@ search:
// Only clear key if there are pointers in it.
// This can only happen if pointers are 32 bit
// wide as 64 bit pointers do not fit into a 32 bit key.
- if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 {
+ if goarch.PtrSize == 4 && t.Key.Pointers() {
// The key must be a pointer as we checked pointers are
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
}
- e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
- if t.Elem.PtrBytes != 0 {
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
+ if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
@@ -428,7 +428,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {
+ if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
@@ -446,7 +446,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index 48dea02e39..f47bc96f70 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -300,7 +300,7 @@ search:
continue
}
// Only clear key if there are pointers in it.
- if t.Key.PtrBytes != 0 {
+ if t.Key.Pointers() {
if goarch.PtrSize == 8 {
*(*unsafe.Pointer)(k) = nil
} else {
@@ -309,8 +309,8 @@ search:
memclrHasPointers(k, 8)
}
}
- e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
- if t.Elem.PtrBytes != 0 {
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
+ if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
@@ -430,7 +430,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if t.Key.PtrBytes != 0 && writeBarrier.enabled {
+ if t.Key.Pointers() && writeBarrier.enabled {
if goarch.PtrSize == 8 {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
@@ -454,7 +454,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 19636e777f..a9898ba1ca 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -335,8 +335,8 @@ search:
}
// Clear key's pointer.
k.str = nil
- e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
- if t.Elem.PtrBytes != 0 {
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+ if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
@@ -469,7 +469,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index c4b6c2a789..dc6922da54 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -153,7 +153,7 @@ func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
if dst == src {
return
}
- if writeBarrier.enabled && typ.PtrBytes != 0 {
+ if writeBarrier.enabled && typ.Pointers() {
// This always copies a full value of type typ so it's safe
// to pass typ along as an optimization. See the comment on
// bulkBarrierPreWrite.
@@ -232,7 +232,7 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
- if writeBarrier.enabled && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
+ if writeBarrier.enabled && typ != nil && typ.Pointers() && size >= goarch.PtrSize {
// Pass nil for the type. dst does not point to value of type typ,
// but rather points into one, so applying the optimization is not
// safe. See the comment on this function.
@@ -305,7 +305,7 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
- if elemType.PtrBytes == 0 {
+ if !elemType.Pointers() {
return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
}
return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
@@ -323,7 +323,7 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
- if writeBarrier.enabled && typ.PtrBytes != 0 {
+ if writeBarrier.enabled && typ.Pointers() {
// This always clears a whole value of type typ, so it's
// safe to pass a type here and apply the optimization.
// See the comment on bulkBarrierPreWrite.
@@ -339,7 +339,7 @@ func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
- if writeBarrier.enabled && typ.PtrBytes != 0 {
+ if writeBarrier.enabled && typ.Pointers() {
// Pass nil for the type. ptr does not point to value of type typ,
// but rather points into one so it's not safe to apply the optimization.
// See the comment on this function in the reflect package and the
@@ -352,7 +352,7 @@ func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintpt
//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
size := typ.Size_ * uintptr(len)
- if writeBarrier.enabled && typ.PtrBytes != 0 {
+ if writeBarrier.enabled && typ.Pointers() {
// This always clears whole elements of an array, so it's
// safe to pass a type here. See the comment on bulkBarrierPreWrite.
bulkBarrierPreWrite(uintptr(ptr), 0, size, typ)
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index 7d9d547c0f..ea3d8a4579 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -449,7 +449,7 @@ func SetFinalizer(obj any, finalizer any) {
if uintptr(e.data) != base {
// As an implementation detail we allow to set finalizers for an inner byte
// of an object if it could come from tiny alloc (see mallocgc for details).
- if ot.Elem == nil || ot.Elem.PtrBytes != 0 || ot.Elem.Size_ >= maxTinySize {
+ if ot.Elem == nil || ot.Elem.Pointers() || ot.Elem.Size_ >= maxTinySize {
throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
}
}
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 8c1023c1e8..4fbe056b78 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -53,7 +53,7 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf
}
var to unsafe.Pointer
- if et.PtrBytes == 0 {
+ if !et.Pointers() {
to = mallocgc(tomem, nil, false)
if copymem < tomem {
memclrNoHeapPointers(add(to, copymem), tomem-copymem)
@@ -183,7 +183,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
// For 1 we don't need any division/multiplication.
// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
// For powers of 2, use a variable shift.
- noscan := et.PtrBytes == 0
+ noscan := !et.Pointers()
switch {
case et.Size_ == 1:
lenmem = uintptr(oldLen)
@@ -238,7 +238,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
}
var p unsafe.Pointer
- if et.PtrBytes == 0 {
+ if !et.Pointers() {
p = mallocgc(capmem, nil, false)
// The append() that calls growslice is going to overwrite from oldLen to newLen.
// Only clear the part that will not be overwritten.
@@ -308,7 +308,7 @@ func reflect_growslice(et *_type, old slice, num int) slice {
// the memory will be overwritten by an append() that called growslice.
// Since the caller of reflect_growslice is not append(),
// zero out this region before returning the slice to the reflect package.
- if et.PtrBytes == 0 {
+ if !et.Pointers() {
oldcapmem := uintptr(old.cap) * et.Size_
newlenmem := uintptr(new.len) * et.Size_
memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)