aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mbitmap.go
diff options
context:
space:
mode:
authorKeith Randall <khr@golang.org>2022-10-20 14:20:41 -0700
committerKeith Randall <khr@golang.org>2023-02-16 00:16:24 +0000
commit55044288ad22f0c46ac55375ed9ef3de1babb77c (patch)
treebbb4c7a8682272d27e21b877d14bed24363e84b6 /src/runtime/mbitmap.go
parent44d22e75dd9a0cbffbb04c9ce6d6bf9030634cc1 (diff)
downloadgo-55044288ad22f0c46ac55375ed9ef3de1babb77c.tar.xz
runtime: reimplement GODEBUG=cgocheck=2 as a GOEXPERIMENT
Move this knob from a binary-startup thing to a build-time thing. This will enable follow-on optimizations to the write barrier. Change-Id: Ic3323348621c76a7dc390c09ff55016b19c43018 Reviewed-on: https://go-review.googlesource.com/c/go/+/447778 Reviewed-by: Michael Knyszek <mknyszek@google.com> Run-TryBot: Keith Randall <khr@golang.org> TryBot-Result: Gopher Robot <gobot@golang.org> Reviewed-by: Cherry Mui <cherryyz@google.com>
Diffstat (limited to 'src/runtime/mbitmap.go')
-rw-r--r--src/runtime/mbitmap.go16
1 file changed, 8 insertions, 8 deletions
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 088b566729..a3a8b2e70a 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -528,7 +528,7 @@ func (h heapBits) nextFast() (heapBits, uintptr) {
// make sure the underlying allocation contains pointers, usually
// by checking typ.ptrdata.
//
-// Callers must perform cgo checks if writeBarrier.cgo.
+// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
@@ -574,7 +574,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
}
dstx := (*uintptr)(unsafe.Pointer(addr))
if !buf.putFast(*dstx, 0) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
} else {
@@ -586,7 +586,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
dstx := (*uintptr)(unsafe.Pointer(addr))
srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
}
@@ -618,7 +618,7 @@ func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
}
srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
if !buf.putFast(0, *srcx) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
}
@@ -651,12 +651,12 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
if src == 0 {
if !buf.putFast(*dstx, 0) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
} else {
srcx := (*uintptr)(unsafe.Pointer(src + i))
if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
}
@@ -678,7 +678,7 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
-// Callers must perform cgo checks if writeBarrier.cgo.
+// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
@@ -710,7 +710,7 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
srcx := (*uintptr)(unsafe.Pointer(src + i))
if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
}