author    Keith Randall <khr@golang.org>  2022-10-20 14:20:41 -0700
committer Keith Randall <khr@golang.org>  2023-02-16 00:16:24 +0000
commit    55044288ad22f0c46ac55375ed9ef3de1babb77c (patch)
tree      bbb4c7a8682272d27e21b877d14bed24363e84b6 /src/runtime
parent    44d22e75dd9a0cbffbb04c9ce6d6bf9030634cc1 (diff)
download  go-55044288ad22f0c46ac55375ed9ef3de1babb77c.tar.xz
runtime: reimplement GODEBUG=cgocheck=2 as a GOEXPERIMENT
Move this knob from a binary-startup thing to a build-time thing. This will enable follow-on optimizations to the write barrier.

Change-Id: Ic3323348621c76a7dc390c09ff55016b19c43018
Reviewed-on: https://go-review.googlesource.com/c/go/+/447778
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
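With this change the expensive checks are selected when the toolchain is invoked (GOEXPERIMENT=cgocheck2 go build ./...) rather than when the binary starts. The following is a minimal, standalone sketch of the gating pattern the CL applies at pointer-store sites; the constant cgoCheck2 and the function checkPtrWrite are hypothetical stand-ins for internal/goexperiment.CgoCheck2 (not importable by ordinary packages) and the runtime's cgoCheckPtrWrite.

package main

import (
	"fmt"
	"unsafe"
)

// Stand-in for goexperiment.CgoCheck2: a build-time constant, so when it is
// false the checking branch below is dead code and compiles away entirely.
const cgoCheck2 = false

// Stand-in for runtime.cgoCheckPtrWrite: the real function throws if a Go
// pointer is being written into non-Go memory.
func checkPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer) {
	fmt.Printf("checking write of %p to %p\n", src, dst)
}

func storePointer(dst *unsafe.Pointer, src unsafe.Pointer) {
	if cgoCheck2 { // constant condition: no run-time cost when the experiment is off
		checkPtrWrite(dst, src)
	}
	*dst = src
}

func main() {
	var x, y int
	p := unsafe.Pointer(&x)
	storePointer(&p, unsafe.Pointer(&y))
	fmt.Println(p == unsafe.Pointer(&y)) // true
}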
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/atomic_pointer.go  18
-rw-r--r--  src/runtime/cgocall.go          5
-rw-r--r--  src/runtime/cgocheck.go        34
-rw-r--r--  src/runtime/extern.go           6
-rw-r--r--  src/runtime/mbarrier.go        11
-rw-r--r--  src/runtime/mbitmap.go         16
-rw-r--r--  src/runtime/mgc.go              5
-rw-r--r--  src/runtime/mgcmark.go          4
-rw-r--r--  src/runtime/mwbbuf.go          29
-rw-r--r--  src/runtime/proc.go            11
-rw-r--r--  src/runtime/runtime1.go         4
11 files changed, 74 insertions, 69 deletions
diff --git a/src/runtime/atomic_pointer.go b/src/runtime/atomic_pointer.go
index 25e0e651b4..26dfbfc2cc 100644
--- a/src/runtime/atomic_pointer.go
+++ b/src/runtime/atomic_pointer.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/goexperiment"
"runtime/internal/atomic"
"unsafe"
)
@@ -21,7 +22,7 @@ import (
func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) {
slot := (*uintptr)(unsafe.Pointer(ptr))
if !getg().m.p.ptr().wbBuf.putFast(*slot, uintptr(new)) {
- wbBufFlush(slot, uintptr(new))
+ wbBufFlush()
}
}
@@ -32,6 +33,9 @@ func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
if writeBarrier.enabled {
atomicwb((*unsafe.Pointer)(ptr), new)
}
+ if goexperiment.CgoCheck2 {
+ cgoCheckPtrWrite((*unsafe.Pointer)(ptr), new)
+ }
atomic.StorepNoWB(noescape(ptr), new)
}
@@ -53,6 +57,9 @@ func atomic_casPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
if writeBarrier.enabled {
atomicwb(ptr, new)
}
+ if goexperiment.CgoCheck2 {
+ cgoCheckPtrWrite(ptr, new)
+ }
return atomic.Casp1(ptr, old, new)
}
@@ -69,6 +76,9 @@ func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
if writeBarrier.enabled {
atomicwb(ptr, new)
}
+ if goexperiment.CgoCheck2 {
+ cgoCheckPtrWrite(ptr, new)
+ }
sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
}
@@ -81,6 +91,9 @@ func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Poi
if writeBarrier.enabled {
atomicwb(ptr, new)
}
+ if goexperiment.CgoCheck2 {
+ cgoCheckPtrWrite(ptr, new)
+ }
old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(new)))
return old
}
@@ -94,5 +107,8 @@ func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Poin
if writeBarrier.enabled {
atomicwb(ptr, new)
}
+ if goexperiment.CgoCheck2 {
+ cgoCheckPtrWrite(ptr, new)
+ }
return sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new))
}
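For illustration, here is a small cgo program (hypothetical, not part of this CL) where the sync/atomic path above matters. Built normally, the store below succeeds silently, since cgocheck=1 only checks cgo call arguments and results; in a GOEXPERIMENT=cgocheck2 build, sync_atomic_StorePointer calls cgoCheckPtrWrite and the program aborts with the cgoWriteBarrierFail message.

package main

/*
#include <stdlib.h>
*/
import "C"

import (
	"sync/atomic"
	"unsafe"
)

func main() {
	// One pointer-sized slot of C memory (non-Go memory).
	slot := (*unsafe.Pointer)(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0)))))
	defer C.free(unsafe.Pointer(slot))

	x := new(int)
	// In a cgocheck2 build this throws
	// "fatal error: Go pointer stored into non-Go memory";
	// in a normal build it silently hides *x from the garbage collector.
	atomic.StorePointer(slot, unsafe.Pointer(x))
}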
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 9c75280d62..f9d79eca4b 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -86,6 +86,7 @@ package runtime
import (
"internal/goarch"
+ "internal/goexperiment"
"runtime/internal/sys"
"unsafe"
)
@@ -391,7 +392,7 @@ var racecgosync uint64 // represents possible synchronization in C code
// cgoCheckPointer checks if the argument contains a Go pointer that
// points to a Go pointer, and panics if it does.
func cgoCheckPointer(ptr any, arg any) {
- if debug.cgocheck == 0 {
+ if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
return
}
@@ -631,7 +632,7 @@ func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
// exported Go function. It panics if the result is or contains a Go
// pointer.
func cgoCheckResult(val any) {
- if debug.cgocheck == 0 {
+ if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
return
}
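The effect of the new condition is that the argument and result checks can no longer be switched off in a cgocheck2 build: GODEBUG=cgocheck=0 is ignored once the experiment is baked in. Below is a hypothetical example of the kind of call cgoCheckPointer rejects (requires cgo; the exact panic text can differ between releases).

package main

/*
static void use(void *p) {}
*/
import "C"

import "unsafe"

type node struct {
	next *node // a Go pointer stored inside the argument
}

func main() {
	n := &node{next: &node{}}
	// cgo-generated stubs pass pointer arguments through runtime.cgoCheckPointer,
	// which panics here with a "cgo argument has Go pointer to Go pointer" error.
	C.use(unsafe.Pointer(n))
}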
diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index 84e7516758..af75b5c0b4 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Code to check that pointer writes follow the cgo rules.
-// These functions are invoked via the write barrier when debug.cgocheck > 1.
+// These functions are invoked when GOEXPERIMENT=cgocheck2 is enabled.
package runtime
@@ -14,16 +14,21 @@ import (
const cgoWriteBarrierFail = "Go pointer stored into non-Go memory"
-// cgoCheckWriteBarrier is called whenever a pointer is stored into memory.
+// cgoCheckPtrWrite is called whenever a pointer is stored into memory.
// It throws if the program is storing a Go pointer into non-Go memory.
//
-// This is called from the write barrier, so its entire call tree must
-// be nosplit.
+// This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled.
//
//go:nosplit
//go:nowritebarrier
-func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
- if !cgoIsGoPointer(unsafe.Pointer(src)) {
+func cgoCheckPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer) {
+ if !mainStarted {
+ // Something early in startup hates this function.
+ // Don't start doing any actual checking until the
+ // runtime has set itself up.
+ return
+ }
+ if !cgoIsGoPointer(src) {
return
}
if cgoIsGoPointer(unsafe.Pointer(dst)) {
@@ -51,20 +56,31 @@ func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
}
systemstack(func() {
- println("write of Go pointer", hex(src), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst))))
+ println("write of Go pointer", hex(uintptr(src)), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst))))
throw(cgoWriteBarrierFail)
})
}
// cgoCheckMemmove is called when moving a block of memory.
+// It throws if the program is copying a block that contains a Go pointer
+// into non-Go memory.
+//
+// This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled.
+//
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer) {
+ cgoCheckMemmove2(typ, dst, src, 0, typ.size)
+}
+
+// cgoCheckMemmove2 is called when moving a block of memory.
// dst and src point off bytes into the value to copy.
// size is the number of bytes to copy.
// It throws if the program is copying a block that contains a Go pointer
// into non-Go memory.
-//
//go:nosplit
//go:nowritebarrier
-func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
+func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
if typ.ptrdata == 0 {
return
}
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index 6c41c62694..55dfbff7c4 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -51,9 +51,9 @@ It is a comma-separated list of name=val pairs setting these named variables:
cgocheck: setting cgocheck=0 disables all checks for packages
using cgo to incorrectly pass Go pointers to non-Go code.
Setting cgocheck=1 (the default) enables relatively cheap
- checks that may miss some errors. Setting cgocheck=2 enables
- expensive checks that should not miss any errors, but will
- cause your program to run slower.
+ checks that may miss some errors. A more complete, but slow,
+ cgocheck mode can be enabled using GOEXPERIMENT (which
+ requires a rebuild), see https://pkg.go.dev/internal/goexperiment for details.
efence: setting efence=1 causes the allocator to run in a mode
where each object is allocated on a unique page and addresses are
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index dbcd4db868..0e49794854 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -16,6 +16,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
+ "internal/goexperiment"
"unsafe"
)
@@ -169,8 +170,8 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
// barrier, so at worst we've unnecessarily greyed the old
// pointer that was in src.
memmove(dst, src, typ.size)
- if writeBarrier.cgo {
- cgoCheckMemmove(typ, dst, src, 0, typ.size)
+ if goexperiment.CgoCheck2 {
+ cgoCheckMemmove2(typ, dst, src, 0, typ.size)
}
}
@@ -214,8 +215,8 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
}
memmove(dst, src, size)
- if writeBarrier.cgo {
- cgoCheckMemmove(typ, dst, src, off, size)
+ if goexperiment.CgoCheck2 {
+ cgoCheckMemmove2(typ, dst, src, off, size)
}
}
@@ -272,7 +273,7 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
asanread(srcPtr, uintptr(n)*typ.size)
}
- if writeBarrier.cgo {
+ if goexperiment.CgoCheck2 {
cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
}
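An illustrative consequence (not from this CL) of gating typedmemmove on the experiment: in a cgocheck2 build, a plain typed copy into C-allocated memory is checked as well. Depending on how the compiler lowers the assignment it may go through cgoCheckMemmove2 or cgoCheckPtrWrite, but either way a Go pointer escaping into non-Go memory is caught.

package main

/*
#include <stdlib.h>
*/
import "C"

import "unsafe"

type pair struct {
	a, b *int // Go pointers inside the copied value
}

func main() {
	// A pair-sized block of C memory (non-Go memory).
	dst := (*pair)(C.malloc(C.size_t(unsafe.Sizeof(pair{}))))
	defer C.free(unsafe.Pointer(dst))

	src := pair{a: new(int), b: new(int)}
	// Throws "Go pointer stored into non-Go memory" in a cgocheck2 build.
	*dst = src
}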
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 088b566729..a3a8b2e70a 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -528,7 +528,7 @@ func (h heapBits) nextFast() (heapBits, uintptr) {
// make sure the underlying allocation contains pointers, usually
// by checking typ.ptrdata.
//
-// Callers must perform cgo checks if writeBarrier.cgo.
+// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
@@ -574,7 +574,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
}
dstx := (*uintptr)(unsafe.Pointer(addr))
if !buf.putFast(*dstx, 0) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
} else {
@@ -586,7 +586,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
dstx := (*uintptr)(unsafe.Pointer(addr))
srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
}
@@ -618,7 +618,7 @@ func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
}
srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
if !buf.putFast(0, *srcx) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
}
@@ -651,12 +651,12 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
if src == 0 {
if !buf.putFast(*dstx, 0) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
} else {
srcx := (*uintptr)(unsafe.Pointer(src + i))
if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
}
@@ -678,7 +678,7 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
-// Callers must perform cgo checks if writeBarrier.cgo.
+// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
@@ -710,7 +710,7 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
srcx := (*uintptr)(unsafe.Pointer(src + i))
if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
+ wbBufFlush()
}
}
}
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 169999460a..f630577914 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -193,8 +193,7 @@ var gcphase uint32
var writeBarrier struct {
enabled bool // compiler emits a check of this before calling write barrier
pad [3]byte // compiler uses 32-bit load for "enabled" field
- needed bool // whether we need a write barrier for current GC phase
- cgo bool // whether we need a write barrier for a cgo check
+ needed bool // identical to enabled, for now (TODO: dedup)
alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
@@ -213,7 +212,7 @@ const (
func setGCPhase(x uint32) {
atomic.Store(&gcphase, x)
writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
- writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
+ writeBarrier.enabled = writeBarrier.needed
}
// gcMarkWorkerMode represents the mode that a concurrent mark worker
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index fa8c81d8ef..bbb1ca2f6b 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -1092,7 +1092,7 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
// Flush the write barrier
// buffer; this may create
// more work.
- wbBufFlush(nil, 0)
+ wbBufFlush()
b = gcw.tryGet()
}
}
@@ -1171,7 +1171,7 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 {
if b == 0 {
// Flush the write barrier buffer;
// this may create more work.
- wbBufFlush(nil, 0)
+ wbBufFlush()
b = gcw.tryGet()
}
}
diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go
index 3b7cbf8f1f..9b92c92675 100644
--- a/src/runtime/mwbbuf.go
+++ b/src/runtime/mwbbuf.go
@@ -80,11 +80,7 @@ const (
func (b *wbBuf) reset() {
start := uintptr(unsafe.Pointer(&b.buf[0]))
b.next = start
- if writeBarrier.cgo {
- // Effectively disable the buffer by forcing a flush
- // on every barrier.
- b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers]))
- } else if testSmallBuf {
+ if testSmallBuf {
// For testing, allow two barriers in the buffer. If
// we only did one, then barriers of non-heap pointers
// would be no-ops. This lets us combine a buffered
@@ -118,15 +114,10 @@ func (b *wbBuf) empty() bool {
//
// buf := &getg().m.p.ptr().wbBuf
// if !buf.putFast(old, new) {
-// wbBufFlush(...)
+// wbBufFlush()
// }
// ... actual memory write ...
//
-// The arguments to wbBufFlush depend on whether the caller is doing
-// its own cgo pointer checks. If it is, then this can be
-// wbBufFlush(nil, 0). Otherwise, it must pass the slot address and
-// new.
-//
// The caller must ensure there are no preemption points during the
// above sequence. There must be no preemption points while buf is in
// use because it is a per-P resource. There must be no preemption
@@ -150,8 +141,7 @@ func (b *wbBuf) putFast(old, new uintptr) bool {
}
// wbBufFlush flushes the current P's write barrier buffer to the GC
-// workbufs. It is passed the slot and value of the write barrier that
-// caused the flush so that it can implement cgocheck.
+// workbufs.
//
// This must not have write barriers because it is part of the write
// barrier implementation.
@@ -165,7 +155,7 @@ func (b *wbBuf) putFast(old, new uintptr) bool {
//
//go:nowritebarrierrec
//go:nosplit
-func wbBufFlush(dst *uintptr, src uintptr) {
+func wbBufFlush() {
// Note: Every possible return from this function must reset
// the buffer's next pointer to prevent buffer overflow.
@@ -184,17 +174,6 @@ func wbBufFlush(dst *uintptr, src uintptr) {
return
}
- if writeBarrier.cgo && dst != nil {
- // This must be called from the stack that did the
- // write. It's nosplit all the way down.
- cgoCheckWriteBarrier(dst, src)
- if !writeBarrier.needed {
- // We were only called for cgocheck.
- getg().m.p.ptr().wbBuf.discard()
- return
- }
- }
-
// Switch to the system stack so we don't have to worry about
// the untyped stack slots or safe points.
systemstack(func() {
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index d100d6c8c0..d57a31ce45 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -751,17 +751,6 @@ func schedinit() {
// World is effectively started now, as P's can run.
worldStarted()
- // For cgocheck > 1, we turn on the write barrier at all times
- // and check all pointer writes. We can't do this until after
- // procresize because the write barrier needs a P.
- if debug.cgocheck > 1 {
- writeBarrier.cgo = true
- writeBarrier.enabled = true
- for _, pp := range allp {
- pp.wbBuf.reset()
- }
- }
-
if buildVersion == "" {
// Condition should never trigger. This code just serves
// to ensure runtime·buildVersion is kept in the resulting binary.
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index f5d74b7aed..991b92a0af 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -489,6 +489,10 @@ func parsegodebug(godebug string, seen map[string]bool) {
}
}
}
+
+ if debug.cgocheck > 1 {
+ throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
+ }
}
//go:linkname setTraceback runtime/debug.SetTraceback
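As a consequence, a binary built without the experiment now fails fast if the old knob is still set in the environment, instead of silently running without the extra checks. A hypothetical session (wording taken from the throw added above):

package main

// Running any program with the old knob now aborts during GODEBUG parsing:
//
//	$ GODEBUG=cgocheck=2 ./prog
//	fatal error: cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.
//
// whereas GOEXPERIMENT=cgocheck2 must be set when building, not when running:
//
//	$ GOEXPERIMENT=cgocheck2 go build -o prog .

func main() {}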