about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
authorIan Lance Taylor <iant@golang.org>2025-08-22 13:47:42 -0700
committerGopher Robot <gobot@golang.org>2025-11-25 22:10:16 -0800
commiteb63ef9d6676dc0edb112cd297820306a327017a (patch)
tree589b7cc96275ff412c7bb6c3686017df05906a03 /src/runtime
parent06412288cfd31ab365fe8ee6368742dffa759803 (diff)
downloadgo-eb63ef9d6676dc0edb112cd297820306a327017a.tar.xz
runtime: panic if cleanup function closes over cleanup pointer
This would catch problems like https://go.dev/cl/696295.

Benchmark effect with this CL plus CL 697535:

goos: linux
goarch: amd64
pkg: runtime
cpu: 12th Gen Intel(R) Core(TM) i7-1260P
                     │ /tmp/foo.1  │             /tmp/foo.2             │
                     │   sec/op    │   sec/op     vs base               │
AddCleanupAndStop-16   81.93n ± 1%   82.87n ± 1%  +1.14% (p=0.041 n=10)

For #75066

Change-Id: Ia41d0d6b88baebf6055cb7e5d42bc8210b31630f
Reviewed-on: https://go-review.googlesource.com/c/go/+/714000
Reviewed-by: Michael Pratt <mpratt@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Ian Lance Taylor <iant@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/mcleanup.go24
-rw-r--r--src/runtime/mcleanup_test.go20
-rw-r--r--src/runtime/testdata/testprog/checkfinalizers.go27
3 files changed, 68 insertions, 3 deletions
diff --git a/src/runtime/mcleanup.go b/src/runtime/mcleanup.go
index d190653213..e6f2fb00b3 100644
--- a/src/runtime/mcleanup.go
+++ b/src/runtime/mcleanup.go
@@ -71,7 +71,14 @@ import (
// mentions it. To ensure a cleanup does not get called prematurely,
// pass the object to the [KeepAlive] function after the last point
// where the object must remain reachable.
+//
+//go:nocheckptr
func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
+ // This is marked nocheckptr because checkptr doesn't understand the
+ // pointer manipulation done when looking at closure pointers.
+ // Similar code in mbitmap.go works because the functions are
+ // go:nosplit, which implies go:nocheckptr (CL 202158).
+
// Explicitly force ptr and cleanup to escape to the heap.
ptr = abi.Escape(ptr)
cleanup = abi.Escape(cleanup)
@@ -145,6 +152,23 @@ func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
}
}
+ // Check that the cleanup function doesn't close over the pointer.
+ cleanupFV := unsafe.Pointer(*(**funcval)(unsafe.Pointer(&cleanup)))
+ cBase, cSpan, _ := findObject(uintptr(cleanupFV), 0, 0)
+ if cBase != 0 {
+ tp := cSpan.typePointersOfUnchecked(cBase)
+ for {
+ var addr uintptr
+ if tp, addr = tp.next(cBase + cSpan.elemsize); addr == 0 {
+ break
+ }
+ ptr := *(*uintptr)(unsafe.Pointer(addr))
+ if ptr >= base && ptr < base+span.elemsize {
+ panic("runtime.AddCleanup: cleanup function closes over ptr, cleanup will never run")
+ }
+ }
+ }
+
// Create another G if necessary.
if gcCleanups.needG() {
gcCleanups.createGs()
diff --git a/src/runtime/mcleanup_test.go b/src/runtime/mcleanup_test.go
index 28fc7f407f..5afe85e103 100644
--- a/src/runtime/mcleanup_test.go
+++ b/src/runtime/mcleanup_test.go
@@ -337,7 +337,7 @@ func TestCleanupLost(t *testing.T) {
}
}
-func TestCleanupUnreachable(t *testing.T) {
+func TestCleanupUnreachablePointer(t *testing.T) {
defer func() {
if recover() == nil {
t.Error("AddCleanup failed to detect self-pointer")
@@ -354,6 +354,24 @@ func TestCleanupUnreachable(t *testing.T) {
}, &v.f)
}
+func TestCleanupUnreachableClosure(t *testing.T) {
+ defer func() {
+ if recover() == nil {
+ t.Error("AddCleanup failed to detect closure pointer")
+ }
+ }()
+
+ type T struct {
+ p *byte // use *byte to avoid tiny allocator
+ f int
+ }
+ v := &T{}
+ runtime.AddCleanup(v, func(int) {
+ t.Log(v.f)
+ t.Error("cleanup ran unexpectedly")
+ }, 0)
+}
+
// BenchmarkAddCleanupAndStop benchmarks adding and removing a cleanup
// from the same allocation.
//
diff --git a/src/runtime/testdata/testprog/checkfinalizers.go b/src/runtime/testdata/testprog/checkfinalizers.go
index ea352a4e3e..9f082a25ff 100644
--- a/src/runtime/testdata/testprog/checkfinalizers.go
+++ b/src/runtime/testdata/testprog/checkfinalizers.go
@@ -28,17 +28,40 @@ func DetectFinalizerAndCleanupLeaks() {
// Leak a cleanup.
cLeak := new(T)
+
+ // Use an extra closure to avoid the simple
+ // checking done by AddCleanup.
+ var closeOverCLeak func(int)
+ closeOverCLeak = func(x int) {
+ // Use recursion to avoid inlining.
+ if x <= 0 {
+ **cLeak = x
+ } else {
+ closeOverCLeak(x - 1)
+ }
+ }
+
runtime.AddCleanup(cLeak, func(x int) {
- **cLeak = x
+ closeOverCLeak(x)
}, int(0))
// Have a regular cleanup to make sure it doesn't trip the detector.
cNoLeak := new(T)
runtime.AddCleanup(cNoLeak, func(_ int) {}, int(0))
+ // Like closeOverCLeak.
+ var closeOverCNoLeak func(int)
+ closeOverCNoLeak = func(x int) {
+ if x <= 0 {
+ **cNoLeak = x
+ } else {
+ closeOverCNoLeak(x - 1)
+ }
+ }
+
// Add a cleanup that only temporarily leaks cNoLeak.
runtime.AddCleanup(cNoLeak, func(x int) {
- **cNoLeak = x
+ closeOverCNoLeak(x)
}, int(0)).Stop()
if !asan.Enabled && !race.Enabled {