about summary refs log tree commit diff
path: root/src/runtime/tracebuf.go
diff options
context:
space:
mode:
author	Michael Anthony Knyszek <mknyszek@google.com>	2024-05-22 21:46:29 +0000
committer	Michael Knyszek <mknyszek@google.com>	2024-07-25 14:38:21 +0000
commit	e76353d5a923dbc5e22713267104b56a2c856302 (patch)
tree	05899b2c153819d3665b6cf51edfbdbd67177530 /src/runtime/tracebuf.go
parent	bd6f911f852f4a608e2cf11c1ce5b55ff0347866 (diff)
download	go-e76353d5a923dbc5e22713267104b56a2c856302.tar.xz
runtime: allow the tracer to be reentrant
This change allows the tracer to be reentrant by restructuring the internals such that writing an event is atomic with respect to stack growth. Essentially, a core set of functions that are involved in acquiring a trace buffer and writing to it are all marked nosplit. Stack growth is currently the only hidden place where the tracer may be accidentally reentrant, preventing the tracer from being used everywhere. It already lacks write barriers, lacks allocations, and is non-preemptible. This change thus makes the tracer fully reentrant, since the only reentry case it needs to handle is stack growth. Since the invariants needed to attain this are subtle, this change also extends the debugTraceReentrancy debug mode to check these invariants as well. Specifically, the invariants are checked by setting the throwsplit flag. A side benefit of this change is it simplifies the trace event writing API a good bit: there's no need to actually thread the event writer through things, and most callsites look a bit simpler. Change-Id: I7c329fb7a6cb936bd363c44cf882ea0a925132f3 Reviewed-on: https://go-review.googlesource.com/c/go/+/587599 Reviewed-by: Austin Clements <austin@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/runtime/tracebuf.go')
-rw-r--r--	src/runtime/tracebuf.go	109
1 file changed, 108 insertions(+), 1 deletion(-)
diff --git a/src/runtime/tracebuf.go b/src/runtime/tracebuf.go
index 908a63d273..be6e3e582b 100644
--- a/src/runtime/tracebuf.go
+++ b/src/runtime/tracebuf.go
@@ -27,8 +27,26 @@ type traceWriter struct {
*traceBuf
}
-// write returns an a traceWriter that writes into the current M's stream.
+// writer returns a traceWriter that writes into the current M's stream.
+//
+// Once this is called, the caller must guard against stack growth until
+// end is called on it. Therefore, it's highly recommended to use this
+// API in a "fluent" style, for example tl.writer().event(...).end().
+// Better yet, callers just looking to write events should use eventWriter
+// when possible, which is a much safer wrapper around this function.
+//
+// nosplit to allow for safe reentrant tracing from stack growth paths.
+//
+//go:nosplit
func (tl traceLocker) writer() traceWriter {
+ if debugTraceReentrancy {
+ // Checks that the invariants of this function are being upheld.
+ gp := getg()
+ if gp == gp.m.curg {
+ tl.mp.trace.oldthrowsplit = gp.throwsplit
+ gp.throwsplit = true
+ }
+ }
return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2]}
}
@@ -38,12 +56,49 @@ func (tl traceLocker) writer() traceWriter {
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
+// This does not have the same stack growth restrictions as traceLocker.writer.
+//
// buf may be nil.
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
}
+// event writes out the bytes of an event into the event stream.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
+func (w traceWriter) event(ev traceEv, args ...traceArg) traceWriter {
+ // N.B. Everything in this call must be nosplit to maintain
+ // the stack growth related invariants for writing events.
+
+ // Make sure we have room.
+ w, _ = w.ensure(1 + (len(args)+1)*traceBytesPerNumber)
+
+ // Compute the timestamp diff that we'll put in the trace.
+ ts := traceClockNow()
+ if ts <= w.traceBuf.lastTime {
+ ts = w.traceBuf.lastTime + 1
+ }
+ tsDiff := uint64(ts - w.traceBuf.lastTime)
+ w.traceBuf.lastTime = ts
+
+ // Write out event.
+ w.byte(byte(ev))
+ w.varint(tsDiff)
+ for _, arg := range args {
+ w.varint(uint64(arg))
+ }
+ return w
+}
+
// end writes the buffer back into the m.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (w traceWriter) end() {
if w.mp == nil {
// Tolerate a nil mp. It makes code that creates traceWriters directly
@@ -51,11 +106,24 @@ func (w traceWriter) end() {
return
}
w.mp.trace.buf[w.gen%2] = w.traceBuf
+ if debugTraceReentrancy {
+ // The writer is no longer live, we can drop throwsplit (if it wasn't
+ // already set upon entry).
+ gp := getg()
+ if gp == gp.m.curg {
+ gp.throwsplit = w.mp.trace.oldthrowsplit
+ }
+ }
}
// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
refill := w.traceBuf == nil || !w.available(maxSize)
if refill {
@@ -65,6 +133,11 @@ func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
}
// flush puts w.traceBuf on the queue of full buffers.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (w traceWriter) flush() traceWriter {
systemstack(func() {
lock(&trace.lock)
@@ -80,6 +153,11 @@ func (w traceWriter) flush() traceWriter {
// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
//
// exp indicates whether the refilled batch should be EvExperimentalBatch.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (w traceWriter) refill(exp traceExperiment) traceWriter {
systemstack(func() {
lock(&trace.lock)
@@ -179,12 +257,22 @@ type traceBuf struct {
}
// byte appends v to buf.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) byte(v byte) {
buf.arr[buf.pos] = v
buf.pos++
}
// varint appends v to buf in little-endian-base-128 encoding.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) varint(v uint64) {
pos := buf.pos
arr := buf.arr[pos : pos+traceBytesPerNumber]
@@ -203,6 +291,11 @@ func (buf *traceBuf) varint(v uint64) {
// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) varintReserve() int {
p := buf.pos
buf.pos += traceBytesPerNumber
@@ -210,10 +303,19 @@ func (buf *traceBuf) varintReserve() int {
}
// stringData appends s's data directly to buf.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) stringData(s string) {
buf.pos += copy(buf.arr[buf.pos:], s)
}
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) available(size int) bool {
return len(buf.arr)-buf.pos >= size
}
@@ -222,6 +324,11 @@ func (buf *traceBuf) available(size int) bool {
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) varintAt(pos int, v uint64) {
for i := 0; i < traceBytesPerNumber; i++ {
if i < traceBytesPerNumber-1 {