aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/trace.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/trace.go')
-rw-r--r--src/runtime/trace.go120
1 file changed, 88 insertions, 32 deletions
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index cfb1ae7a92..a3bcdee010 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -258,6 +258,8 @@ func traceBufPtrOf(b *traceBuf) traceBufPtr {
// traceEnabled returns true if the trace is currently enabled.
//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
//go:nosplit
func traceEnabled() bool {
return trace.enabled
@@ -270,6 +272,52 @@ func traceShuttingDown() bool {
return trace.shutdown
}
+// traceLocker represents an M writing trace events. While a traceLocker value
+// is valid, the tracer observes all operations on the G/M/P or trace events being
+// written as happening atomically.
+//
+// This doesn't do much for the current tracer, because the current tracer doesn't
+// need atomicity around non-trace runtime operations. All the state it needs it
+// collects carefully during a STW.
+type traceLocker struct {
+ enabled bool
+}
+
+// traceAcquire prepares this M for writing one or more trace events.
+//
+// This exists for compatibility with the upcoming new tracer; it doesn't do much
+// in the current tracer.
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func traceAcquire() traceLocker {
+ if !traceEnabled() {
+ return traceLocker{false}
+ }
+ return traceLocker{true}
+}
+
+// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func (tl traceLocker) ok() bool {
+ return tl.enabled
+}
+
+// traceRelease indicates that this M is done writing trace events.
+//
+// This exists for compatibility with the upcoming new tracer; it doesn't do anything
+// in the current tracer.
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func traceRelease(tl traceLocker) {
+}
+
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
@@ -367,8 +415,10 @@ func StartTrace() error {
gp.trace.tracedSyscallEnter = false
}
})
- traceProcStart()
- traceGoStart()
+ // Use a dummy traceLocker. The trace isn't enabled yet, but we can still write events.
+ tl := traceLocker{}
+ tl.ProcStart()
+ tl.GoStart()
// Note: startTicks needs to be set after we emit traceEvGoInSyscall events.
// If we do it the other way around, it is possible that exitsyscall will
// query sysExitTime after startTicks but before traceEvGoInSyscall timestamp.
@@ -401,7 +451,10 @@ func StartTrace() error {
unlock(&sched.sysmonlock)
// Record the current state of HeapGoal to avoid information loss in trace.
- traceHeapGoal()
+ //
+ // Use the same dummy trace locker. The trace can't end until after we start
+ // the world, and we can safely trace from here.
+ tl.HeapGoal()
startTheWorldGC()
return nil
@@ -427,7 +480,10 @@ func StopTrace() {
return
}
- traceGoSched()
+ // Trace GoSched for us, and use a dummy locker. The world is stopped
+ // and we control whether the trace is enabled, so this is safe.
+ tl := traceLocker{}
+ tl.GoSched()
atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
trace.cpuLogRead.close()
@@ -847,7 +903,7 @@ func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev by
// profiling buffer. It is called from a signal handler, so is limited in what
// it can do.
func traceCPUSample(gp *g, pp *p, stk []uintptr) {
- if !trace.enabled {
+ if !traceEnabled() {
// Tracing is usually turned off; don't spend time acquiring the signal
// lock unless it's active.
return
@@ -1475,15 +1531,15 @@ func (a *traceAlloc) drop() {
// The following functions write specific events to trace.
-func traceGomaxprocs(procs int32) {
+func (_ traceLocker) Gomaxprocs(procs int32) {
traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}
-func traceProcStart() {
+func (_ traceLocker) ProcStart() {
traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}
-func traceProcStop(pp *p) {
+func (_ traceLocker) ProcStop(pp *p) {
// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
 // to handle this we temporarily employ the P.
mp := acquirem()
@@ -1494,16 +1550,16 @@ func traceProcStop(pp *p) {
releasem(mp)
}
-func traceGCStart() {
+func (_ traceLocker) GCStart() {
traceEvent(traceEvGCStart, 3, trace.seqGC)
trace.seqGC++
}
-func traceGCDone() {
+func (_ traceLocker) GCDone() {
traceEvent(traceEvGCDone, -1)
}
-func traceSTWStart(reason stwReason) {
+func (_ traceLocker) STWStart(reason stwReason) {
// Don't trace if this STW is for trace start/stop, since traceEnabled
// switches during a STW.
if reason == stwStartTrace || reason == stwStopTrace {
@@ -1513,7 +1569,7 @@ func traceSTWStart(reason stwReason) {
traceEvent(traceEvSTWStart, -1, uint64(reason))
}
-func traceSTWDone() {
+func (_ traceLocker) STWDone() {
mp := getg().m
if !mp.trace.tracedSTWStart {
return
@@ -1527,7 +1583,7 @@ func traceSTWDone() {
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
-func traceGCSweepStart() {
+func (_ traceLocker) GCSweepStart() {
// Delay the actual GCSweepStart event until the first span
// sweep. If we don't sweep anything, don't emit any events.
pp := getg().m.p.ptr()
@@ -1541,7 +1597,7 @@ func traceGCSweepStart() {
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
-func traceGCSweepSpan(bytesSwept uintptr) {
+func (_ traceLocker) GCSweepSpan(bytesSwept uintptr) {
pp := getg().m.p.ptr()
if pp.trace.inSweep {
if pp.trace.swept == 0 {
@@ -1551,7 +1607,7 @@ func traceGCSweepSpan(bytesSwept uintptr) {
}
}
-func traceGCSweepDone() {
+func (_ traceLocker) GCSweepDone() {
pp := getg().m.p.ptr()
if !pp.trace.inSweep {
throw("missing traceGCSweepStart")
@@ -1562,15 +1618,15 @@ func traceGCSweepDone() {
pp.trace.inSweep = false
}
-func traceGCMarkAssistStart() {
+func (_ traceLocker) GCMarkAssistStart() {
traceEvent(traceEvGCMarkAssistStart, 1)
}
-func traceGCMarkAssistDone() {
+func (_ traceLocker) GCMarkAssistDone() {
traceEvent(traceEvGCMarkAssistDone, -1)
}
-func traceGoCreate(newg *g, pc uintptr) {
+func (_ traceLocker) GoCreate(newg *g, pc uintptr) {
newg.trace.seq = 0
newg.trace.lastP = getg().m.p
// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
@@ -1578,7 +1634,7 @@ func traceGoCreate(newg *g, pc uintptr) {
traceEvent(traceEvGoCreate, 2, newg.goid, uint64(id))
}
-func traceGoStart() {
+func (_ traceLocker) GoStart() {
gp := getg().m.curg
pp := gp.m.p
gp.trace.seq++
@@ -1592,29 +1648,29 @@ func traceGoStart() {
}
}
-func traceGoEnd() {
+func (_ traceLocker) GoEnd() {
traceEvent(traceEvGoEnd, -1)
}
-func traceGoSched() {
+func (_ traceLocker) GoSched() {
gp := getg()
gp.trace.lastP = gp.m.p
traceEvent(traceEvGoSched, 1)
}
-func traceGoPreempt() {
+func (_ traceLocker) GoPreempt() {
gp := getg()
gp.trace.lastP = gp.m.p
traceEvent(traceEvGoPreempt, 1)
}
-func traceGoPark(reason traceBlockReason, skip int) {
+func (_ traceLocker) GoPark(reason traceBlockReason, skip int) {
// Convert the block reason directly to a trace event type.
// See traceBlockReason for more information.
traceEvent(byte(reason), skip)
}
-func traceGoUnpark(gp *g, skip int) {
+func (_ traceLocker) GoUnpark(gp *g, skip int) {
pp := getg().m.p
gp.trace.seq++
if gp.trace.lastP == pp {
@@ -1625,7 +1681,7 @@ func traceGoUnpark(gp *g, skip int) {
}
}
-func traceGoSysCall() {
+func (_ traceLocker) GoSysCall() {
var skip int
switch {
case tracefpunwindoff():
@@ -1646,7 +1702,7 @@ func traceGoSysCall() {
traceEvent(traceEvGoSysCall, skip)
}
-func traceGoSysExit() {
+func (_ traceLocker) GoSysExit() {
gp := getg().m.curg
if !gp.trace.tracedSyscallEnter {
// There was no syscall entry traced for us at all, so there's definitely
@@ -1673,7 +1729,7 @@ func traceGoSysExit() {
traceEvent(traceEvGoSysExit, -1, gp.goid, gp.trace.seq, uint64(ts))
}
-func traceGoSysBlock(pp *p) {
+func (_ traceLocker) GoSysBlock(pp *p) {
// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
 // to handle this we temporarily employ the P.
mp := acquirem()
@@ -1684,11 +1740,11 @@ func traceGoSysBlock(pp *p) {
releasem(mp)
}
-func traceHeapAlloc(live uint64) {
+func (_ traceLocker) HeapAlloc(live uint64) {
traceEvent(traceEvHeapAlloc, -1, live)
}
-func traceHeapGoal() {
+func (_ traceLocker) HeapGoal() {
heapGoal := gcController.heapGoal()
if heapGoal == ^uint64(0) {
// Heap-based triggering is disabled.
@@ -1789,15 +1845,15 @@ func startPCforTrace(pc uintptr) uintptr {
return f.datap.textAddr(*(*uint32)(w))
}
-// traceOneNewExtraM registers the fact that a new extra M was created with
+// OneNewExtraM registers the fact that a new extra M was created with
// the tracer. This matters if the M (which has an attached G) is used while
// the trace is still active because if it is, we need the fact that it exists
// to show up in the final trace.
-func traceOneNewExtraM(gp *g) {
+func (tl traceLocker) OneNewExtraM(gp *g) {
// Trigger two trace events for the locked g in the extra m,
// since the next event of the g will be traceEvGoSysExit in exitsyscall,
// while calling from C thread to Go.
- traceGoCreate(gp, 0) // no start pc
+ tl.GoCreate(gp, 0) // no start pc
gp.trace.seq++
traceEvent(traceEvGoInSyscall, -1, gp.goid)
}