about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
authorRhys Hiltner <rhys.hiltner@gmail.com>2024-05-13 12:23:58 -0700
committerGopher Robot <gobot@golang.org>2024-05-21 17:17:24 +0000
commitbe0b569caa0eab1a7f30edf64e550bbf5f6ff235 (patch)
tree5a64e37f8e0b3314666430823e3fa32d615e31fa /src
parentd68d485405d8e604921e6d63ed32ae344fd3049e (diff)
downloadgo-be0b569caa0eab1a7f30edf64e550bbf5f6ff235.tar.xz
runtime: prepare for extensions to waiting M list
Move the nextwaitm field into a small struct, in preparation for
additional metadata to track how long Ms need to wait for locks.

For #66999

Change-Id: Ib40e43c15cde22f7e35922641107973d99439ecd
Reviewed-on: https://go-review.googlesource.com/c/go/+/585635
Reviewed-by: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Rhys Hiltner <rhys.hiltner@gmail.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
Diffstat (limited to 'src')
-rw-r--r--src/runtime/lock_sema.go10
-rw-r--r--src/runtime/mprof.go11
-rw-r--r--src/runtime/runtime2.go2
3 files changed, 17 insertions, 6 deletions
diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go
index 32d2235ad3..1c24cf6d30 100644
--- a/src/runtime/lock_sema.go
+++ b/src/runtime/lock_sema.go
@@ -77,11 +77,11 @@ Loop:
osyield()
} else {
// Someone else has it.
- // l->waitm points to a linked list of M's waiting
- // for this lock, chained through m->nextwaitm.
+ // l.key points to a linked list of M's waiting
+ // for this lock, chained through m.mWaitList.next.
// Queue this M.
for {
- gp.m.nextwaitm = muintptr(v &^ locked)
+ gp.m.mWaitList.next = muintptr(v &^ locked)
if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
break
}
@@ -119,7 +119,7 @@ func unlock2(l *mutex) {
// Other M's are waiting for the lock.
// Dequeue an M.
mp = muintptr(v &^ locked).ptr()
- if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
+ if atomic.Casuintptr(&l.key, v, uintptr(mp.mWaitList.next)) {
// Dequeued an M. Wake it.
semawakeup(mp)
break
@@ -200,7 +200,7 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
// This reduces the nosplit footprint of notetsleep_internal.
gp = getg()
- // Register for wakeup on n->waitm.
+ // Register for wakeup on n.key.
if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
// Must be locked (got wakeup).
if n.key != locked {
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index b51a1ad3ce..b97fac787e 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -667,6 +667,17 @@ func (lt *lockTimer) end() {
}
}
+// mWaitList is part of the M struct, and holds the list of Ms that are waiting
+// for a particular runtime.mutex.
+//
+// When an M is unable to immediately obtain a lock, it adds itself to the list
+// of Ms waiting for the lock. It does that via this struct's next field,
+// forming a singly-linked list with the mutex's key field pointing to the head
+// of the list.
+type mWaitList struct {
+ next muintptr // next m waiting for lock (set by us, cleared by another during unlock)
+}
+
type mLockProfile struct {
waitTime atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
stack []uintptr // stack that experienced contention in runtime.lockWithRank
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index ff11414e3e..470b829912 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -596,8 +596,8 @@ type m struct {
createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
lockedExt uint32 // tracking for external LockOSThread
lockedInt uint32 // tracking for internal lockOSThread
- nextwaitm muintptr // next m waiting for lock
+ mWaitList mWaitList // list of runtime lock waiters
mLockProfile mLockProfile // fields relating to runtime.lock contention
profStack []uintptr // used for memory/block/mutex stack traces