Cypherpunks repositories - gostls13.git/commitdiff
Revert "runtime: prepare for extensions to waiting M list"
author: Rhys Hiltner <rhys.hiltner@gmail.com>
Wed, 29 May 2024 16:43:38 +0000 (16:43 +0000)
committer: Gopher Robot <gobot@golang.org>
Thu, 30 May 2024 17:57:37 +0000 (17:57 +0000)
This reverts commit be0b569caa0eab1a7f30edf64e550bbf5f6ff235 (CL 585635).

Reason for revert: This is part of a patch series that changed the
handling of contended lock2/unlock2 calls, reducing the maximum
throughput of contended runtime.mutex values, and causing a performance
regression on applications where that is (or became) the bottleneck.

Updates #66999
Updates #67585

Change-Id: I7843ccaecbd273b7ceacfa0f420dd993b4b15a0a
Reviewed-on: https://go-review.googlesource.com/c/go/+/589117
Auto-Submit: Rhys Hiltner <rhys.hiltner@gmail.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Than McIntosh <thanm@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
src/runtime/lock_sema.go
src/runtime/mprof.go
src/runtime/runtime2.go

index 1c24cf6d30fef52016144875ea7b8ada0220e51f..32d2235ad3ab900371a8241e6ba399a348b59025 100644 (file)
@@ -77,11 +77,11 @@ Loop:
                        osyield()
                } else {
                        // Someone else has it.
-                       // l.key points to a linked list of M's waiting
-                       // for this lock, chained through m.mWaitList.next.
+                       // l->waitm points to a linked list of M's waiting
+                       // for this lock, chained through m->nextwaitm.
                        // Queue this M.
                        for {
-                               gp.m.mWaitList.next = muintptr(v &^ locked)
+                               gp.m.nextwaitm = muintptr(v &^ locked)
                                if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
                                        break
                                }
@@ -119,7 +119,7 @@ func unlock2(l *mutex) {
                        // Other M's are waiting for the lock.
                        // Dequeue an M.
                        mp = muintptr(v &^ locked).ptr()
-                       if atomic.Casuintptr(&l.key, v, uintptr(mp.mWaitList.next)) {
+                       if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
                                // Dequeued an M.  Wake it.
                                semawakeup(mp)
                                break
@@ -200,7 +200,7 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
        // This reduces the nosplit footprint of notetsleep_internal.
        gp = getg()
 
-       // Register for wakeup on n.key.
+       // Register for wakeup on n->waitm.
        if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
                // Must be locked (got wakeup).
                if n.key != locked {
index b97fac787eb165c977082e21de63b77c338c410c..b51a1ad3ce3d16ffb24d115ed78e09d3dd8b24dc 100644 (file)
@@ -667,17 +667,6 @@ func (lt *lockTimer) end() {
        }
 }
 
-// mWaitList is part of the M struct, and holds the list of Ms that are waiting
-// for a particular runtime.mutex.
-//
-// When an M is unable to immediately obtain a lock, it adds itself to the list
-// of Ms waiting for the lock. It does that via this struct's next field,
-// forming a singly-linked list with the mutex's key field pointing to the head
-// of the list.
-type mWaitList struct {
-       next muintptr // next m waiting for lock (set by us, cleared by another during unlock)
-}
-
 type mLockProfile struct {
        waitTime   atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
        stack      []uintptr    // stack that experienced contention in runtime.lockWithRank
index 8645532ae03e0e6693922fbdff75c4c511757ede..4a789639611fb732228086acfed57dacdde521c6 100644 (file)
@@ -596,8 +596,8 @@ type m struct {
        createstack   [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
        lockedExt     uint32      // tracking for external LockOSThread
        lockedInt     uint32      // tracking for internal lockOSThread
+       nextwaitm     muintptr    // next m waiting for lock
 
-       mWaitList    mWaitList    // list of runtime lock waiters
        mLockProfile mLockProfile // fields relating to runtime.lock contention
        profStack    []uintptr    // used for memory/block/mutex stack traces