1 // Copyright 2024 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4 5 // Package sync provides basic synchronization primitives such as mutual
6 // exclusion locks to internal packages (including ones that depend on sync).
7 //
8 // Tests are defined in package [sync].
9 package sync
10 11 import (
12 "internal/race"
13 "sync/atomic"
14 "unsafe"
15 )
// A Mutex is a mutual exclusion lock.
//
// The zero value is an unlocked mutex. A Mutex must not be copied
// after first use.
//
// See package [sync.Mutex] documentation.
type Mutex struct {
	// state packs the mutex state into one word: the low bits are the
	// mutexLocked, mutexWoken and mutexStarving flags, and the bits at
	// and above mutexWaiterShift count blocked waiters. Always accessed
	// with sync/atomic operations.
	state int32
	// sema is the runtime-internal semaphore used to park waiters
	// (runtime_SemacquireMutex) and wake them (runtime_Semrelease).
	sema uint32
}
const (
	mutexLocked = 1 << iota // mutex is locked
	mutexWoken              // a waiter has been woken, or a spinner claimed the wakeup; tells Unlock not to wake another
	mutexStarving           // mutex is in starvation mode: ownership is handed directly to the front waiter
	mutexWaiterShift = iota // waiter count occupies state bits at and above this shift

	// Mutex fairness.
	//
	// Mutex can be in 2 modes of operations: normal and starvation.
	// In normal mode waiters are queued in FIFO order, but a woken up waiter
	// does not own the mutex and competes with new arriving goroutines over
	// the ownership. New arriving goroutines have an advantage -- they are
	// already running on CPU and there can be lots of them, so a woken up
	// waiter has good chances of losing. In such case it is queued at front
	// of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
	// it switches mutex to the starvation mode.
	//
	// In starvation mode ownership of the mutex is directly handed off from
	// the unlocking goroutine to the waiter at the front of the queue.
	// New arriving goroutines don't try to acquire the mutex even if it appears
	// to be unlocked, and don't try to spin. Instead they queue themselves at
	// the tail of the wait queue.
	//
	// If a waiter receives ownership of the mutex and sees that either
	// (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
	// it switches mutex back to normal operation mode.
	//
	// Normal mode has considerably better performance as a goroutine can acquire
	// a mutex several times in a row even if there are blocked waiters.
	// Starvation mode is important to prevent pathological cases of tail latency.

	// starvationThresholdNs is the wait time (1ms, in nanoseconds) after
	// which a waiter switches the mutex into starvation mode.
	starvationThresholdNs = 1e6
)
// Lock locks m.
//
// If the lock is already in use, the calling goroutine blocks until
// the mutex is available.
//
// See package [sync.Mutex] documentation.
func (m *Mutex) Lock() {
	// Fast path: grab unlocked mutex.
	// A state of 0 means unlocked with no waiters and no flags set,
	// so a single CAS acquires the lock outright.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		if race.Enabled {
			// Establish a happens-before edge for the race detector.
			race.Acquire(unsafe.Pointer(m))
		}
		return
	}
	// Slow path (outlined so that the fast path can be inlined)
	m.lockSlow()
}
// TryLock tries to lock m and reports whether it succeeded.
// It never blocks: it makes at most one CAS attempt and gives up.
//
// See package [sync.Mutex] documentation.
func (m *Mutex) TryLock() bool {
	old := m.state
	// Give up immediately if the mutex is held, or in starvation mode
	// (where ownership is handed to queued waiters, never to barging
	// goroutines like us).
	if old&(mutexLocked|mutexStarving) != 0 {
		return false
	}

	// There may be a goroutine waiting for the mutex, but we are
	// running now and can try to grab the mutex before that
	// goroutine wakes up.
	// The CAS preserves the waiter count and woken flag in old;
	// if the state changed since we read it, fail rather than retry.
	if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
		return false
	}

	if race.Enabled {
		// Establish a happens-before edge for the race detector.
		race.Acquire(unsafe.Pointer(m))
	}
	return true
}
// lockSlow is the contended path of Lock. It spins while that is
// profitable, then parks on m.sema, tracking how long it has waited so
// it can switch the mutex into starvation mode (see the fairness
// comment on the constants above) after starvationThresholdNs.
func (m *Mutex) lockSlow() {
	var waitStartTime int64 // when this goroutine first blocked; 0 until then
	starving := false       // whether we have decided to enter starvation mode
	awoke := false          // whether we hold the mutexWoken "wakeup token"
	iter := 0               // consecutive spin iterations, bounded by runtime_canSpin
	old := m.state
	for {
		// Don't spin in starvation mode, ownership is handed off to waiters
		// so we won't be able to acquire the mutex anyway.
		if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
			// Active spinning makes sense.
			// Try to set mutexWoken flag to inform Unlock
			// to not wake other blocked goroutines.
			// Only do so if there are waiters to keep asleep and the
			// flag isn't already claimed.
			if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
				atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
				awoke = true
			}
			runtime_doSpin()
			iter++
			old = m.state
			continue
		}
		// Compute the state we want to install, starting from old.
		new := old
		// Don't try to acquire starving mutex, new arriving goroutines must queue.
		if old&mutexStarving == 0 {
			new |= mutexLocked
		}
		// If the mutex is held or starving we will block, so count
		// ourselves as a waiter.
		if old&(mutexLocked|mutexStarving) != 0 {
			new += 1 << mutexWaiterShift
		}
		// The current goroutine switches mutex to starvation mode.
		// But if the mutex is currently unlocked, don't do the switch.
		// Unlock expects that starving mutex has waiters, which will not
		// be true in this case.
		if starving && old&mutexLocked != 0 {
			new |= mutexStarving
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				throw("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&(mutexLocked|mutexStarving) == 0 {
				break // locked the mutex with CAS
			}
			// If we were already waiting before, queue at the front of the queue.
			queueLifo := waitStartTime != 0
			if waitStartTime == 0 {
				waitStartTime = runtime_nanotime()
			}
			// Park until Unlock releases the semaphore. The last
			// argument (2) is presumably a skip-frame count for
			// tracing, matching Unlock's note about hiding slow-path
			// frames — TODO confirm against runtime_SemacquireMutex.
			runtime_SemacquireMutex(&m.sema, queueLifo, 2)
			// Once total wait exceeds the threshold, commit to
			// starvation mode on the next loop iteration.
			starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
			old = m.state
			if old&mutexStarving != 0 {
				// If this goroutine was woken and mutex is in starvation mode,
				// ownership was handed off to us but mutex is in somewhat
				// inconsistent state: mutexLocked is not set and we are still
				// accounted as waiter. Fix that.
				if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
					throw("sync: inconsistent mutex state")
				}
				// Atomically: set mutexLocked and remove ourselves
				// from the waiter count.
				delta := int32(mutexLocked - 1<<mutexWaiterShift)
				if !starving || old>>mutexWaiterShift == 1 {
					// Exit starvation mode.
					// Critical to do it here and consider wait time.
					// Starvation mode is so inefficient, that two goroutines
					// can go lock-step infinitely once they switch mutex
					// to starvation mode.
					delta -= mutexStarving
				}
				atomic.AddInt32(&m.state, delta)
				break
			}
			// Woken in normal mode: we hold the wakeup token and must
			// compete for the lock again from the top of the loop.
			awoke = true
			iter = 0
		} else {
			// CAS lost a race; reload the state and retry.
			old = m.state
		}
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
}
// Unlock unlocks m.
//
// It is a run-time error if m is not locked on entry to Unlock.
//
// See package [sync.Mutex] documentation.
func (m *Mutex) Unlock() {
	if race.Enabled {
		_ = m.state // trigger a nil-pointer fault here rather than inside race.Release
		race.Release(unsafe.Pointer(m))
	}

	// Fast path: drop lock bit.
	// If the result is 0 there were no waiters and no flags set,
	// so nothing more to do.
	new := atomic.AddInt32(&m.state, -mutexLocked)
	if new != 0 {
		// Outlined slow path to allow inlining the fast path.
		// To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
		m.unlockSlow(new)
	}
}
// unlockSlow finishes Unlock when the state after dropping the lock bit
// is nonzero: it validates the unlock, and either wakes one waiter
// (normal mode) or hands the mutex off to the front waiter (starvation
// mode). new is the state value after Unlock's AddInt32.
func (m *Mutex) unlockSlow(new int32) {
	// Adding mutexLocked back reconstructs the pre-Unlock state; if its
	// locked bit is clear, the mutex was not locked when Unlock ran.
	if (new+mutexLocked)&mutexLocked == 0 {
		fatal("sync: unlock of unlocked mutex")
	}
	if new&mutexStarving == 0 {
		// Normal mode: try to wake exactly one waiter, if needed.
		old := new
		for {
			// If there are no waiters or a goroutine has already
			// been woken or grabbed the lock, no need to wake anyone.
			// In starvation mode ownership is directly handed off from unlocking
			// goroutine to the next waiter. We are not part of this chain,
			// since we did not observe mutexStarving when we unlocked the mutex above.
			// So get off the way.
			if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
				return
			}
			// Grab the right to wake someone.
			// Decrement the waiter count and set mutexWoken so no one
			// else wakes a second waiter concurrently.
			new = (old - 1<<mutexWaiterShift) | mutexWoken
			if atomic.CompareAndSwapInt32(&m.state, old, new) {
				runtime_Semrelease(&m.sema, false, 2)
				return
			}
			// CAS lost a race; reload and re-evaluate.
			old = m.state
		}
	} else {
		// Starving mode: handoff mutex ownership to the next waiter, and yield
		// our time slice so that the next waiter can start to run immediately.
		// Note: mutexLocked is not set, the waiter will set it after wakeup.
		// But mutex is still considered locked if mutexStarving is set,
		// so new coming goroutines won't acquire it.
		runtime_Semrelease(&m.sema, true, 2)
	}
}
235