// atomics_critical.mx
1 //go:build baremetal && !moxie.wasm
2
3 // Automatically generated file. DO NOT EDIT.
4 // This file implements standins for non-native atomics using critical sections.
5
6 package runtime
7
8 import (
9 _ "unsafe"
10 )
11
12 // Documentation:
13 // * https://llvm.org/docs/Atomics.html
14 // * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
15 //
16 // Some atomic operations are emitted inline while others are emitted as libcalls.
17 // How many are emitted as libcalls depends on the MCU arch and core variant.
18
19 // 16-bit atomics.
20
21 //export __atomic_load_2
22 func __atomic_load_2(ptr *uint16, ordering uintptr) uint16 {
23 // The LLVM docs for this say that there is a val argument after the pointer.
24 // That is a typo, and the GCC docs omit it.
25 mask := lockAtomics()
26 val := *ptr
27 unlockAtomics(mask)
28 return val
29 }
30
31 //export __atomic_store_2
32 func __atomic_store_2(ptr *uint16, val uint16, ordering uintptr) {
33 mask := lockAtomics()
34 *ptr = val
35 unlockAtomics(mask)
36 }
37
38 //go:inline
39 func doAtomicCAS16(ptr *uint16, expected, desired uint16) uint16 {
40 mask := lockAtomics()
41 old := *ptr
42 if old == expected {
43 *ptr = desired
44 }
45 unlockAtomics(mask)
46 return old
47 }
48
49 //export __sync_val_compare_and_swap_2
50 func __sync_val_compare_and_swap_2(ptr *uint16, expected, desired uint16) uint16 {
51 return doAtomicCAS16(ptr, expected, desired)
52 }
53
54 //export __atomic_compare_exchange_2
55 func __atomic_compare_exchange_2(ptr, expected *uint16, desired uint16, successOrder, failureOrder uintptr) bool {
56 exp := *expected
57 old := doAtomicCAS16(ptr, exp, desired)
58 return old == exp
59 }
60
61 //go:inline
62 func doAtomicSwap16(ptr *uint16, new uint16) uint16 {
63 mask := lockAtomics()
64 old := *ptr
65 *ptr = new
66 unlockAtomics(mask)
67 return old
68 }
69
70 //export __sync_lock_test_and_set_2
71 func __sync_lock_test_and_set_2(ptr *uint16, new uint16) uint16 {
72 return doAtomicSwap16(ptr, new)
73 }
74
75 //export __atomic_exchange_2
76 func __atomic_exchange_2(ptr *uint16, new uint16, ordering uintptr) uint16 {
77 return doAtomicSwap16(ptr, new)
78 }
79
80 //go:inline
81 func doAtomicAdd16(ptr *uint16, value uint16) (old, new uint16) {
82 mask := lockAtomics()
83 old = *ptr
84 new = old + value
85 *ptr = new
86 unlockAtomics(mask)
87 return old, new
88 }
89
90 //export __atomic_fetch_add_2
91 func __atomic_fetch_add_2(ptr *uint16, value uint16, ordering uintptr) uint16 {
92 old, _ := doAtomicAdd16(ptr, value)
93 return old
94 }
95
96 //export __sync_fetch_and_add_2
97 func __sync_fetch_and_add_2(ptr *uint16, value uint16) uint16 {
98 old, _ := doAtomicAdd16(ptr, value)
99 return old
100 }
101
102 //export __atomic_add_fetch_2
103 func __atomic_add_fetch_2(ptr *uint16, value uint16, ordering uintptr) uint16 {
104 _, new := doAtomicAdd16(ptr, value)
105 return new
106 }
107
108 // 32-bit atomics.
109
110 //export __atomic_load_4
111 func __atomic_load_4(ptr *uint32, ordering uintptr) uint32 {
112 // The LLVM docs for this say that there is a val argument after the pointer.
113 // That is a typo, and the GCC docs omit it.
114 mask := lockAtomics()
115 val := *ptr
116 unlockAtomics(mask)
117 return val
118 }
119
120 //export __atomic_store_4
121 func __atomic_store_4(ptr *uint32, val uint32, ordering uintptr) {
122 mask := lockAtomics()
123 *ptr = val
124 unlockAtomics(mask)
125 }
126
127 //go:inline
128 func doAtomicCAS32(ptr *uint32, expected, desired uint32) uint32 {
129 mask := lockAtomics()
130 old := *ptr
131 if old == expected {
132 *ptr = desired
133 }
134 unlockAtomics(mask)
135 return old
136 }
137
138 //export __sync_val_compare_and_swap_4
139 func __sync_val_compare_and_swap_4(ptr *uint32, expected, desired uint32) uint32 {
140 return doAtomicCAS32(ptr, expected, desired)
141 }
142
143 //export __atomic_compare_exchange_4
144 func __atomic_compare_exchange_4(ptr, expected *uint32, desired uint32, successOrder, failureOrder uintptr) bool {
145 exp := *expected
146 old := doAtomicCAS32(ptr, exp, desired)
147 return old == exp
148 }
149
150 //go:inline
151 func doAtomicSwap32(ptr *uint32, new uint32) uint32 {
152 mask := lockAtomics()
153 old := *ptr
154 *ptr = new
155 unlockAtomics(mask)
156 return old
157 }
158
159 //export __sync_lock_test_and_set_4
160 func __sync_lock_test_and_set_4(ptr *uint32, new uint32) uint32 {
161 return doAtomicSwap32(ptr, new)
162 }
163
164 //export __atomic_exchange_4
165 func __atomic_exchange_4(ptr *uint32, new uint32, ordering uintptr) uint32 {
166 return doAtomicSwap32(ptr, new)
167 }
168
169 //go:inline
170 func doAtomicAdd32(ptr *uint32, value uint32) (old, new uint32) {
171 mask := lockAtomics()
172 old = *ptr
173 new = old + value
174 *ptr = new
175 unlockAtomics(mask)
176 return old, new
177 }
178
179 //export __atomic_fetch_add_4
180 func __atomic_fetch_add_4(ptr *uint32, value uint32, ordering uintptr) uint32 {
181 old, _ := doAtomicAdd32(ptr, value)
182 return old
183 }
184
185 //export __sync_fetch_and_add_4
186 func __sync_fetch_and_add_4(ptr *uint32, value uint32) uint32 {
187 old, _ := doAtomicAdd32(ptr, value)
188 return old
189 }
190
191 //export __atomic_add_fetch_4
192 func __atomic_add_fetch_4(ptr *uint32, value uint32, ordering uintptr) uint32 {
193 _, new := doAtomicAdd32(ptr, value)
194 return new
195 }
196
197 // 64-bit atomics.
198
199 //export __atomic_load_8
200 func __atomic_load_8(ptr *uint64, ordering uintptr) uint64 {
201 // The LLVM docs for this say that there is a val argument after the pointer.
202 // That is a typo, and the GCC docs omit it.
203 mask := lockAtomics()
204 val := *ptr
205 unlockAtomics(mask)
206 return val
207 }
208
209 //export __atomic_store_8
210 func __atomic_store_8(ptr *uint64, val uint64, ordering uintptr) {
211 mask := lockAtomics()
212 *ptr = val
213 unlockAtomics(mask)
214 }
215
216 //go:inline
217 func doAtomicCAS64(ptr *uint64, expected, desired uint64) uint64 {
218 mask := lockAtomics()
219 old := *ptr
220 if old == expected {
221 *ptr = desired
222 }
223 unlockAtomics(mask)
224 return old
225 }
226
227 //export __sync_val_compare_and_swap_8
228 func __sync_val_compare_and_swap_8(ptr *uint64, expected, desired uint64) uint64 {
229 return doAtomicCAS64(ptr, expected, desired)
230 }
231
232 //export __atomic_compare_exchange_8
233 func __atomic_compare_exchange_8(ptr, expected *uint64, desired uint64, successOrder, failureOrder uintptr) bool {
234 exp := *expected
235 old := doAtomicCAS64(ptr, exp, desired)
236 return old == exp
237 }
238
239 //go:inline
240 func doAtomicSwap64(ptr *uint64, new uint64) uint64 {
241 mask := lockAtomics()
242 old := *ptr
243 *ptr = new
244 unlockAtomics(mask)
245 return old
246 }
247
248 //export __sync_lock_test_and_set_8
249 func __sync_lock_test_and_set_8(ptr *uint64, new uint64) uint64 {
250 return doAtomicSwap64(ptr, new)
251 }
252
253 //export __atomic_exchange_8
254 func __atomic_exchange_8(ptr *uint64, new uint64, ordering uintptr) uint64 {
255 return doAtomicSwap64(ptr, new)
256 }
257
258 //go:inline
259 func doAtomicAdd64(ptr *uint64, value uint64) (old, new uint64) {
260 mask := lockAtomics()
261 old = *ptr
262 new = old + value
263 *ptr = new
264 unlockAtomics(mask)
265 return old, new
266 }
267
268 //export __atomic_fetch_add_8
269 func __atomic_fetch_add_8(ptr *uint64, value uint64, ordering uintptr) uint64 {
270 old, _ := doAtomicAdd64(ptr, value)
271 return old
272 }
273
274 //export __sync_fetch_and_add_8
275 func __sync_fetch_and_add_8(ptr *uint64, value uint64) uint64 {
276 old, _ := doAtomicAdd64(ptr, value)
277 return old
278 }
279
280 //export __atomic_add_fetch_8
281 func __atomic_add_fetch_8(ptr *uint64, value uint64, ordering uintptr) uint64 {
282 _, new := doAtomicAdd64(ptr, value)
283 return new
284 }
285