atomic_arm.go
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 //go:build arm
6
7 package atomic
8
9 import (
10 "internal/cpu"
11 "unsafe"
12 )
13
const (
	// Byte offset of cpu.ARM.HasV7Atomics within the cpu package's
	// feature struct. NOTE(review): presumably consumed by the assembly
	// implementations to branch on LDREXD/STREXD support — confirm
	// against the .s files.
	offsetARMHasV7Atomics = unsafe.Offsetof(cpu.ARM.HasV7Atomics)
)
17
18 // Export some functions via linkname to assembly in sync/atomic.
19 //
20 //go:linkname Xchg
21 //go:linkname Xchguintptr
22 //go:linkname Xadd
23
// spinlock is a minimal test-and-set lock used by the go*64 functions
// below to emulate 64-bit atomic operations with 32-bit CAS.
type spinlock struct {
	v uint32 // 0 = unlocked, 1 = locked; transitions via Cas/Store
}
27
28 //go:nosplit
29 func (l *spinlock) lock() {
30 for {
31 if Cas(&l.v, 0, 1) {
32 return
33 }
34 }
35 }
36
// unlock releases the lock with an atomic store of 0.
//
//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}
41
// locktab shards the spinlocks that guard emulated 64-bit atomics.
// Each entry is padded out to a cache line to avoid false sharing
// between adjacent locks. NOTE(review): 57 looks like a deliberate
// non-power-of-two table size so addresses spread across slots (see
// addrLock) — confirm intent before changing.
var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}
46
47 func addrLock(addr *uint64) *spinlock {
48 return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
49 }
50
51 // Atomic add and return new value.
52 //
53 //go:nosplit
54 func Xadd(val *uint32, delta int32) uint32 {
55 for {
56 oval := *val
57 nval := oval + uint32(delta)
58 if Cas(val, oval, nval) {
59 return nval
60 }
61 }
62 }
63
// Xadduintptr is the uintptr flavor of atomic add; bodyless here,
// implemented in assembly. NOTE(review): presumably returns the new
// value, mirroring Xadd — confirm against the assembly.
//
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
66
67 //go:nosplit
68 func Xchg(addr *uint32, v uint32) uint32 {
69 for {
70 old := *addr
71 if Cas(addr, old, v) {
72 return old
73 }
74 }
75 }
76
// Xchg8 is the byte-sized atomic exchange; bodyless here, implemented
// in assembly.
//
//go:noescape
func Xchg8(addr *uint8, v uint8) uint8
79
80 //go:nosplit
81 func Xchguintptr(addr *uintptr, v uintptr) uintptr {
82 return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
83 }
84
// StorepNoWB stores a pointer without a write barrier (per its name);
// bodyless here, implemented in assembly.
//
// Not noescape -- it installs a pointer to addr.
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)

// Store atomically stores v into *addr; assembly implementation.
//
//go:noescape
func Store(addr *uint32, v uint32)

// StoreRel is the release-ordered store; assembly implementation.
//
//go:noescape
func StoreRel(addr *uint32, v uint32)

// StoreReluintptr is the uintptr flavor of StoreRel; assembly
// implementation.
//
//go:noescape
func StoreReluintptr(addr *uintptr, v uintptr)
96
97 //go:nosplit
98 func goCas64(addr *uint64, old, new uint64) bool {
99 if uintptr(unsafe.Pointer(addr))&7 != 0 {
100 *(*int)(nil) = 0 // crash on unaligned uint64
101 }
102 _ = *addr // if nil, fault before taking the lock
103 var ok bool
104 addrLock(addr).lock()
105 if *addr == old {
106 *addr = new
107 ok = true
108 }
109 addrLock(addr).unlock()
110 return ok
111 }
112
113 //go:nosplit
114 func goXadd64(addr *uint64, delta int64) uint64 {
115 if uintptr(unsafe.Pointer(addr))&7 != 0 {
116 *(*int)(nil) = 0 // crash on unaligned uint64
117 }
118 _ = *addr // if nil, fault before taking the lock
119 var r uint64
120 addrLock(addr).lock()
121 r = *addr + uint64(delta)
122 *addr = r
123 addrLock(addr).unlock()
124 return r
125 }
126
127 //go:nosplit
128 func goXchg64(addr *uint64, v uint64) uint64 {
129 if uintptr(unsafe.Pointer(addr))&7 != 0 {
130 *(*int)(nil) = 0 // crash on unaligned uint64
131 }
132 _ = *addr // if nil, fault before taking the lock
133 var r uint64
134 addrLock(addr).lock()
135 r = *addr
136 *addr = v
137 addrLock(addr).unlock()
138 return r
139 }
140
141 //go:nosplit
142 func goLoad64(addr *uint64) uint64 {
143 if uintptr(unsafe.Pointer(addr))&7 != 0 {
144 *(*int)(nil) = 0 // crash on unaligned uint64
145 }
146 _ = *addr // if nil, fault before taking the lock
147 var r uint64
148 addrLock(addr).lock()
149 r = *addr
150 addrLock(addr).unlock()
151 return r
152 }
153
154 //go:nosplit
155 func goStore64(addr *uint64, v uint64) {
156 if uintptr(unsafe.Pointer(addr))&7 != 0 {
157 *(*int)(nil) = 0 // crash on unaligned uint64
158 }
159 _ = *addr // if nil, fault before taking the lock
160 addrLock(addr).lock()
161 *addr = v
162 addrLock(addr).unlock()
163 }
164
// Or8 atomically ORs v into *addr; bodyless here, implemented in
// assembly (goOr8 below is the pure-Go fallback).
//
//go:noescape
func Or8(addr *uint8, v uint8)
167
168 //go:nosplit
169 func goOr8(addr *uint8, v uint8) {
170 // Align down to 4 bytes and use 32-bit CAS.
171 addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
172 word := uint32(v) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian
173 for {
174 old := *addr32
175 if Cas(addr32, old, old|word) {
176 return
177 }
178 }
179 }
180
// And8 atomically ANDs v into *addr; bodyless here, implemented in
// assembly (goAnd8 below is the pure-Go fallback).
//
//go:noescape
func And8(addr *uint8, v uint8)
183
184 //go:nosplit
185 func goAnd8(addr *uint8, v uint8) {
186 // Align down to 4 bytes and use 32-bit CAS.
187 addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
188 word := uint32(v) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian
189 mask := uint32(0xFF) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian
190 word |= ^mask
191 for {
192 old := *addr32
193 if Cas(addr32, old, old&word) {
194 return
195 }
196 }
197 }
198
199 //go:nosplit
200 func Or(addr *uint32, v uint32) {
201 for {
202 old := *addr
203 if Cas(addr, old, old|v) {
204 return
205 }
206 }
207 }
208
209 //go:nosplit
210 func And(addr *uint32, v uint32) {
211 for {
212 old := *addr
213 if Cas(addr, old, old&v) {
214 return
215 }
216 }
217 }
218
// The remaining declarations are bodyless: each is implemented in
// assembly for this architecture.

// armcas is the CAS primitive; reports whether the swap happened.
//
//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool

// Load atomically loads *addr.
//
//go:noescape
func Load(addr *uint32) uint32

// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer

// Load8 atomically loads a byte.
//
//go:noescape
func Load8(addr *uint8) uint8

// LoadAcq is the acquire-ordered load.
//
//go:noescape
func LoadAcq(addr *uint32) uint32

// LoadAcquintptr is the uintptr flavor of LoadAcq.
//
//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

// Cas64 is the 64-bit CAS; reports whether the swap happened.
//
//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

// CasRel is the release-ordered CAS.
//
//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

// Xadd64 atomically adds delta to *addr and returns the new value.
//
//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

// Xchg64 atomically swaps v into *addr and returns the old value.
//
//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

// Load64 atomically loads a uint64.
//
//go:noescape
func Load64(addr *uint64) uint64

// Store8 atomically stores a byte.
//
//go:noescape
func Store8(addr *uint8, v uint8)

// Store64 atomically stores a uint64.
//
//go:noescape
func Store64(addr *uint64, v uint64)
257