atomic_amd64.s

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Note: some of these functions are semantically inlined
// by the compiler (in src/cmd/compile/internal/gc/ssa.go).

#include "textflag.h"

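// The Load* wrappers below simply alias Load and Load64: an aligned
// load is already atomic on amd64, and TSO gives it acquire semantics.

// func Loaduintptr(ptr *uintptr) uintptr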
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

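// func Loaduint(ptr *uint) uint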
TEXT ·Loaduint(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

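// func Loadint32(ptr *int32) int32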
TEXT ·Loadint32(SB), NOSPLIT, $0-12
	JMP	·Load(SB)

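// func Loadint64(ptr *int64) int64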
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVQ	ptr+0(FP), BX
	MOVL	old+8(FP), AX
	MOVL	new+12(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+16(FP)
	RET

// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVQ	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	SETEQ	ret+24(FP)
	RET

// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
TEXT ·Casp1(SB), NOSPLIT, $0-25
	MOVQ	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	SETEQ	ret+24(FP)
	RET

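// func Casint32(ptr *int32, old, new int32) bool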
TEXT ·Casint32(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

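// func Casint64(ptr *int64, old, new int64) bool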
TEXT ·Casint64(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

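// func Casuintptr(ptr *uintptr, old, new uintptr) bool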
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

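// func CasRel(ptr *uint32, old, new uint32) bool
// A LOCKed CMPXCHG is already a full barrier on amd64, so the
// release-ordered CAS is identical to Cas.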
TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

// func Xadd(ptr *uint32, delta int32) uint32
// Atomically:
//	*ptr += delta
//	return *ptr
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	delta+8(FP), AX
	MOVL	AX, CX
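	// LOCK XADDL leaves the old value of *ptr in AX; adding delta
	// back produces the new value, which Xadd returns.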
	LOCK
	XADDL	AX, 0(BX)
	ADDL	CX, AX
	MOVL	AX, ret+16(FP)
	RET

// func Xadd64(ptr *uint64, delta int64) uint64
// Atomically:
//	*ptr += delta
//	return *ptr
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	delta+8(FP), AX
	MOVQ	AX, CX
	LOCK
	XADDQ	AX, 0(BX)
	ADDQ	CX, AX
	MOVQ	AX, ret+16(FP)
	RET

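// func Xaddint32(ptr *int32, delta int32) int32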
TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	JMP	·Xadd(SB)

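// func Xaddint64(ptr *int64, delta int64) int64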
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

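// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr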
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

// func Xchg8(ptr *uint8, new uint8) uint8
// Atomically:
//	old := *ptr
//	*ptr = new
//	return old
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVQ	ptr+0(FP), BX
	MOVB	new+8(FP), AX
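	// XCHG with a memory operand is implicitly LOCKed, so no
	// explicit LOCK prefix is needed.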
	XCHGB	AX, 0(BX)
	MOVB	AX, ret+16(FP)
	RET

// func Xchg(ptr *uint32, new uint32) uint32
// Atomically:
//	old := *ptr
//	*ptr = new
//	return old
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	new+8(FP), AX
	XCHGL	AX, 0(BX)
	MOVL	AX, ret+16(FP)
	RET

// func Xchg64(ptr *uint64, new uint64) uint64
// Atomically:
//	old := *ptr
//	*ptr = new
//	return old
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	new+8(FP), AX
	XCHGQ	AX, 0(BX)
	MOVQ	AX, ret+16(FP)
	RET

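// func Xchgint32(ptr *int32, new int32) int32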
TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	JMP	·Xchg(SB)

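// func Xchgint64(ptr *int64, new int64) int64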
TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

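// func Xchguintptr(ptr *uintptr, new uintptr) uintptr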
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

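// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
// Atomically:
//	*ptr = val
// with no write barrier. XCHGQ rather than a plain MOVQ makes the
// store sequentially consistent.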
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), AX
	XCHGQ	AX, 0(BX)
	RET

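// func Store(ptr *uint32, val uint32)
// The XCHGL both stores the value and acts as a full memory barrier,
// making the store sequentially consistent.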
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVQ	ptr+0(FP), BX
	MOVL	val+8(FP), AX
	XCHGL	AX, 0(BX)
	RET

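// func Store8(ptr *uint8, val uint8)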
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVQ	ptr+0(FP), BX
	MOVB	val+8(FP), AX
	XCHGB	AX, 0(BX)
	RET

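// func Store64(ptr *uint64, val uint64)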
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), AX
	XCHGQ	AX, 0(BX)
	RET

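// func Storeint32(ptr *int32, val int32)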
TEXT ·Storeint32(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

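// func Storeint64(ptr *int64, val int64)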
TEXT ·Storeint64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

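// func Storeuintptr(ptr *uintptr, val uintptr)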
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

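// func StoreRel(ptr *uint32, val uint32)
// On amd64, TSO means a plain store already has release semantics,
// so the Rel variants below simply alias the plain stores.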
TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

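// func StoreRel64(ptr *uint64, val uint64)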
TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

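// func StoreReluintptr(ptr *uintptr, val uintptr)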
TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

// func Or8(ptr *uint8, val uint8)
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVQ	ptr+0(FP), AX
	MOVB	val+8(FP), BX
	LOCK
	ORB	BX, (AX)
	RET

// func And8(ptr *uint8, val uint8)
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVQ	ptr+0(FP), AX
	MOVB	val+8(FP), BX
	LOCK
	ANDB	BX, (AX)
	RET

// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVQ	ptr+0(FP), AX
	MOVL	val+8(FP), BX
	LOCK
	ORL	BX, (AX)
	RET

// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVQ	ptr+0(FP), AX
	MOVL	val+8(FP), BX
	LOCK
	ANDL	BX, (AX)
	RET

// func Or32(ptr *uint32, val uint32) (old uint32)
TEXT ·Or32(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	val+8(FP), CX
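	// CAS loop: read the current value, OR in val, and try to publish
	// the result with LOCK CMPXCHG. If another writer raced in, ZF is
	// clear and the loop retries; on success AX holds the old value.
	// And32, Or64, and And64 below follow the same pattern.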
casloop:
	MOVL	CX, DX
	MOVL	(BX), AX
	ORL	AX, DX
	LOCK
	CMPXCHGL	DX, (BX)
	JNZ	casloop
	MOVL	AX, ret+16(FP)
	RET

// func And32(ptr *uint32, val uint32) (old uint32)
TEXT ·And32(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	val+8(FP), CX
casloop:
	MOVL	CX, DX
	MOVL	(BX), AX
	ANDL	AX, DX
	LOCK
	CMPXCHGL	DX, (BX)
	JNZ	casloop
	MOVL	AX, ret+16(FP)
	RET

// func Or64(ptr *uint64, val uint64) (old uint64)
TEXT ·Or64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), CX
casloop:
	MOVQ	CX, DX
	MOVQ	(BX), AX
	ORQ	AX, DX
	LOCK
	CMPXCHGQ	DX, (BX)
	JNZ	casloop
	MOVQ	AX, ret+16(FP)
	RET

// func And64(ptr *uint64, val uint64) (old uint64)
TEXT ·And64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), CX
casloop:
	MOVQ	CX, DX
	MOVQ	(BX), AX
	ANDQ	AX, DX
	LOCK
	CMPXCHGQ	DX, (BX)
	JNZ	casloop
	MOVQ	AX, ret+16(FP)
	RET

// func Anduintptr(ptr *uintptr, val uintptr) (old uintptr)
TEXT ·Anduintptr(SB), NOSPLIT, $0-24
	JMP	·And64(SB)

// func Oruintptr(ptr *uintptr, val uintptr) (old uintptr)
TEXT ·Oruintptr(SB), NOSPLIT, $0-24
	JMP	·Or64(SB)