runtime_fast32_swiss.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.swissmap

package maps

import (
	"internal/abi"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

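// runtime_mapaccess1_fast32 is the lookup fast path for maps with 4-byte,
// memory-comparable keys (e.g. map[uint32]V). It returns a pointer to the
// element for key, or a pointer to the element type's zero value if key is
// not present; it never returns nil. The compiler lowers v := m[k] to,
// roughly,
//
//	v = *(*V)(runtime.mapaccess1_fast32(typ, m, k))
//
// (V here is illustrative; the actual call is generated by the compiler.)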
//go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast32)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

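	// No directory (dirLen == 0) means the map is still a single group:
	// walk its full slots linearly, skipping hashing entirely.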
	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
			if key == *(*uint32)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0])
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
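	// matchH2 returns a bitmask of the slots in the group whose control
	// byte matches the h2 bits of the hash; each candidate must still be
	// confirmed by comparing the full key, since distinct keys can share h2.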
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint32)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

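// runtime_mapaccess2_fast32 is the two-result variant of
// runtime_mapaccess1_fast32, backing the comma-ok form v, ok = m[k]; the
// boolean result reports whether key was present.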
//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32
func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast32)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
			if key == *(*uint32)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem, true
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint32)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

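// putSlotSmallFast32 inserts key into a map that still consists of a single
// group (no directory) and returns a pointer to the new element slot. The
// caller has already verified that the group has a free slot.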
func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*uint32)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*uint32)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

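// runtime_mapassign_fast32 is the assignment fast path for maps with 4-byte,
// memory-comparable keys. It returns a pointer to the element slot for key,
// inserting the key if it is not already present; the compiler-generated
// caller then stores the value through that pointer, so m[k] = v becomes,
// roughly,
//
//	*(*V)(runtime.mapassign_fast32(typ, m, k)) = v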
//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32
func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast32)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

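	// Small map (no directory yet): insert directly into the single group
	// if it has room; otherwise promote the map to a full table layout and
	// fall through to the general insert path below.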
	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmallFast32(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry; grow to a full-size map.
		m.growToTable(typ)
	}

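	// General case: the map has a directory of tables. Probe the table
	// selected by the hash; if the probe sequence yields no usable slot,
	// rehash that table and retry from the top of the outer loop.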
	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint32)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint32)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

// Key is a 32-bit pointer (only called on 32-bit GOARCH). This source is
// identical to fast64ptr.
//
// TODO(prattmic): With some compiler refactoring we could avoid duplication
// of this function.
//
//go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr
func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast32ptr)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmallFastPtr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry; grow to a full-size map.
		m.growToTable(typ)
	}

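	// General case, as in runtime_mapassign_fast32 above, except that the
	// key is compared and stored as an unsafe.Pointer rather than a uint32.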
	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot we
		// find, which we'll use to insert the new entry if necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*unsafe.Pointer)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*unsafe.Pointer)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

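// runtime_mapdelete_fast32 removes key from the map. Deleting from a nil or
// empty map is a no-op. There is no specialized delete path; this simply
// forwards to the generic Map.Delete.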
//go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32
func runtime_mapdelete_fast32(typ *abi.SwissMapType, m *Map, key uint32) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapdelete_fast32)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
 476