package maps

import (
	"internal/abi"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

// runtime_mapaccess1_fast64 is the single-result lookup fast path for maps
// with 8-byte keys. It returns a pointer to the element, or a pointer to the
// zero value if the key is not present; it never returns nil.
func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

	if m.dirLen == 0 {
		// Small map: a single group, no directory. Scan the full slots
		// linearly.
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
			if key == *(*uint64)(slotKey) && full.lowestSet() {
				// The element immediately follows the 8-byte key.
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0])
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select a table from the directory and probe its groups.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Reaching an empty slot ends the probe sequence: the key
			// is not present.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
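
// Illustrative usage (an added sketch, not part of the original source): a
// plain one-result lookup on a map with an 8-byte key type is what reaches
// the fast path above; the compiler lowers it to runtime.mapaccess1_fast64.
//
//	m := map[uint64]string{42: "answer"}
//	v := m[42] // found: v == "answer"
//	w := m[7]  // missing: the zero value, w == ""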

// runtime_mapaccess2_fast64 is the comma-ok lookup fast path for maps with
// 8-byte keys. It returns a pointer to the element and whether the key was
// present; for a missing key the pointer refers to the zero value.
func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	if m.dirLen == 0 {
		// Small map: a single group, no directory.
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
			if key == *(*uint64)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem, true
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select a table from the directory and probe its groups.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Reaching an empty slot ends the probe sequence: the key
			// is not present.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
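
// Illustrative usage (an added sketch, not part of the original source): the
// comma-ok form of a lookup is what reaches the two-result fast path above.
//
//	m := map[uint64]int{1: 10}
//	v, ok := m[1] // v == 10, ok == true
//	_, ok = m[2]  // ok == false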

// putSlotSmallFast64 inserts key into a small map (a single group, no
// directory) and returns a pointer to the element slot. If the key is already
// present, the existing element slot is returned.
func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	// Look for an existing slot containing this key.
	match := g.ctrls().matchH2(h2(hash))
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*uint64)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// No existing entry; claim an empty or deleted slot.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*uint64)(slotKey) = key

	slotElem := g.elem(typ, i)

	// Publish the slot by recording the hash's h2 bits in the control byte.
	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}
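
// Note (added for clarity, not in the original source): a small map is a
// single group of abi.SwissMapGroupSlots slots with no directory. matchH2
// yields a bitset of slots whose control byte equals the hash's h2 bits, and
// matchEmptyOrDeleted yields the slots still available, so a small-map insert
// is a handful of word-wide compares rather than a full table probe.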

// runtime_mapassign_fast64 is the assignment fast path for maps with 8-byte
// keys. It returns a pointer to the element slot for key, inserting the key
// if it is not already present.
func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			// Small map with room for another entry.
			elem := m.putSlotSmallFast64(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// The small map is full; grow to a full table.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select a table from the directory.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we probe, remember the first deleted slot we find; we can
		// reuse it for the new entry if the key turns out to be absent.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint64)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No slot for this key in this group. Is this the end of
			// the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				// Nothing but full slots; keep probing.
				continue
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// Remember the first deleted slot and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}

			// An empty slot: the end of the probe sequence. If a
			// deleted slot was seen on the way, reuse it instead; that
			// does not consume growthLeft (the increment below cancels
			// the decrement in the insert path).
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++
			}

			// If there is room left to grow, insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint64)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// No room; rehash the table and retry.
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
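
// Illustrative usage (an added sketch, not part of the original source): an
// assignment to a map with an 8-byte key type reaches the fast path above;
// the compiler lowers it to runtime.mapassign_fast64, which returns the slot
// the compiled code then stores the value into.
//
//	m := make(map[uint64]string)
//	m[42] = "answer" // insert: may grow from small-map form to table form
//	m[42] = "other"  // update: reuses the existing slot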

// putSlotSmallFastPtr is the pointer-key analog of putSlotSmallFast64: it
// inserts key into a small map (a single group, no directory) and returns a
// pointer to the element slot.
func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	// Look for an existing slot containing this key.
	match := g.ctrls().matchH2(h2(hash))
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*unsafe.Pointer)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// No existing entry; claim an empty or deleted slot.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*unsafe.Pointer)(slotKey) = key

	slotElem := g.elem(typ, i)

	// Publish the slot by recording the hash's h2 bits in the control byte.
	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

// runtime_mapassign_fast64ptr is the assignment fast path for maps with
// pointer-typed 8-byte keys. It returns a pointer to the element slot for
// key, inserting the key if it is not already present.
func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			// Small map with room for another entry.
			elem := m.putSlotSmallFastPtr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// The small map is full; grow to a full table.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select a table from the directory.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we probe, remember the first deleted slot we find; we can
		// reuse it for the new entry if the key turns out to be absent.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*unsafe.Pointer)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No slot for this key in this group. Is this the end of
			// the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				// Nothing but full slots; keep probing.
				continue
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// Remember the first deleted slot and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}

			// An empty slot: the end of the probe sequence. If a
			// deleted slot was seen on the way, reuse it instead; that
			// does not consume growthLeft (the increment below cancels
			// the decrement in the insert path).
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++
			}

			// If there is room left to grow, insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*unsafe.Pointer)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// No room; rehash the table and retry.
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
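
// Illustrative usage (an added sketch, not part of the original source): on
// 64-bit platforms, maps whose key is a single pointer are assumed to use
// this pointer variant, presumably because a pointer key must stay visible to
// the garbage collector, unlike a raw uint64 store.
//
//	type node struct{ id int }
//	m := make(map[*node]string)
//	n := &node{id: 1}
//	m[n] = "first" // pointer-keyed insert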

// runtime_mapdelete_fast64 is the delete fast path for maps with 8-byte keys.
// It forwards to the generic Map.Delete after the race and emptiness checks.
func runtime_mapdelete_fast64(typ *abi.SwissMapType, m *Map, key uint64) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
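
// Illustrative usage (an added sketch, not part of the original source): the
// built-in delete on a map with an 8-byte key type reaches the fast path
// above, which simply defers to the generic Map.Delete.
//
//	m := map[uint64]int{1: 10, 2: 20}
//	delete(m, 1) // len(m) == 1
//	delete(m, 3) // deleting a missing key is a no-op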