package runtime
import (
"internal/runtime/atomic"
"unsafe"
)
// Mutex state values (stored in mutex.key) and spin-loop tuning parameters
// used by lock2 below.
const (
	mutex_unlocked = 0 // lock is free
	mutex_locked   = 1 // lock is held; no sleeper recorded
	mutex_sleeping = 2 // lock is held and some thread may be parked in futexsleep

	// lock2 performs active_spin rounds of procyield(active_spin_cnt)
	// (busy spinning), then passive_spin rounds of osyield (thread yield),
	// before falling back to sleeping in the futex.
	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
// key32 reinterprets the address of a uintptr lock/note key as a
// *uint32, so the 32-bit atomic and futex operations in this file can
// operate on it directly.
func key32(p *uintptr) *uint32 {
	addr := unsafe.Pointer(p)
	return (*uint32)(addr)
}
// mutexContended reports whether l currently shows contention: its key
// is above plain mutex_locked, meaning a waiter recorded (or is about
// to enter) the sleeping state.
func mutexContended(l *mutex) bool {
	state := atomic.Load(key32(&l.key))
	return state > mutex_locked
}
// lock acquires l. It routes through lockWithRank so that, when lock
// ranking is enabled, ordering violations are detected; the actual
// acquisition is performed by lock2.
func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}
// lock2 acquires l using the futex protocol.
//
// l.key holds mutex_unlocked, mutex_locked, or mutex_sleeping. The fast
// path is a single Xchg to mutex_locked; under contention we spin
// actively (procyield), then passively (osyield), and finally sleep in
// the futex after upgrading the key to mutex_sleeping so the eventual
// unlocker knows a wakeup is needed.
func lock2(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab: if the lock was free, we now own it.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// The lock was held, so v is mutex_locked or mutex_sleeping. Remember
	// it as wait: whenever we retake the lock below we store back at least
	// this state, never downgrading sleeping to locked — a downgrade could
	// lose the wakeup owed to some other parked waiter.
	wait := v

	timer := &lockTimer{lock: l}
	timer.begin()

	// Busy-spinning only pays off when another CPU can release the lock
	// concurrently; on a uniprocessor skip straight to yielding.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Phase 1: active spin — watch for the key to become free, then
		// try to CAS it from unlocked to wait.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					timer.end()
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Phase 2: passive spin — same attempt, but yield the OS thread
		// between tries.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					timer.end()
					return
				}
			}
			osyield()
		}

		// Phase 3: record a sleeper and park. The Xchg may observe the
		// lock momentarily free, in which case we just acquired it.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			timer.end()
			return
		}
		wait = mutex_sleeping
		// Sleep until the key changes from mutex_sleeping (no timeout).
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}
// unlock releases l via the lock-rank-aware wrapper; the actual release
// is performed by unlock2.
func unlock(l *mutex) {
	unlockWithRank(l)
}
// unlock2 releases l.
//
// A single Xchg to mutex_unlocked both releases the lock and reveals the
// prior state: if it was mutex_sleeping, a thread may be parked in
// futexsleep and must be woken.
func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		// Wake one waiter; it will re-contend in lock2.
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.mLockProfile.recordUnlock(l)
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// If this was the outermost lock and a preemption was requested while
	// locks were held, arm the stack-guard preemption check now.
	if gp.m.locks == 0 && gp.preempt {
		gp.stackguard0 = stackPreempt
	}
}
// noteclear resets n to the not-signaled state so it can be slept on.
// The plain (non-atomic) store assumes no concurrent sleepers/wakers at
// clear time — callers must establish that.
func noteclear(n *note) {
	n.key = 0
}
// notewakeup signals n, waking any thread sleeping on it. A note may be
// signaled only once between clears; a double wakeup is a fatal error.
func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	// Key is set first (above), so a sleeper that re-checks the key will
	// not re-sleep even if it misses this wakeup.
	futexwakeup(key32(&n.key), 1)
}
// notesleep blocks until n is signaled. It must run on g0 (the system
// stack) since it blocks the whole M.
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	// Sleep with no timeout, except under cgo: then wake every 10ms so
	// *cgo_yield can be serviced.
	ns := int64(-1)
	if *cgo_yield != nil {
		ns = 10e6 // 10ms
	}
	// Loop guards against spurious futex wakeups: only a nonzero key
	// (set by notewakeup) terminates the wait.
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}
// notetsleep_internal is the timed wait shared by notetsleep and
// notetsleepg. It blocks until n is signaled or ns nanoseconds elapse
// (ns < 0 means no timeout) and reports whether the note was signaled.
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	// No deadline: same loop as notesleep (with the 10ms cgo slicing).
	if ns < 0 {
		if *cgo_yield != nil {
			ns = 10e6 // 10ms
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	// Fast path: already signaled.
	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	// Timed wait: sleep in slices (capped at 10ms under cgo) until the
	// note is signaled or the deadline passes.
	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	// Final re-check: a wakeup may have raced the deadline.
	return atomic.Load(key32(&n.key)) != 0
}
// notetsleep blocks until n is signaled or ns nanoseconds pass (ns < 0
// means no timeout) and reports whether the note was signaled. Calling
// it off g0 while preemption is disabled is a fatal error.
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 {
		if gp.m.preemptoff != "" {
			throw("notetsleep not on g0")
		}
	}
	return notetsleep_internal(n, ns)
}
// notetsleepg is the user-goroutine form of notetsleep: it wraps the
// wait in entersyscallblock/exitsyscall so the scheduler can run other
// goroutines on this P meanwhile. It must NOT be called on g0.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	entersyscallblock()
	signaled := notetsleep_internal(n, ns)
	exitsyscall()
	return signaled
}
// beforeIdle is a no-op hook on this platform: it never supplies a
// goroutine to run before the M goes idle.
func beforeIdle(int64, int64) (*g, bool) {
	var none *g
	return none, false
}
// checkTimeouts is a no-op on futex-based platforms.
func checkTimeouts() {}
The pages are generated with Golds v0.6.9-preview (GOOS=linux GOARCH=amd64).
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @Go100and1 (reachable from the left QR code) to get the latest news of Golds.