use std
use sys
use "atomic"
use "common"
pkg thread =
type mutex = struct
_state : uint32
;;
const mkmtx : (-> mutex)
const mtxlock : (mtx : mutex# -> void)
const mtxtrylock : (mtx : mutex# -> bool)
const mtxunlock : (mtx : mutex# -> void)
pkglocal const Unlocked = 0
pkglocal const Locked = 1
pkglocal const Contended = 2
;;
var nspin = 10 /* FIXME: pick a sane number, based on CPU count */
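/* Returns a new mutex, initialized to the Unlocked state. */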
const mkmtx = {
-> [._state = Unlocked]
}
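/*
Acquires the mutex, spinning briefly and then sleeping in the kernel
until it becomes available.
*/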
const mtxlock = {mtx
var c
/*
Uncontended case: spin a few times trying to take the mutex while it
is Unlocked; if we get it, we are done.
*/
c = Locked
for var i = 0; i < nspin; i++
c = xcas(&mtx._state, Unlocked, Locked)
if c == Unlocked
-> void
;;
sys.sched_yield()
;;
/*
Contended case: we set the lock state to Contended. This indicates that
the lock is locked, and that we potentially have threads waiting on it,
which means that we will need to wake them when we unlock.
*/
if c == Locked
c = xchg(&mtx._state, Contended)
;;
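/*
Sleep until the lock looks free. We always re-take it as Contended,
since we cannot know whether other waiters remain; the worst case is
one spurious wakeup at unlock time.
*/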
while c != Unlocked
sys.umtx_op( \
(&mtx._state : void#), \
sys.Umtxwaituintpriv, \
(Contended : uint64), \
Zptr, Zptr)
c = xchg(&mtx._state, Contended)
;;
}
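/*
Attempts to take the mutex without blocking: returns true if it was
acquired, false if it was already held.
*/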
const mtxtrylock = {mtx
-> xcas(&mtx._state, Unlocked, Locked) == Unlocked
}
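/*
Releases the mutex, waking a waiter if the state indicates contention.
*/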
const mtxunlock = {mtx
/*
Uncontended case: if the mutex state is not Contended, and the xchg()
call below confirms that nobody started waiting in the meantime, it is
safe to simply return without a wakeup; nobody was waiting on us.
*/
if mtx._state == Contended
mtx._state = Unlocked
elif xchg(&mtx._state, Unlocked) == Locked
-> void
;;
/* wake one waiting thread; it will re-take the lock as Contended */
sys.umtx_op((&mtx._state : void#), sys.Umtxwakepriv, 1, Zptr, Zptr)
}
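/*
Usage sketch (illustrative only, not part of this file), as seen from
code importing this package; the names below are made up for the
example, and the mutex is assumed to be shared between threads:

	var mtx : thread.mutex
	var count : int

	const init = {
		mtx = thread.mkmtx()
	}

	const incr = {
		thread.mtxlock(&mtx)
		count++
		thread.mtxunlock(&mtx)
	}
*/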