summaryrefslogtreecommitdiff
path: root/lib/thread/mutex+futex.myr
blob: a6453fa9730322ab176542cbceb41a92c9b44034 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
use "atomic"
use "common"
use "futex"

pkg thread =
	/*
	Futex-backed mutex. `_state` holds one of the three values
	Unlocked, Locked, or Contended (see the constants below).
	*/
	type mutex = struct
		_state	: ftxtag
	;;	

	/* Create a new mutex in the unlocked state. */
	const mkmtx	: (-> mutex)
	/* Acquire the mutex, blocking until it becomes available. */
	const mtxlock	: (mtx : mutex# -> void)
	/* One nonblocking acquisition attempt; returns true on success. */
	const mtxtrylock	: (mtx : mutex# -> bool)
	/* Release the mutex, waking a waiter if any may be sleeping. */
	const mtxunlock	: (mtx : mutex# -> void)

	/* Slow path: mark the mutex contended and sleep until acquired. */
	pkglocal const mtxcontended	: (mtx : mutex# -> void)
;;

/*
Mutex state protocol:
	Unlocked:	free; acquirable with a single CAS.
	Locked:		held, with no waiters recorded; unlock needs no wakeup.
	Contended:	held, and waiters may be sleeping on the futex, so
			unlock must issue a wake.
*/
const Unlocked = 0
const Locked = 1
const Contended = 2

/* CAS attempts before falling back to sleeping on the futex */
var nspin = 10	/* FIXME: pick a sane number, based on CPU count */

/* Create a mutex in the unlocked state. */
const mkmtx = {
	var mtx

	mtx = [._state = Unlocked]
	-> mtx
}

/*
Acquire the mutex. Spins briefly hoping for an uncontended
acquisition, then falls back to sleeping on the futex.
*/
const mtxlock = {mtx
	var c

	/* 
	Uncontended case: we get an unlocked mutex, and we lock it.
	*/
        c = Locked
	for var i = 0; i < nspin; i++
		c = xcas(&mtx._state, Unlocked, Locked) 
		if c == Unlocked
			/* fast path: we took the lock with a single CAS */
			-> void
		;;
	;;

	/*
	Contended case: we set the lock state to Contended. This indicates that
	the lock is locked, and we potentially have threads waiting on it,
	which means that we will need to wake them up.
	*/
	if c == Locked
		c = xchg(&mtx._state, Contended)
	;;

	/*
	Sleep until the holder releases the lock; each wakeup re-grabs
	the state with xchg, leaving it Contended so our own eventual
	unlock still wakes any other sleepers.
	*/
	while c != Unlocked
		ftxwait(&mtx._state, Contended, Zptr)
		c = xchg(&mtx._state, Contended)
	;;
}

/*
Make one nonblocking attempt to take the mutex. Returns true if
the lock was free and we acquired it, false otherwise; never sleeps.
*/
const mtxtrylock = {mtx
	var prev

	prev = xcas(&mtx._state, Unlocked, Locked)
	-> prev == Unlocked
}

/*
Release the mutex. The state before release is either Locked or
Contended; anything else is a bug.

If the swap shows we were merely Locked, nobody recorded themselves
as a waiter, so returning without a wakeup is safe. If it shows
Contended, threads may be asleep on the futex and one must be woken.
*/
const mtxunlock = {mtx
	var prev

	prev = xchg(&mtx._state, Unlocked)
	if prev == Contended
		ftxwake(&mtx._state)
	;;
}

/*
Slow-path acquire: take the lock while forcing the state to
Contended, sleeping on the futex between attempts. Acquiring via
this path always leaves the state Contended, so the eventual
unlock will wake any remaining sleepers.
*/
const mtxcontended = {mtx
	var prev

	prev = xchg(&mtx._state, Contended)
	while prev != Unlocked
		ftxwait(&mtx._state, Contended, Zptr)
		prev = xchg(&mtx._state, Contended)
	;;
}