summaryrefslogtreecommitdiff
path: root/lib/thread/atomic-impl+x64.s
blob: 3f28277518c437cecbecd203a720f183280dc759 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# get variants
.globl thread$xget8
.globl _thread$xget8
# uint8 thread$xget8(uint8 *p)
# SysV AMD64: p in %rdi; old byte returned in %al.
# A plain aligned load is atomic on x86-64; no lock prefix is needed.
# The leading-underscore alias serves Mach-O's symbol mangling.
thread$xget8:
_thread$xget8:
	movb	(%rdi), %al
	ret
.globl thread$xget32
.globl _thread$xget32
# uint32 thread$xget32(uint32 *p)
# SysV AMD64: p in %rdi; value returned in %eax.
# A plain aligned 32-bit load is atomic on x86-64.
thread$xget32:
_thread$xget32:
	movl	(%rdi), %eax
	ret
.globl thread$xget64
.globl thread$xgetp
.globl _thread$xget64
.globl _thread$xgetp
# uint64 thread$xget64(uint64 *p) / void* thread$xgetp(void **p)
# Pointer and 64-bit variants share one body: both are plain aligned
# 64-bit loads, which are atomic on x86-64. p in %rdi; result in %rax.
thread$xget64:
thread$xgetp:
_thread$xget64:
_thread$xgetp:
	movq	(%rdi), %rax
	ret

# set variants
.globl thread$xset8
.globl _thread$xset8
# void thread$xset8(uint8 *p, uint8 v)
# SysV AMD64: p in %rdi, v in %sil.
# FIX: previous code used `movl %esi, (%rdi)` — a 4-byte store that
# clobbered the three bytes following *p. The setter must store exactly
# one byte, matching xget8/xadd8. A plain aligned byte store is atomic
# on x86-64.
thread$xset8:
_thread$xset8:
	movb	%sil, (%rdi)
	ret
.globl thread$xset32
.globl _thread$xset32
# void thread$xset32(uint32 *p, uint32 v)
# SysV AMD64: p in %rdi, v in %esi.
# A plain aligned 32-bit store is atomic on x86-64.
thread$xset32:
_thread$xset32:
	movl	%esi, (%rdi)
	ret
.globl thread$xset64
.globl thread$xsetp
.globl _thread$xset64
.globl _thread$xsetp
# void thread$xset64(uint64 *p, uint64 v) / thread$xsetp(void **p, void *v)
# SysV AMD64: p in %rdi, v in %rsi.
# A plain aligned 64-bit store is atomic on x86-64.
thread$xset64:
thread$xsetp:
_thread$xset64:
_thread$xsetp:
	movq	%rsi, (%rdi)
	ret

# add variants
.globl thread$xadd8
.globl _thread$xadd8
# uint8 thread$xadd8(uint8 *p, uint8 v)
# Atomically *p += v; returns the PREVIOUS value of *p.
# SysV AMD64: p in %rdi, v in %sil.
thread$xadd8:
_thread$xadd8:
	# xadd exchanges then adds: after it, %sil holds the old *p.
	lock xaddb	%sil, (%rdi)
	movb %sil,%al
	ret
.globl thread$xadd32
.globl _thread$xadd32
# uint32 thread$xadd32(uint32 *p, uint32 v)
# Atomically *p += v; returns the PREVIOUS value of *p.
# SysV AMD64: p in %rdi, v in %esi.
thread$xadd32:
_thread$xadd32:
	# xadd leaves the old *p in the source register (%esi).
	lock xaddl	%esi, (%rdi)
	movl %esi,%eax
	ret
.globl thread$xadd64
.globl thread$xaddp
.globl _thread$xadd64
.globl _thread$xaddp
# uint64 thread$xadd64(uint64 *p, uint64 v) / xaddp: pointer-sized variant.
# Atomically *p += v; returns the PREVIOUS value of *p.
# SysV AMD64: p in %rdi, v in %rsi.
thread$xadd64:
thread$xaddp:
_thread$xadd64:
_thread$xaddp:
	# xadd leaves the old *p in the source register (%rsi).
	lock xaddq	%rsi, (%rdi)
	movq %rsi,%rax
	ret

# cas variants 
.globl thread$xcas8
.globl _thread$xcas8
# uint8 thread$xcas8(uint8 *p, uint8 expected, uint8 new)
# Atomic compare-and-swap: if *p == expected, *p = new.
# Always returns the value *p held before the operation (equal to
# `expected` on success). SysV AMD64: p=%rdi, expected=%sil, new=%dl.
thread$xcas8:
_thread$xcas8:
	# cmpxchg compares %al with *p; on mismatch it loads *p into %al.
	movb	%sil, %al
	lock cmpxchgb	%dl, (%rdi)
	ret
.globl thread$xcas32
.globl _thread$xcas32
# uint32 thread$xcas32(uint32 *p, uint32 expected, uint32 new)
# Atomic compare-and-swap: if *p == expected, *p = new.
# Always returns *p's prior value. SysV AMD64: p=%rdi, expected=%esi, new=%edx.
thread$xcas32:
_thread$xcas32:
	# cmpxchg compares %eax with *p; on mismatch it loads *p into %eax.
	movl	%esi, %eax
	lock cmpxchgl	%edx, (%rdi)
	ret
.globl thread$xcas64
.globl thread$xcasp
.globl _thread$xcas64
.globl _thread$xcasp
# uint64 thread$xcas64(uint64 *p, uint64 expected, uint64 new)
# xcasp: pointer-sized variant, same body.
# Atomic compare-and-swap: if *p == expected, *p = new.
# Always returns *p's prior value. SysV AMD64: p=%rdi, expected=%rsi, new=%rdx.
thread$xcas64:
thread$xcasp:
_thread$xcas64:
_thread$xcasp:
	# cmpxchg compares %rax with *p; on mismatch it loads *p into %rax.
	movq		%rsi, %rax
	lock cmpxchgq	%rdx, (%rdi)
	ret

# xchg variants
.globl thread$xchg8
.globl _thread$xchg8
# uint8 thread$xchg8(uint8 *p, uint8 v)
# Atomically swaps *p with v; returns the PREVIOUS value of *p.
# SysV AMD64: p in %rdi, v in %sil.
thread$xchg8:
_thread$xchg8:
	movb		%sil, %al
	# NOTE: xchg with a memory operand is implicitly locked; the
	# explicit lock prefix is redundant but harmless.
	lock xchgb	(%rdi), %al
	ret
.globl thread$xchg32
.globl _thread$xchg32
# uint32 thread$xchg32(uint32 *p, uint32 v)
# Atomically swaps *p with v; returns the PREVIOUS value of *p.
# SysV AMD64: p in %rdi, v in %esi.
thread$xchg32:
_thread$xchg32:
	movl	%esi, %eax
	# xchg with memory is implicitly locked; explicit prefix redundant.
	lock xchgl	(%rdi), %eax
	ret
.globl thread$xchg64
.globl thread$xchgp
.globl _thread$xchg64
.globl _thread$xchgp
# uint64 thread$xchg64(uint64 *p, uint64 v) / xchgp: pointer-sized variant.
# Atomically swaps *p with v; returns the PREVIOUS value of *p.
# SysV AMD64: p in %rdi, v in %rsi.
thread$xchg64:
thread$xchgp:
_thread$xchg64:
_thread$xchgp:
	movq	%rsi, %rax
	# xchg with memory is implicitly locked; explicit prefix redundant.
	lock xchgq	(%rdi), %rax
	ret