This source file includes the following definitions (a brief usage sketch follows the list):
- asyncsafe_spin_init
- do_lock
- do_unlock
- asyncsafe_spin_destroy
- asyncsafe_spin_init
- do_lock
- do_unlock
- asyncsafe_spin_init
- do_lock
- do_unlock
- asyncsafe_spin_init
- do_lock
- do_unlock
- asyncsafe_spin_init
- do_lock
- do_unlock
- memory_barrier
- atomic_compare_and_swap
- asyncsafe_spin_init
- do_lock
- do_unlock
- asyncsafe_spin_init
- do_lock
- do_unlock
- asyncsafe_spin_init
- do_lock
- do_unlock
- asyncsafe_spin_destroy
- asyncsafe_spin_lock
- asyncsafe_spin_unlock
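A minimal usage sketch (not part of the source below; the names counter_lock, counter, and increment_counter are made up for illustration): block signals via the mask argument, acquire the lock, do the critical-section work, release the lock, and restore the previous signal mask.

/* Hypothetical caller code: a counter shared between ordinary code and a
   signal handler, protected by the async-safe spin lock.  */
#include <signal.h>
#include "asyncsafe-spin.h"

static asyncsafe_spinlock_t counter_lock;  /* initialized once via asyncsafe_spin_init */
static volatile unsigned long counter;

static void
increment_counter (void)
{
  sigset_t mask, saved_mask;

  /* Block every blockable signal while the lock is held, so that a signal
     handler running on this thread cannot spin forever on the same lock.  */
  sigfillset (&mask);

  asyncsafe_spin_lock (&counter_lock, &mask, &saved_mask);
  counter++;                                 /* critical section */
  asyncsafe_spin_unlock (&counter_lock, &saved_mask);
}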
#include <config.h>

/* Specification.  */
#include "asyncsafe-spin.h"

#include <stdbool.h>
#include <stdlib.h>
#if defined _AIX
# include <sys/atomic_op.h>
#endif

/* With Sun C in strict C mode, spell the 'asm' keyword as '__asm'.  */
#if 0x590 <= __SUNPRO_C && __STDC__
# define asm __asm
#endif

#if defined _WIN32 && ! defined __CYGWIN__
/* Use the Windows threads glwthread spin lock.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_init (lock);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_lock (lock);
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  if (glwthread_spin_unlock (lock))
    abort ();
}

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_destroy (lock);
}

#else

# if HAVE_PTHREAD_H
/* POSIX threads are available: provide a real spin lock.  It is implemented
   without any pthread calls, so that locking and unlocking remain
   async-signal-safe.  */

#  if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) \
       || __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 1)) \
      && !defined __ibmxl__
/* Use the GCC/clang '__atomic' built-ins (GCC >= 4.7, clang >= 3.1).  */

#   if 1
/* This variant verifies that each unlock matches a held lock.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  asyncsafe_spinlock_t zero;
  while (!(zero = 0,
           __atomic_compare_exchange_n (lock, &zero, 1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, replace it with 0; otherwise the lock was not held.  */
  asyncsafe_spinlock_t one = 1;
  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
}

#   else
/* A variant that does not verify the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

#   endif

#  elif (((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) \
          && !defined __sparc__) \
         || __clang_major__ >= 3) \
        && !defined __ibmxl__
/* Use the older GCC/clang '__sync' built-ins (GCC >= 4.1).  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  __sync_synchronize ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, replace it with 0; otherwise the lock was not held.  */
  if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
    abort ();
}

#  elif defined _AIX
/* Use the AIX atomic lock primitives from <sys/atomic_op.h>.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  _clear_lock (vp, 0);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  while (_check_lock (vp, 0, 1))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  if (_check_lock (vp, 1, 0))
    abort ();
}

#  elif ((defined __GNUC__ || defined __clang__ || defined __SUNPRO_C) \
         && (defined __sparc || defined __i386 || defined __x86_64__)) \
        || (defined __TINYC__ && (defined __i386 || defined __x86_64__))
/* Use inline assembly, for GCC, clang, Sun C, or tinycc on SPARC and x86.  */

/* Issue a full memory barrier.  */
static void
memory_barrier (void)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
#    if defined __i386 || defined __x86_64__
#     if defined __TINYC__ && defined __i386
  /* With tcc on i386, use a locked no-op instead of 'mfence'; a locked
     instruction also acts as a full memory barrier.  */
  asm volatile ("lock orl $0,(%esp)");
#     else
  asm volatile ("mfence");
#     endif
#    endif
#    if defined __sparc
  asm volatile ("membar 2");
#    endif
#   else
#    if defined __i386 || defined __x86_64__
  asm ("mfence");
#    endif
#    if defined __sparc
  asm ("membar 2");
#    endif
#   endif
}

/* Store newval in *vp if the old value of *vp equals cmp.
   Return the old value of *vp.  */
static unsigned int
atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
                         unsigned int newval)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
  unsigned int oldval;
#    if defined __i386 || defined __x86_64__
  asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#    endif
#    if defined __sparc
  asm volatile (" cas [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#    endif
  return oldval;
#   else
  /* Old Sun C: the instruction sequence leaves the result in the return
     register (%eax resp. %i0), so no explicit return statement is used.  */
#    if defined __x86_64__
  asm (" movl %esi,%eax\n"
       " lock\n cmpxchgl %edx,(%rdi)");
#    elif defined __i386
  asm (" movl 16(%ebp),%ecx\n"
       " movl 12(%ebp),%eax\n"
       " movl 8(%ebp),%edx\n"
       " lock\n cmpxchgl %ecx,(%edx)");
#    endif
#    if defined __sparc
  asm (" cas [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#    endif
#   endif
}

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  memory_barrier ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (atomic_compare_and_swap (vp, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  if (atomic_compare_and_swap (vp, 1, 0) != 1)
    abort ();
}

#  else
/* Fallback: a plain volatile flag.  The test and the set in do_lock are not
   a single atomic operation, so this variant is racy and best-effort only.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (*vp)
    ;
  *vp = 1;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

#  endif

# else
/* No <pthread.h>: assume a single-threaded program, where dummy operations
   suffice.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
}

# endif

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
}

#endif

void
asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
                     const sigset_t *mask, sigset_t *saved_mask)
{
  sigprocmask (SIG_BLOCK, mask, saved_mask);
  do_lock (lock);
}

void
asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
{
  do_unlock (lock);
  sigprocmask (SIG_SETMASK, saved_mask, NULL);
}