root/maint/gnulib/lib/asyncsafe-spin.c

DEFINITIONS

This source file includes the following definitions.
  1. asyncsafe_spin_init
  2. do_lock
  3. do_unlock
  4. asyncsafe_spin_destroy
  5. asyncsafe_spin_init
  6. do_lock
  7. do_unlock
  8. asyncsafe_spin_init
  9. do_lock
  10. do_unlock
  11. asyncsafe_spin_init
  12. do_lock
  13. do_unlock
  14. asyncsafe_spin_init
  15. do_lock
  16. do_unlock
  17. memory_barrier
  18. atomic_compare_and_swap
  19. asyncsafe_spin_init
  20. do_lock
  21. do_unlock
  22. asyncsafe_spin_init
  23. do_lock
  24. do_unlock
  25. asyncsafe_spin_init
  26. do_lock
  27. do_unlock
  28. asyncsafe_spin_destroy
  29. asyncsafe_spin_lock
  30. asyncsafe_spin_unlock

/* Spin locks for communication between threads and signal handlers.
   Copyright (C) 2020-2021 Free Software Foundation, Inc.

   This file is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   This file is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */

#include <config.h>

/* Specification.  */
#include "asyncsafe-spin.h"

#include <stdbool.h>
#include <stdlib.h>
#if defined _AIX
# include <sys/atomic_op.h>
#endif

#if 0x590 <= __SUNPRO_C && __STDC__
# define asm __asm
#endif

#if defined _WIN32 && ! defined __CYGWIN__
/* Use Windows threads.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_init (lock);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_lock (lock);
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  if (glwthread_spin_unlock (lock))
    abort ();
}

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_destroy (lock);
}

#else

# if HAVE_PTHREAD_H
/* Use POSIX threads.  */

/* We don't use semaphores (although sem_post() is allowed in signal handlers),
   because that would require linking with -lrt on HP-UX 11, OSF/1, and
   Solaris 10, and also because on macOS only named semaphores work.

   We don't use the C11 <stdatomic.h> header (available in GCC >= 4.9) because
   that would require linking with -latomic.  */

#  if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) \
       || __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 1)) \
      && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.7 and clang >= 3.1) that operate on
   the first byte of the lock.
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>
 */

#   if 1
/* An implementation that verifies the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  asyncsafe_spinlock_t zero;
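  /* Added note: on failure, __atomic_compare_exchange_n writes the value it
     actually observed into ZERO, which is why ZERO is reset to 0 before
     every retry below.  */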
  while (!(zero = 0,
           __atomic_compare_exchange_n (lock, &zero, 1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  asyncsafe_spinlock_t one = 1;
  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
}

#   else
/* An implementation that is a little bit more optimized, but does not verify
   the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

#   endif

#  elif (((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) \
          && !defined __sparc__) \
         || __clang_major__ >= 3) \
        && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.1, except on SPARC, and
   clang >= 3.0).
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html>  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  __sync_synchronize ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
    abort ();
}

#  elif defined _AIX
/* AIX */
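/* Added note, per the AIX <sys/atomic_op.h> documentation: _check_lock
   (word, old, new) atomically stores NEW into *WORD if *WORD equals OLD;
   it returns false (0) when the store happened and true otherwise.
   _clear_lock atomically stores its second argument.  */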

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  _clear_lock (vp, 0);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  while (_check_lock (vp, 0, 1))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  if (_check_lock (vp, 1, 0))
    abort ();
}

#  elif ((defined __GNUC__ || defined __clang__ || defined __SUNPRO_C) \
         && (defined __sparc || defined __i386 || defined __x86_64__)) \
        || (defined __TINYC__ && (defined __i386 || defined __x86_64__))
/* For older versions of GCC or clang, use inline assembly.
   GCC, clang, and the Oracle Studio C 12 compiler understand GCC's extended
   asm syntax, but the plain Oracle Studio C 11 compiler understands only
   simple asm.  */
/* An implementation that verifies the unlocks.  */

static void
memory_barrier (void)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
#    if defined __i386 || defined __x86_64__
#     if defined __TINYC__ && defined __i386
  /* Cannot use the SSE instruction "mfence" with this compiler.  */
  asm volatile ("lock orl $0,(%esp)");
#     else
  asm volatile ("mfence");
#     endif
#    endif
#    if defined __sparc
  asm volatile ("membar 2");
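  /* Added note: the immediate 2 is the SPARC V9 membar mask #StoreLoad.  */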
#    endif
#   else
#    if defined __i386 || defined __x86_64__
  asm ("mfence");
#    endif
#    if defined __sparc
  asm ("membar 2");
#    endif
#   endif
}

/* Store NEWVAL in *VP if the old value *VP equals CMP.
   Return the old value.  */
static unsigned int
atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
                         unsigned int newval)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
  unsigned int oldval;
#    if defined __i386 || defined __x86_64__
  asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#    endif
#    if defined __sparc
  asm volatile (" cas [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#    endif
  return oldval;
#   else /* __SUNPRO_C */
#    if defined __x86_64__
  asm (" movl %esi,%eax\n"
       " lock\n cmpxchgl %edx,(%rdi)");
#    elif defined __i386
  asm (" movl 16(%ebp),%ecx\n"
       " movl 12(%ebp),%eax\n"
       " movl 8(%ebp),%edx\n"
       " lock\n cmpxchgl %ecx,(%edx)");
#    endif
#    if defined __sparc
  asm (" cas [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#    endif
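  /* Added note: with these simple-asm compilers, the result of the cmpxchgl /
     cas is left in the function's return-value register, so falling off the
     end of the function without a 'return' statement is intentional here.  */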
#   endif
}

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  memory_barrier ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (atomic_compare_and_swap (vp, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  if (atomic_compare_and_swap (vp, 1, 0) != 1)
    abort ();
}

#  else
/* Fallback code.  It has race conditions: for example, the test of *lock and
   the subsequent store in do_lock are not atomic, so two contenders can both
   observe the lock as free and both take it.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (*vp)
    ;
  *vp = 1;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

#  endif

# else
/* Provide a dummy implementation for single-threaded applications.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
}

# endif

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
}

#endif

void
asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
                     const sigset_t *mask, sigset_t *saved_mask)
{
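  /* Added note: the signals in MASK are blocked before the lock is taken, so
     that a handler for one of them can never run on this thread while the
     lock is held; otherwise such a handler could spin forever on a lock that
     this thread can never release.  */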
  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
  do_lock (lock);
}

void
asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
{
  do_unlock (lock);
  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}
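
A minimal usage sketch (not part of the file above). It shows the intended
pattern: every thread that takes the lock blocks the signal for the duration,
so the handler can never interrupt a lock holder on its own thread; if the
handler fires on another thread, it simply spins until the holder releases
the lock. The names counter, counter_lock, alarm_mask, increment_from_thread,
and on_sigalrm are illustrative, not part of the gnulib API.

#include <signal.h>
#include "asyncsafe-spin.h"

static asyncsafe_spinlock_t counter_lock; /* set up with asyncsafe_spin_init */
static sigset_t alarm_mask;               /* set up to contain SIGALRM */
static volatile unsigned int counter;

/* Ordinary thread context: SIGALRM is blocked while the lock is held.  */
static void
increment_from_thread (void)
{
  sigset_t saved_mask;
  asyncsafe_spin_lock (&counter_lock, &alarm_mask, &saved_mask);
  counter++;
  asyncsafe_spin_unlock (&counter_lock, &saved_mask);
}

/* Signal handler context: safe, because no thread can be interrupted by
   this handler while it holds counter_lock, and sigprocmask is
   async-signal-safe.  */
static void
on_sigalrm (int sig)
{
  sigset_t saved_mask;
  asyncsafe_spin_lock (&counter_lock, &alarm_mask, &saved_mask);
  counter++;
  asyncsafe_spin_unlock (&counter_lock, &saved_mask);
}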
