/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   Implementation of spinlocks.
 *
 * NOTE: none of the macros in this file are intended to be called directly.
 * Call them through the macros in spin.h.
 *
 * The following hardware-dependent macros must be provided for each
 * supported platform:
 *
 *	void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 *	int S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *		Should return number of "delays"; see s_lock.c
 *
 *	void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 *	bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free.  Returns true if free, false if locked.
 *		This does *not* change the state of the lock.
 *
 *	void SPIN_DELAY(void)
 *		Delay operation to occur inside spinlock wait loop.
 *
 *	Note to implementors: there are default implementations for all these
 *	macros at the bottom of the file.  Check if your platform can use
 *	these or needs to override them.
 *
 * Usually, S_LOCK() is implemented in terms of even lower-level macros
 * TAS() and TAS_SPIN():
 *
 *	int TAS(slock_t *lock)
 *		Atomic test-and-set instruction.  Attempt to acquire the lock,
 *		but do *not* wait.  Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 *	int TAS_SPIN(slock_t *lock)
 *		Like TAS(), but this version is used when waiting for a lock
 *		previously found to be contended.  By default, this is the
 *		same as TAS(), but on some architectures it's better to poll a
 *		contended lock using an unlocked instruction and retry the
 *		atomic test-and-set only when it appears free.
 *
 * TAS() and TAS_SPIN() are NOT part of the API, and should never be called
 * directly.
 *
 * CAUTION: on some platforms TAS() and/or TAS_SPIN() may sometimes report
 * failure to acquire a lock even when the lock is not locked.  For example,
 * on Alpha TAS() will "fail" if interrupted.  Therefore a retry loop must
 * always be used, even if you are certain the lock is free.
 *
 * It is the responsibility of these macros to make sure that the compiler
 * does not re-order accesses to shared memory to precede the actual lock
 * acquisition, or follow the lock release.  Prior to PostgreSQL 9.5, this
 * was the caller's responsibility, which meant that callers had to use
 * volatile-qualified pointers to refer to both the spinlock itself and the
 * shared data being accessed within the spinlocked critical section.  This
 * was notationally awkward, easy to forget (and thus error-prone), and
 * prevented some useful compiler optimizations.  For these reasons, we
 * now require that the macros themselves prevent compiler re-ordering,
 * so that the caller doesn't need to take special precautions.
 *
 * On platforms with weak memory ordering, the TAS(), TAS_SPIN(), and
 * S_UNLOCK() macros must further include hardware-level memory fence
 * instructions to prevent similar re-ordering at the hardware level.
 * TAS() and TAS_SPIN() must guarantee that loads and stores issued after
 * the macro are not executed until the lock has been obtained.  Conversely,
 * S_UNLOCK() must guarantee that loads and stores issued before the macro
 * have been executed before the lock is released.
 *
 * On most supported platforms, TAS() uses a tas() function written
 * in assembly language to execute a hardware atomic-test-and-set
 * instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *	  src/include/storage/s_lock.h
 *
 *-------------------------------------------------------------------------
 */
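/*
 * For orientation, a minimal usage sketch through the spin.h wrappers
 * (a sketch only: the "MyShared" struct and its fields are hypothetical
 * and not part of this file):
 *
 *		typedef struct { slock_t mutex; int counter; } MyShared;
 *
 *		SpinLockInit(&shared->mutex);
 *		...
 *		SpinLockAcquire(&shared->mutex);
 *		shared->counter++;					// keep critical sections short
 *		SpinLockRelease(&shared->mutex);
 */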
#ifndef S_LOCK_H
#define S_LOCK_H

#ifdef FRONTEND
#error "s_lock.h may not be included from frontend code"
#endif

#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__ so we test for both in those cases.
 */

/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):

	__asm__ __volatile__(
		"	instruction	\n"
		"	instruction	\n"
		"	instruction	\n"
:		"=r"(_res), "+m"(*lock)		// return register, in/out lock value
:		"r"(lock)					// lock pointer, in input register
:		"memory", "cc");			// show clobbered registers here

 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly.  This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code.  Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code.  Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */
#ifdef __i386__		/* 32-bit i386 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t		_res = 1;

	/*
	 * Use a non-locking test before asserting the bus lock.  Note that the
	 * extra test appears to be a small loss on some x86 platforms and a
	 * small win on others; it's by no means clear that we should keep it.
	 *
	 * When this was last tested, we didn't have separate TAS() and
	 * TAS_SPIN() macros.  Nowadays it probably would be better to do a
	 * non-locking test in TAS_SPIN() but not in TAS(), like on x86_64, but
	 * no-one's done the testing to verify that.  Without some empirical
	 * evidence, better to leave it alone.
	 */
	__asm__ __volatile__(
		"	cmpb	$0,%1	\n"
		"	jne		1f		\n"
		"	lock			\n"
		"	xchgb	%0,%1	\n"
		"1: \n"
:		"+q"(_res), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * This sequence is equivalent to the PAUSE instruction ("rep" is
	 * ignored by old IA32 processors if the following instruction is
	 * not a string operation); the IA-32 Architecture Software
	 * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
	 * PAUSE in the inner loop of a spin lock is necessary for good
	 * performance:
	 *
	 *     The PAUSE instruction improves the performance of IA-32
	 *     processors supporting Hyper-Threading Technology when
	 *     executing spin-wait loops and other routines where one
	 *     thread is accessing a shared lock or semaphore in a tight
	 *     polling loop.  When executing a spin-wait loop, the
	 *     processor can suffer a severe performance penalty when
	 *     exiting the loop because it detects a possible memory order
	 *     violation and flushes the core processor's pipeline.  The
	 *     PAUSE instruction provides a hint to the processor that the
	 *     code sequence is a spin-wait loop.  The processor uses this
	 *     hint to avoid the memory order violation and prevent the
	 *     pipeline flush.  In addition, the PAUSE instruction
	 *     de-pipelines the spin-wait loop to prevent it from
	 *     consuming execution resources excessively.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __i386__ */
#ifdef __x86_64__		/* AMD Opteron, Intel EM64T */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

/*
 * On Intel EM64T, it's a win to use a non-locking test before the xchg proper,
 * but only when spinning.
 *
 * See also Implementing Scalable Atomic Locks for Multi-Core Intel(tm) EM64T
 * and IA32, by Michael Chynoweth and Mary R. Lee.  As of this writing, it is
 * available at:
 * http://software.intel.com/en-us/articles/implementing-scalable-atomic-locks-for-multi-core-intel-em64t-and-ia32-architectures
 */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t		_res = 1;

	__asm__ __volatile__(
		"	lock			\n"
		"	xchgb	%0,%1	\n"
:		"+q"(_res), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * Adding a PAUSE in the spin delay loop is demonstrably a no-op on
	 * Opteron, but it may be of some use on EM64T, so we keep it.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __x86_64__ */
/*
 * On ARM and ARM64, we use __sync_lock_test_and_set(int *, int) if available.
 *
 * We use the int-width variant of the builtin because it works on more chips
 * than other widths.
 */
#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
#ifdef HAVE_GCC__SYNC_INT32_TAS
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

/*
 * Using an ISB instruction to delay in spinlock loops appears beneficial on
 * high-core-count ARM64 processors.  It seems mostly a wash for smaller gear,
 * and ISB doesn't exist at all on pre-v7 ARM chips.
 */
#if defined(__aarch64__)

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	__asm__ __volatile__(
		" isb;				\n");
}

#endif	 /* __aarch64__ */
#endif	 /* HAVE_GCC__SYNC_INT32_TAS */
#endif	 /* __arm__ || __arm || __aarch64__ */
/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)	   tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int			_res = 0;

	__asm__ __volatile__(
		"	cs 	%0,%3,0(%2)		\n"
:		"+d"(_res), "+m"(*lock)
:		"a"(lock), "d"(1)
:		"memory", "cc");
	return _res;
}

#endif	 /* __s390__ || __s390x__ */
#if defined(__sparc__)		/* Sparc */
/*
 * Solaris has always run sparc processors in TSO (total store order) mode,
 * but linux didn't use to and the *BSDs still don't.  So, be careful about
 * acquire/release semantics.  The CPU will treat superfluous membars as
 * NOPs, so it's just code space.
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t		_res;

	/*
	 * See comment in src/backend/port/tas/sunstudio_sparc.s for why this
	 * uses "ldstub", and that file uses "cas".  gcc currently generates
	 * sparcv7-targeted binaries, so "cas" use isn't possible.
	 */
	__asm__ __volatile__(
		"	ldstub	[%2], %0	\n"
:		"=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
#if defined(__sparcv7) || defined(__sparc_v7__)
	/*
	 * No stbar or membar available, luckily no actually produced hardware
	 * requires a barrier.
	 */
#elif defined(__sparcv8) || defined(__sparc_v8__)
	/* stbar is available (and required for both PSO, RMO), membar isn't */
	__asm__ __volatile__ ("stbar	 \n":::"memory");
#else
	/*
	 * #LoadStore (RMO) | #LoadLoad (RMO) together are the appropriate acquire
	 * barrier for sparcv8+ upwards.
	 */
	__asm__ __volatile__ ("membar #LoadStore | #LoadLoad \n":::"memory");
#endif
	return (int) _res;
}

#if defined(__sparcv7) || defined(__sparc_v7__)
/*
 * No stbar or membar available, luckily no actually produced hardware
 * requires a barrier.  We fall through to the default gcc definition of
 * S_UNLOCK in this case.
 */
#elif defined(__sparcv8) || defined(__sparc_v8__)
/* stbar is available (and required for both PSO, RMO), membar isn't */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("stbar	 \n":::"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#else
/*
 * #LoadStore (RMO) | #StoreStore (RMO, PSO) together are the appropriate
 * release barrier for sparcv8+ upwards.
 */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("membar #LoadStore | #StoreStore \n":::"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#endif

#endif	 /* __sparc__ */
/* PowerPC */
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On PPC, it's a win to use a non-locking test before the lwarx */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

/*
 * The second operand of addi can hold a constant zero or a register number,
 * hence constraint "=&b" to avoid allocating r0.  "b" stands for "address
 * base register"; most operands having this register-or-zero property are
 * address bases, e.g. the second operand of lwax.
 *
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 * But if the spinlock is in ordinary memory, we can use lwsync instead for
 * better performance.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t		_t;
	int			_res;

	__asm__ __volatile__(
		"	lwarx   %0,0,%3,1	\n"
		"	cmpwi   %0,0		\n"
		"	bne     1f			\n"
		"	addi    %0,%0,1		\n"
		"	stwcx.  %0,0,%3		\n"
		"	beq     2f			\n"
		"1: \n"
		"	li      %1,1		\n"
		"	b       3f			\n"
		"2: \n"
		"	lwsync				\n"
		"	li      %1,0		\n"
		"3: \n"
:		"=&b"(_t), "=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory", "cc");
	return _res;
}

/*
 * PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
 * But we can use lwsync instead for better performance.
 */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	lwsync \n" ::: "memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* powerpc */
#if defined(__mips__) && !defined(__sgi)	/* non-SGI MIPS */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/*
 * Original MIPS-I processors lacked the LL/SC instructions, but if we are
 * so unfortunate as to be running on one of those, we expect that the kernel
 * will handle the illegal-instruction traps and emulate them for us.  On
 * anything newer (and really, MIPS-I is extinct) LL/SC is the only sane
 * choice because any other synchronization method must involve a kernel
 * call.  Unfortunately, many toolchains still default to MIPS-I as the
 * codegen target; if the symbol __mips shows that that's the case, we
 * have to force the assembler to accept LL/SC.
 *
 * R10000 and up processors require a separate SYNC, which has the same
 * issues as LL/SC.
 */
#if __mips < 2
#define MIPS_SET_MIPS2	"       .set mips2          \n"
#else
#define MIPS_SET_MIPS2
#endif

static __inline__ int
tas(volatile slock_t *lock)
{
	volatile slock_t *_l = lock;
	int			_res;
	int			_tmp;

	__asm__ __volatile__(
		"       .set push           \n"
		MIPS_SET_MIPS2
		"       .set noreorder      \n"
		"       .set nomacro        \n"
		"       ll      %0, %2      \n"
		"       or      %1, %0, 1   \n"
		"       sc      %1, %2      \n"
		"       xori    %1, 1       \n"
		"       or      %0, %0, %1  \n"
		"       sync                \n"
		"       .set pop            "
:		"=&r" (_res), "=&r" (_tmp), "+R" (*_l)
:		/* no inputs */
:		"memory");
	return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__( \
		"       .set push           \n" \
		MIPS_SET_MIPS2 \
		"       .set noreorder      \n" \
		"       .set nomacro        \n" \
		"       sync                \n" \
		"       .set pop            " \
:		/* no outputs */ \
:		/* no inputs */	\
:		"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __mips__ && !__sgi */
/*
 * If we have no platform-specific knowledge, but we found that the compiler
 * provides __sync_lock_test_and_set(), use that.  Prefer the int-width
 * version over the char-width version if we have both, on the rather dubious
 * grounds that that's known to be more likely to work in the ARM ecosystem.
 * (But we dealt with ARM above.)
 */
#if !defined(HAS_TEST_AND_SET)

#if defined(HAVE_GCC__SYNC_INT32_TAS)
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#elif defined(HAVE_GCC__SYNC_CHAR_TAS)
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef char slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#endif	 /* HAVE_GCC__SYNC_INT32_TAS */

#endif	/* !defined(HAS_TEST_AND_SET) */
/*
 * Default implementation of S_UNLOCK() for gcc/icc.
 *
 * Note that this implementation is unsafe for any platform that can reorder
 * a memory access (either load or store) after a following store.  That
 * happens not to be possible on x86 and most legacy architectures (some are
 * single-processor!), but many modern systems have weaker memory ordering.
 * Those that do must define their own version of S_UNLOCK() rather than
 * relying on this one.
 */
#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)	\
	do { __asm__ __volatile__("" : : : "memory");  *(lock) = 0; } while (0)
#endif
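/*
 * To illustrate why the "memory" clobber matters (a hypothetical sketch,
 * not code from this file): without it, given
 *
 *		shared->count++;				// update protected by the spinlock
 *		S_UNLOCK(&shared->mutex);
 *
 * the compiler would be free to sink the store to shared->count past the
 * store that releases the lock.  The empty asm with a "memory" clobber
 * forbids exactly that compiler-level reordering; it emits no instructions
 * and therefore adds no protection against hardware reordering.
 */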
#endif	 /* defined(__GNUC__) || defined(__INTEL_COMPILER) */
/*
 * ---------------------------------------------------------------------
 * Platforms that use non-gcc inline assembly:
 * ---------------------------------------------------------------------
 */

#if !defined(HAS_TEST_AND_SET)	/* We didn't trigger above, let's try here */

/* These are in sunstudio_(sparc|x86).s */

#if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET

#if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus)
typedef unsigned int slock_t;
#else
typedef unsigned char slock_t;
#endif

extern slock_t pg_atomic_cas(volatile slock_t *lock, slock_t with,
							 slock_t cmp);

#define TAS(a) (pg_atomic_cas((a), 1, 0) != 0)
#endif


#ifdef _MSC_VER
typedef LONG slock_t;

#define HAS_TEST_AND_SET
#define TAS(lock) (InterlockedCompareExchange(lock, 1, 0))

#define SPIN_DELAY() spin_delay()

/* If using Visual C++ on Win64, inline assembly is unavailable.
 * Use a _mm_pause intrinsic instead of rep nop.
 */
#if defined(_WIN64)
static __forceinline void
spin_delay(void)
{
	_mm_pause();
}
#else
static __forceinline void
spin_delay(void)
{
	/* See comment for gcc code.  Same code, MASM syntax */
	__asm rep nop;
}
#endif

#include <intrin.h>
#pragma intrinsic(_ReadWriteBarrier)

#define S_UNLOCK(lock)	\
	do { _ReadWriteBarrier(); (*(lock)) = 0; } while (0)

#endif


#endif	/* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have spinlock support on this platform.  Please report this to pgsql-bugs@lists.postgresql.org.
#endif
/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
	(TAS(lock) ? s_lock((lock), __FILE__, __LINE__, __func__) : 0)
#endif	 /* S_LOCK */
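/*
 * For reference, the out-of-line slow path invoked above lives in s_lock.c;
 * its core is essentially the loop below (a sketch of that logic, not a
 * verbatim copy):
 *
 *		while (TAS_SPIN(lock))
 *			perform_spin_delay(&delayStatus);
 *
 * Note that acquisition is therefore always a retry loop: per the CAUTION
 * in the header comment, TAS() may fail spuriously, so no caller may assume
 * a single attempt suffices.
 */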
#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)	(*(lock) == 0)
#endif	 /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
/*
 * Our default implementation of S_UNLOCK is essentially *(lock) = 0.  This
 * is unsafe if the platform can reorder a memory access (either load or
 * store) after a following store; platforms where this is possible must
 * define their own S_UNLOCK.  But CPU reordering is not the only concern:
 * if we simply defined S_UNLOCK() as an inline macro, the compiler might
 * reorder instructions from inside the critical section to occur after the
 * lock release.  Since the compiler probably can't know what the external
 * function s_unlock is doing, putting the same logic there should be adequate.
 * A sufficiently-smart globally optimizing compiler could break that
 * assumption, though, and the cost of a function call for every spinlock
 * release may hurt performance significantly, so we use this implementation
 * only for platforms where we don't know of a suitable intrinsic.  For the
 * most part, those are relatively obscure platform/compiler combinations to
 * which the PostgreSQL project does not have access.
 */
#define USE_DEFAULT_S_UNLOCK
extern void s_unlock(volatile slock_t *lock);
#define S_UNLOCK(lock)		s_unlock(lock)
#endif	 /* S_UNLOCK */
#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)	S_UNLOCK(lock)
#endif	 /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()	((void) 0)
#endif	 /* SPIN_DELAY */

#if !defined(TAS)
extern int	tas(volatile slock_t *lock);	/* in port/.../tas.s, or
											 * s_lock.c */

#define TAS(lock)		tas(lock)
#endif	 /* TAS */

#if !defined(TAS_SPIN)
#define TAS_SPIN(lock)	TAS(lock)
#endif	 /* TAS_SPIN */
/*
 * Platform-independent out-of-line support routines
 */
extern int	s_lock(volatile slock_t *lock, const char *file, int line, const char *func);

/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int	update_spins_per_delay(int shared_spins_per_delay);

/*
 * Support for spin delay which is useful in various places where
 * spinlock-like procedures take place.
 */
typedef struct
{
	int			spins;
	int			delays;
	int			cur_delay;
	const char *file;
	int			line;
	const char *func;
} SpinDelayStatus;

static inline void
init_spin_delay(SpinDelayStatus *status,
				const char *file, int line, const char *func)
{
	status->spins = 0;
	status->delays = 0;
	status->cur_delay = 0;
	status->file = file;
	status->line = line;
	status->func = func;
}

#define init_local_spin_delay(status) init_spin_delay(status, __FILE__, __LINE__, __func__)
extern void perform_spin_delay(SpinDelayStatus *status);
extern void finish_spin_delay(SpinDelayStatus *status);
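/*
 * A typical wait loop built on these routines looks like the sketch below
 * ("shared_flag_set()" stands for whatever condition the caller polls; it
 * is hypothetical, not declared here):
 *
 *		SpinDelayStatus delayStatus;
 *
 *		init_local_spin_delay(&delayStatus);
 *		while (!shared_flag_set())
 *			perform_spin_delay(&delayStatus);
 *		finish_spin_delay(&delayStatus);
 *
 * finish_spin_delay() feeds the observed contention back into the global
 * spins_per_delay estimate.
 */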
#endif	 /* S_LOCK_H */