include/asm-generic/qspinlock_types.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H

/*
 * Including atomic.h with PARAVIRT on will cause compilation errors because
 * of recursive header file inclusion via paravirt_types.h. So don't include
 * it if PARAVIRT is on.
 */
#ifndef CONFIG_PARAVIRT
#include <linux/types.h>
#include <linux/atomic.h>
#endif

typedef struct qspinlock {
	union {
		atomic_t val;

		/*
		 * By using the whole 2nd least significant byte for the
		 * pending bit, we can allow better optimization of the lock
		 * acquisition for the pending bit holder.
		 */
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
} arch_spinlock_t;
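
/*
 * Layout example (illustrative values, assuming a little-endian machine
 * and NR_CPUS < 16K): a raw val of 0x00040101 decodes as locked = 0x01
 * (byte 0), pending = 0x01 (byte 1) and tail = 0x0004 (bytes 2-3),
 * i.e. locked_pending reads as 0x0101 and the tail encodes tail index 0
 * for tail cpu 0 (+1 encoding).
 */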

/*
 * Initializer
 */
#define	__ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }
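
/*
 * Usage sketch (illustrative; the name example_lock is arbitrary):
 *
 *	static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 */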

/*
 * Bitfields in the atomic value:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */
#define _Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
				      << _Q_ ## type ## _OFFSET)
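/*
 * For illustration, _Q_SET_MASK(LOCKED) expands (with the values below)
 * to ((1U << 8) - 1) << 0, i.e. 0x000000ff.
 */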
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#if CONFIG_NR_CPUS < (1U << 14)
#define _Q_PENDING_BITS		8
#else
#define _Q_PENDING_BITS		1
#endif
#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
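
/*
 * Resulting constants for the common NR_CPUS < 16K case (worked out
 * here for illustration):
 *
 *	_Q_LOCKED_MASK		0x000000ff
 *	_Q_PENDING_MASK		0x0000ff00
 *	_Q_TAIL_IDX_MASK	0x00030000
 *	_Q_TAIL_CPU_MASK	0xfffc0000
 *	_Q_TAIL_MASK		0xffff0000
 *	_Q_LOCKED_VAL		0x00000001
 *	_Q_PENDING_VAL		0x00000100
 */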

#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */