[linux/fpc-iii.git] include/asm-generic/qspinlock_types.h
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H

#include <asm/byteorder.h>

/*
 * Including atomic.h with PARAVIRT on will cause compilation errors because
 * of recursive header file inclusion via paravirt_types.h. So don't include
 * it if PARAVIRT is on.
 */
#ifndef CONFIG_PARAVIRT
#include <linux/types.h>
#include <linux/atomic.h>
#endif

typedef struct qspinlock {
        union {
                atomic_t val;

                /*
                 * By using the whole 2nd least significant byte for the
                 * pending bit, we can allow better optimization of the lock
                 * acquisition for the pending bit holder.
                 */
#ifdef __LITTLE_ENDIAN
                struct {
                        u8      locked;
                        u8      pending;
                };
                struct {
                        u16     locked_pending;
                        u16     tail;
                };
#else
                struct {
                        u16     tail;
                        u16     locked_pending;
                };
                struct {
                        u8      reserved[2];
                        u8      pending;
                        u8      locked;
                };
#endif
        };
} arch_spinlock_t;
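
/*
 * A minimal stand-alone sketch of the little-endian layout above (compiled
 * on its own, not as part of this header), using <stdint.h> types in place
 * of the kernel's atomic_t/u8/u16.  The struct and program below are
 * illustrative only; the printed values assume a little-endian host.  The
 * point of the overlap is that one 16-bit store to locked_pending can clear
 * the pending byte and set the locked byte in a single operation.
 */
#include <stdint.h>
#include <stdio.h>

struct qspinlock_demo {
        union {
                uint32_t val;                   /* whole 32-bit lock word */
                struct {
                        uint8_t  locked;        /* bits  0- 7 */
                        uint8_t  pending;       /* bits  8-15 */
                        uint16_t tail;          /* bits 16-31 */
                };
                uint16_t locked_pending;        /* bits  0-15 as one half-word */
        };
};

int main(void)
{
        struct qspinlock_demo lock = { .val = 0 };

        /* Set the locked byte (bit 0) and the pending byte (bit 8). */
        lock.val = (1U << 0) | (1U << 8);

        /* Little-endian: locked=1 pending=1 locked_pending=0x101 tail=0 */
        printf("locked=%u pending=%u locked_pending=%#x tail=%#x\n",
               lock.locked, lock.pending, lock.locked_pending, lock.tail);
        return 0;
}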

/*
 * Initializer
 */
#define __ARCH_SPIN_LOCK_UNLOCKED       { { .val = ATOMIC_INIT(0) } }

/*
 * Bitfields in the atomic value:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */
#define _Q_SET_MASK(type)       (((1U << _Q_ ## type ## _BITS) - 1)\
                                      << _Q_ ## type ## _OFFSET)
#define _Q_LOCKED_OFFSET        0
#define _Q_LOCKED_BITS          8
#define _Q_LOCKED_MASK          _Q_SET_MASK(LOCKED)

#define _Q_PENDING_OFFSET       (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#if CONFIG_NR_CPUS < (1U << 14)
#define _Q_PENDING_BITS         8
#else
#define _Q_PENDING_BITS         1
#endif
#define _Q_PENDING_MASK         _Q_SET_MASK(PENDING)

#define _Q_TAIL_IDX_OFFSET      (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS        2
#define _Q_TAIL_IDX_MASK        _Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET      (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS        (32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK        _Q_SET_MASK(TAIL_CPU)

#define _Q_TAIL_OFFSET          _Q_TAIL_IDX_OFFSET
#define _Q_TAIL_MASK            (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

#define _Q_LOCKED_VAL           (1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL          (1U << _Q_PENDING_OFFSET)
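
/*
 * A minimal stand-alone sketch of the tail encoding under the NR_CPUS < 16K
 * layout (_Q_PENDING_BITS == 8): the per-CPU queue-node index occupies bits
 * 16-17 and the CPU number, stored +1 so that 0 means "no tail", occupies
 * bits 18-31.  The names below are local stand-ins mirroring the macro
 * arithmetic above, not the kernel's own helpers; compile separately.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TAIL_IDX_OFFSET    16      /* 8 locked bits + 8 pending bits */
#define DEMO_TAIL_CPU_OFFSET    18      /* after the 2 tail-index bits */

/* Pack a CPU number and queue-node index into the tail bits. */
static uint32_t demo_encode_tail(unsigned int cpu, unsigned int idx)
{
        return ((cpu + 1) << DEMO_TAIL_CPU_OFFSET) |
               (idx << DEMO_TAIL_IDX_OFFSET);
}

int main(void)
{
        uint32_t tail = demo_encode_tail(5, 1); /* CPU 5, second queue node */

        /* Prints: tail=0x190000 cpu=5 idx=1 */
        printf("tail=%#x cpu=%u idx=%u\n", tail,
               (tail >> DEMO_TAIL_CPU_OFFSET) - 1,
               (tail >> DEMO_TAIL_IDX_OFFSET) & 0x3);
        return 0;
}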

#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */