kernel/locking/osq_lock.c
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
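
/*
 * Round-trip example: osq_lock() running on CPU 2 stores encode_cpu(2) == 3
 * in lock->tail; decode_cpu(3) maps that value back to CPU 2's osq_node, and
 * node_cpu() recovers the plain CPU number for vcpu_is_preempted().
 */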

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in queue, then the 'old' value will be
	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
	 * we're currently last in queue, the queue will then become empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax();
	}

	return next;
}

bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with corresponding RELEASE in
	 * unlock() uncontended, or fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail.
	 */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;

	/*
	 * osq_lock()			unqueue
	 *
	 * node->prev = prev		osq_wait_next()
	 * WMB				MB
	 * prev->next = node		next->prev = prev // unqueue-C
	 *
	 * Here 'node->prev' and 'next->prev' are the same variable and we need
	 * to ensure these stores happen in-order to avoid corrupting the list.
	 */
	smp_wmb();

	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store; because at that
	 * moment unlock can proceed and wipe the node element from stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!READ_ONCE(node->locked)) {
		/*
		 * If we need to reschedule, bail... so we can block.
		 * Use vcpu_is_preempted() to avoid waiting for a preempted
		 * lock holder:
		 */
		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
			goto unqueue;

		cpu_relax();
	}
	return true;

unqueue:
	/*
	 * Step - A  -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
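
/*
 * A minimal usage sketch, not part of osq_lock.c itself: how a sleeping
 * lock's optimistic-spin slow path typically brackets its spinning with
 * osq_lock()/osq_unlock().  struct my_sleeping_lock, my_trylock() and
 * my_lock_owner_running() are hypothetical stand-ins supplied by the lock
 * implementation, not kernel APIs; the real users are the mutex and
 * rw-semaphore slow paths.
 */
struct my_sleeping_lock {
	struct optimistic_spin_queue	osq;
	/* the real lock word, owner and wait list would live here */
};

/* Provided by the (hypothetical) lock implementation. */
static bool my_trylock(struct my_sleeping_lock *lock);
static bool my_lock_owner_running(struct my_sleeping_lock *lock);

static bool my_optimistic_spin(struct my_sleeping_lock *lock)
{
	bool taken = false;

	/* The per-CPU osq_node must not migrate while we are queued. */
	preempt_disable();

	/* osq_lock() returns false if it had to bail (need_resched()) while queued. */
	if (!osq_lock(&lock->osq))
		goto out;

	/* Spin while the owner is still running; stop once we take the lock. */
	while (!(taken = my_trylock(lock))) {
		if (need_resched() || !my_lock_owner_running(lock))
			break;
		cpu_relax();
	}

	osq_unlock(&lock->osq);
out:
	preempt_enable();
	return taken;
}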