/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03 /* Don't disable preemption or interrupts */
#define HWLOCK_IN_ATOMIC	0x04 /* Called while in atomic context */
struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;
/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
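
/*
 * Example: a minimal, hypothetical sketch of board platform data for a
 * setup with two hwspinlock devices of eight locks each (device names
 * and lock counts are illustrative only):
 *
 *	static struct hwspinlock_pdata bank_a_pdata = { .base_id = 0 };
 *	static struct hwspinlock_pdata bank_b_pdata = { .base_id = 8 };
 *
 * Bank A's locks then get system-wide ids 0..7 and bank B's get 8..15,
 * so every physical lock has a unique, agreed-upon id.
 */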
#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
			  unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks);
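
/*
 * Example: a hedged sketch of how a hwspinlock controller driver might
 * register a bank of 32 locks from its probe routine; my_hwlock_ops and
 * my_alloc_bank() are hypothetical stand-ins for the driver's ops table
 * and bank allocation:
 *
 *	static int my_hwlock_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock_device *bank = my_alloc_bank(pdev, 32);
 *
 *		if (!bank)
 *			return -ENOMEM;
 *
 *		return devm_hwspin_lock_register(&pdev->dev, bank,
 *						 &my_hwlock_ops, 0, 32);
 *	}
 *
 * With the devm_ variant, the bank is unregistered automatically when
 * the device is unbound.
 */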
#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, users will still build and run.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (there is no point in registering
 * hwspinlock instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for
 * NULL-checking users. Callers that do care can still check this
 * with IS_ERR().
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
	return 0;
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}

static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */
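
/*
 * Example: because the stubs above silently succeed, code that uses
 * hwspinlocks only opportunistically can call the API unconditionally.
 * A hedged sketch (with CONFIG_HWSPINLOCK=n, the call below compiles to
 * a stub that returns 0 and the "lock" becomes a no-op):
 *
 *	ret = hwspin_lock_timeout(hwlock, 10);
 *	if (ret)
 *		return ret;
 */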
/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
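
/*
 * Example: a hedged usage sketch pairing hwspin_trylock_irqsave() with
 * hwspin_unlock_irqrestore(); hwlock, val and shared_reg are hypothetical:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;
 *	writel(val, shared_reg);
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 *
 * The critical section must stay short and must not sleep, since both
 * preemption and local interrupts are disabled while the lock is held.
 */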
/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}
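
/*
 * Example: a hedged sketch pairing hwspin_trylock_irq() with
 * hwspin_unlock_irq(); suitable when the caller knows local interrupts
 * are enabled on entry and wants them unconditionally re-enabled on
 * unlock (update_shared_state() is hypothetical):
 *
 *	ret = hwspin_trylock_irq(hwlock);
 *	if (ret)
 *		return ret;
 *	update_shared_state();
 *	hwspin_unlock_irq(hwlock);
 */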
/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect the routine that takes the hardware
 * lock with a mutex or spinlock to avoid deadlock; in return, the caller
 * may perform time-consuming or sleepable operations while holding the
 * hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
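
/*
 * Example: a hedged sketch of the pattern the caution above describes.
 * A driver-private mutex serializes local callers (preventing deadlock
 * among them), while the raw hwspinlock synchronizes against remote
 * cores; my_drv, its sw_lock mutex and the slow-handshake helper are
 * hypothetical:
 *
 *	mutex_lock(&my_drv->sw_lock);
 *	do {
 *		ret = hwspin_trylock_raw(hwlock);
 *		if (ret == -EBUSY)
 *			usleep_range(100, 200);
 *	} while (ret == -EBUSY);
 *	if (!ret) {
 *		do_slow_firmware_handshake(my_drv);
 *		hwspin_unlock_raw(hwlock);
 *	}
 *	mutex_unlock(&my_drv->sw_lock);
 *
 * Sleeping inside the loop (and under the hardware lock) is permitted
 * here precisely because raw mode disables neither preemption nor
 * interrupts.
 */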
/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}
/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}
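
/*
 * Example: a hedged sketch of a bounded retry loop around hwspin_trylock()
 * (the retry count is arbitrary; update_shared_state() is hypothetical):
 *
 *	int i, ret = -EBUSY;
 *
 *	for (i = 0; i < 100 && ret == -EBUSY; i++)
 *		ret = hwspin_trylock(hwlock);
 *	if (!ret) {
 *		update_shared_state();
 *		hwspin_unlock(hwlock);
 *	}
 *
 * For a time-bounded wait, prefer hwspin_lock_timeout() below, which
 * implements the busy-wait internally.
 */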
/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (plus the previous interrupt state is saved),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
				unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}
/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must protect the routine that takes the hardware
 * lock with a mutex or spinlock to avoid deadlock; in return, the caller
 * may perform time-consuming or sleepable operations while holding the
 * hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}
/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context, and the
 * timeout value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
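
/*
 * Example: a hedged sketch of taking a hwspinlock from a context that is
 * already atomic, e.g. under a local spinlock; my_drv and its fields are
 * hypothetical, and the 5 msec timeout respects the "few msecs" rule
 * above:
 *
 *	spin_lock_irqsave(&my_drv->lock, flags);
 *	ret = hwspin_lock_timeout_in_atomic(hwlock, 5);
 *	if (!ret) {
 *		my_drv->shared_counter++;
 *		hwspin_unlock_in_atomic(hwlock);
 *	}
 *	spin_unlock_irqrestore(&my_drv->lock, flags);
 */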
/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
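
/*
 * Example: a hedged end-to-end sketch - request an unused lock, take it
 * with a 10 msec timeout, touch state shared with a remote core, then
 * release and free it; write_shared_mailbox() and msg are hypothetical,
 * and the NULL check follows the note in the !CONFIG_HWSPINLOCK section
 * above:
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request();
 *	int ret;
 *
 *	if (!hwlock)
 *		return -EBUSY;
 *	ret = hwspin_lock_timeout(hwlock, 10);
 *	if (!ret) {
 *		write_shared_mailbox(msg);
 *		hwspin_unlock(hwlock);
 *	}
 *	hwspin_lock_free(hwlock);
 *	return ret;
 */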
/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}
/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. It should be used to undo, e.g.,
 * hwspin_trylock_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}
/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}
/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_in_atomic())
 * before calling this function: it is a bug to call unlock on a @hwlock
 * that is already unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}
/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}
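
/*
 * Example: a hedged sketch of the device-managed flow - a lock obtained
 * with devm_hwspin_lock_request_specific() is released automatically when
 * the device is unbound, so no explicit hwspin_lock_free() is needed.
 * This assumes the device's DT node carries a "hwlocks" phandle that
 * of_hwspin_lock_get_id() can resolve:
 *
 *	int id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	struct hwspinlock *hwlock;
 *
 *	if (id < 0)
 *		return id;
 *	hwlock = devm_hwspin_lock_request_specific(dev, id);
 *	if (!hwlock)
 *		return -EBUSY;
 */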

#endif /* __LINUX_HWSPINLOCK_H */