// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US	100

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides easy-to-use API which makes the hwspinlock core code simple
 * and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the ID's of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronisation.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);

/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must serialize calls to
 * this routine with a mutex or spinlock of its own. This mode exists for
 * scenarios where the user needs to perform time-consuming or sleepable
 * operations under the hardware lock, and therefore needs a sleepable lock
 * (like a mutex) to protect them.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is
 * disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible. This is required in order to minimize
 * remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);

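/*
 * Illustrative usage (a sketch, not part of this file's API surface):
 * callers normally use the static inline wrappers from <linux/hwspinlock.h>
 * rather than calling __hwspin_trylock() directly. E.g., guarding a short
 * critical section with the IRQSTATE variant (shared_reg and val are
 * hypothetical names):
 *
 *	unsigned long flags;
 *
 *	if (hwspin_trylock_irqsave(hwlock, &flags))
 *		return -EBUSY;			(lock is held elsewhere)
 *	writel(val, shared_reg);		(keep this section short)
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */
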
/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must serialize calls to
 * this routine with a mutex or spinlock of its own. This mode exists for
 * scenarios where the user needs to perform time-consuming or sleepable
 * operations under the hardware lock, and therefore needs a sleepable lock
 * (like a mutex) to protect them.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, hence it should not exceed a few msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is
 * disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible. This is required in order to minimize
 * remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

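/*
 * Illustrative usage (a sketch): the common pattern is the
 * hwspin_lock_timeout_irqsave() wrapper from <linux/hwspinlock.h>, which
 * maps to this function with HWLOCK_IRQSTATE and busy-waits for up to the
 * given number of msecs:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
 *	if (ret)
 *		return ret;	(most notably -ETIMEDOUT)
 *	...short critical section...
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */
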
/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether their previous state should be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (hwlock->bank->dev->of_node == args.np) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);

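/*
 * Illustrative DT consumer flow (a sketch; node and property values are
 * hypothetical): given a client node carrying, e.g.,
 * "hwlocks = <&hwlock_dev 2>;", a driver would typically do:
 *
 *	int id;
 *
 *	id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	if (id < 0)
 *		return id;	(may be -EPROBE_DEFER, propagate it)
 *	hwlock = hwspin_lock_request_specific(id);
 */
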
/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock)
		pr_err("failed to delete hwspinlock %d\n", id);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);

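/*
 * Illustrative registration sketch (driver names are hypothetical): a
 * platform driver backs the hwspinlock_ops with its own register accessors
 * and registers a bank of locks from probe():
 *
 *	static const struct hwspinlock_ops my_hwspinlock_ops = {
 *		.trylock = my_hwspinlock_trylock,
 *		.unlock	 = my_hwspinlock_unlock,
 *		.relax	 = my_hwspinlock_relax,	(optional)
 *	};
 *
 *	bank = devm_kzalloc(dev, struct_size(bank, lock, num_locks),
 *			    GFP_KERNEL);
 *	if (!bank)
 *		return -ENOMEM;
 *	ret = devm_hwspin_lock_register(dev, bank, &my_hwspinlock_ops,
 *					0, num_locks);
 */
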
/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);

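/*
 * Illustrative dynamic-allocation sketch: a user that does not care which
 * lock it gets requests one and then publishes the lock id to the remote
 * core (the transport, my_send_to_remote(), is hypothetical):
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *	my_send_to_remote(hwspin_lock_get_id(hwlock));
 */
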
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		/* already in use, so tell the caller */
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *					 a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");