// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
#ifdef CONFIG_DEBUG_DEVRES
	const char			*name;
	size_t				size;
#endif
};

struct devres {
	struct devres_node		node;
	/* -- 3 pointers */
	unsigned long long		data[];	/* guarantee ull alignment */
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres), size,
					&tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			   const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end).  That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is closed.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special case: pointer to a string in .rodata returned by
	 * devm_kstrdup_const().
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate region of a memory using resource managed kmalloc
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);