// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device,
 * usually when userspace has some open files and other handles to resources
 * still open.
 *
 * Release actions can be added with drmm_add_action(), memory allocations can
 * be done directly with drmm_kmalloc() and the related functions. Everything
 * will be released on the final drm_dev_put() in reverse order of how the
 * release actions have been added and memory has been allocated since driver
 * loading started with devm_drm_dev_alloc().
 *
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver, all the functions are fully concurrent
 * safe. But it is recommended to use managed resources only for resources that
 * change rarely, if ever, during the lifetime of the &drm_device instance.
 */

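/*
 * For illustration, a minimal sketch of how a driver might use these helpers
 * from its probe path. The names my_device, my_driver_funcs and my_hw_fini
 * are hypothetical and not part of this file; drmm_add_action_or_reset() and
 * devm_drm_dev_alloc() are the wrappers declared in <drm/drm_managed.h> and
 * <drm/drm_drv.h>:
 *
 *	static void my_hw_fini(struct drm_device *drm, void *data)
 *	{
 *		// runs on the final drm_dev_put(), after userspace is gone
 *	}
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_device *my;
 *
 *		my = devm_drm_dev_alloc(&pdev->dev, &my_driver_funcs,
 *					struct my_device, drm);
 *		if (IS_ERR(my))
 *			return PTR_ERR(my);
 *
 *		return drmm_add_action_or_reset(&my->drm, my_hw_fini, NULL);
 *	}
 */
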
struct drmres_node {
	struct list_head	entry;
	drmres_release_t	release;
	const char		*name;
	size_t			size;
};

struct drmres {
	struct drmres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		/* release is NULL for plain drmm_kmalloc() allocations */
		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres * alloc_dr(drmres_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}

int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void*) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

int __drmm_add_action_or_reset(struct drm_device *dev,
			       drmres_release_t action,
			       void *data, const char *name)
{
	int ret;

	ret = __drmm_add_action(dev, action, data, name);
	if (ret)
		action(dev, data);

	return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);

/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);

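/*
 * Example (illustrative sketch only): allocating zeroed per-device state
 * whose lifetime matches the drm_device. The struct my_state name is
 * hypothetical and not part of this file:
 *
 *	struct my_state *state;
 *
 *	state = drmm_kmalloc(dev, sizeof(*state), GFP_KERNEL | __GFP_ZERO);
 *	if (!state)
 *		return -ENOMEM;
 */
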
/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);

	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);

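/*
 * Example (illustrative sketch only): keeping a managed copy of a name
 * string; the label pointer below is hypothetical and the duplicated memory
 * is freed automatically on the final drm_dev_put():
 *
 *	const char *label = drmm_kstrdup(dev, "panel-left", GFP_KERNEL);
 *	if (!label)
 *		return -ENOMEM;
 */
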
/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any of its related
 * functions before the final drm_dev_put() of @dev.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);
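
/*
 * Example (illustrative sketch only): releasing a managed allocation early,
 * e.g. when an optional feature probe fails; buf is assumed to have come
 * from drmm_kmalloc() on the same dev:
 *
 *	buf = drmm_kmalloc(dev, size, GFP_KERNEL);
 *	...
 *	drmm_kfree(dev, buf);	// no need to wait for the final drm_dev_put()
 */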