// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device. It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple. The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages. Each page in the page_list is split into blocks of at
 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
 * list of free blocks across all pages. Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
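/*
 * Worked example of the layout described above (illustrative only): with a
 * 64-byte block size and a one-page (4096-byte) allocation, each page is
 * carved into 4096 / 64 = 64 blocks.  While a block is free, its own first
 * bytes are reused to hold the 'struct dma_block' link, so the free list
 * needs no storage beyond the blocks themselves.
 */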
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#ifdef CONFIG_SLUB_DEBUG_ON
#define DMAPOOL_DEBUG 1
#endif
struct dma_block {
	struct dma_block *next_block;
	dma_addr_t dma;
};

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct dma_block *next_block;
	size_t nr_blocks;
	size_t nr_active;
	size_t nr_pages;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);
static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_pool *pool;
	unsigned size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);
#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
			       data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}
static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}
static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif
static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}
static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory. Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives. The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	empty = list_empty(&dev->dma_pools);
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
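
/*
 * Example usage sketch (hypothetical driver code, not referenced anywhere in
 * this file): create a pool of 64-byte descriptors, aligned to 16 bytes,
 * that never cross a 256-byte boundary.
 */
static __maybe_unused struct dma_pool *dmapool_example_create(struct device *dev)
{
	struct dma_pool *pool;

	pool = dma_pool_create("example_descs", dev, 64, 16, 256);
	if (!pool)
		dev_warn(dev, "failed to create example descriptor pool\n");
	return pool;
}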
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	empty = list_empty(&pool->dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
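
/*
 * Example teardown sketch (hypothetical driver code): every block must be
 * returned with dma_pool_free() before the pool itself is destroyed.
 */
static __maybe_unused void dmapool_example_teardown(struct dma_pool *pool,
						    void *vaddr, dma_addr_t dma)
{
	dma_pool_free(pool, vaddr, dma);	/* last outstanding block */
	dma_pool_destroy(pool);
}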
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);
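
/*
 * Example allocation sketch (hypothetical caller): the returned pointer is
 * the CPU view of the block, while *dma is the bus address to program into
 * the device.
 */
static __maybe_unused void *dmapool_example_alloc(struct dma_pool *pool,
						  dma_addr_t *dma)
{
	void *vaddr = dma_pool_alloc(pool, GFP_KERNEL, dma);

	if (!vaddr)
		return NULL;	/* no free block and no new page available */
	return vaddr;
}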
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
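
/*
 * Example free sketch (hypothetical caller): a block must be released with
 * the same virtual and dma addresses that dma_pool_alloc() reported, and the
 * device must not be given that dma address again afterwards.
 */
static __maybe_unused void dmapool_example_release(struct dma_pool *pool,
						   void *vaddr, dma_addr_t dma)
{
	if (vaddr)
		dma_pool_free(pool, vaddr, dma);
}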
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create(). DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
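
/*
 * Example probe-path sketch (hypothetical driver code): a managed pool is
 * torn down automatically by devres on driver detach, so no explicit
 * dma_pool_destroy() call is needed.
 */
static __maybe_unused struct dma_pool *dmapool_example_probe(struct device *dev)
{
	return dmam_pool_create("example_managed", dev, 64, 16, 0);
}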
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);