#include <linux/device.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <asm/scatterlist.h>	/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */
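/*
 * Lifecycle sketch (the names below are illustrative only, not part of
 * this file):
 *
 *	pool = dma_pool_create ("buffers", dev, size, align, 0);
 *	cpu  = dma_pool_alloc (pool, GFP_KERNEL, &dma);
 *	...  the device uses 'dma' while the driver uses 'cpu' ...
 *	dma_pool_free (pool, cpu, dma);
 *	dma_pool_destroy (pool);
 */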
struct dma_pool {	/* the pool */
	struct list_head	page_list;
	spinlock_t		lock;
	size_t			blocks_per_page;
	size_t			size;
	struct device		*dev;
	size_t			allocation;
	char			name [32];
	wait_queue_head_t	waitq;
	struct list_head	pools;
};
struct dma_page {	/* cacheable header for 'allocation' bytes */
	struct list_head	page_list;
	void			*vaddr;
	dma_addr_t		dma;
	unsigned int		in_use;
	unsigned long		bitmap [0];
};
#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
#define	POOL_POISON_FREED	0xa7	/* !inuse */
#define	POOL_POISON_ALLOCATED	0xa9	/* !initted */
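/* e.g. with HZ == 1000 that timeout is 100 jiffies; with HZ == 100, 10 */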
static DECLARE_MUTEX (pools_lock);
static ssize_t
show_pools (struct device *dev, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page		*page;
	struct dma_pool		*pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				pool->name,
				blocks, pages * pool->blocks_per_page,
				pool->size, pages);
		size -= temp;
		next += temp;
	}

	return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *
dma_pool_create (const char *name, struct device *dev,
	size_t size, size_t align, size_t allocation)
{
	struct dma_pool		*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		size += align + 1;
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		// FIXME: round up for less fragmentation
	} else if (allocation < size)
		return NULL;

	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

	if (dev) {
		down (&pools_lock);
		if (list_empty (&dev->dma_pools))
			device_create_file (dev, &dev_attr_pools);
		/* note:  not currently insisting "name" be unique */
		list_add (&retval->pools, &dev->dma_pools);
		up (&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}
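/*
 * Example: a pool of 64-byte descriptors that the hardware wants 16-byte
 * aligned and never crossing a 4KByte boundary.  An illustrative sketch
 * (the "dev" pointer comes from the caller's bus glue), not part of this
 * file's API:
 */
#if 0
static struct dma_pool *example_create (struct device *dev)
{
	/* name, device, block size, alignment, boundary */
	return dma_pool_create ("descriptors", dev, 64, 16, 4096);
}
#endif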
static struct dma_page *
pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags)
{
	struct dma_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = (struct dma_page *) kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent (pool->dev,
					    pool->allocation,
					    &page->dma,
					    mem_flags);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = NULL;
	}
	return page;
}
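/*
 * Worked example of the bitmap sizing above: with allocation == 4096 and
 * size == 64, blocks_per_page is 64; with 32-bit longs that rounds up to
 * (64 + 31) / 32 == 2 longs, i.e. 8 bytes of bitmap after the header.
 */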
static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
static void
pool_free_page (struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t	dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
dma_pool_destroy (struct dma_pool *pool)
{
	down (&pools_lock);
	list_del (&pool->pools);
	if (pool->dev && list_empty (&pool->dev->dma_pools))
		device_remove_file (pool->dev, &dev_attr_pools);
	up (&pools_lock);

	while (!list_empty (&pool->page_list)) {
		struct dma_page		*page;
		page = list_entry (pool->page_list.next,
				struct dma_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}

	kfree (pool);
}
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
{
	unsigned long		flags;
	struct dma_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int		i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE (wait, current);

			current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
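/*
 * Example allocation.  A blocking mask such as GFP_KERNEL lets the
 * __GFP_WAIT path above sleep and retry when every page is full, while
 * GFP_ATOMIC callers just get NULL back.  Illustrative sketch only;
 * "pool" is assumed to come from a dma_pool_create() call like the one
 * sketched earlier.
 */
#if 0
static void *example_alloc (struct dma_pool *pool, dma_addr_t *dma)
{
	void	*cpu_addr;

	cpu_addr = dma_pool_alloc (pool, GFP_KERNEL, dma);
	if (!cpu_addr)
		return NULL;
	/* hand *dma to the controller; the driver writes through cpu_addr */
	return cpu_addr;
}
#endif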
static struct dma_page *
pool_find_page (struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long		flags;
	struct dma_page		*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page		*page;
	unsigned long		flags;
	int			map, block;

	if ((page = pool_find_page (pool, dma)) == 0) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		return;
	}
	memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	page->in_use--;
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}
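/*
 * Example teardown.  The (vaddr, dma) pair given to dma_pool_free() must
 * be exactly the pair a dma_pool_alloc() call returned; the DEBUG_SLAB
 * checks above catch mismatches.  Illustrative sketch only.
 */
#if 0
static void example_teardown (struct dma_pool *pool, void *cpu_addr,
		dma_addr_t dma)
{
	dma_pool_free (pool, cpu_addr, dma);
	dma_pool_destroy (pool);	/* only once all blocks are freed */
}
#endif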
EXPORT_SYMBOL (dma_pool_create);
EXPORT_SYMBOL (dma_pool_destroy);
EXPORT_SYMBOL (dma_pool_alloc);
EXPORT_SYMBOL (dma_pool_free);