mm/dmapool.c
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *  Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
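/*
 * Usage sketch (editor's illustration, not part of the original file).  A
 * driver typically creates one pool per fixed-size hardware structure and
 * carves blocks out of it at runtime; "dev" and the 64-byte block size
 * below are hypothetical stand-ins:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	pool = dma_pool_create("descs", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (desc) {
 *		... tell the device about "dma", touch the block via "desc" ...
 *		dma_pool_free(pool, desc, dma);
 *	}
 *	dma_pool_destroy(pool);
 */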
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif
struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools) &&
	    device_create_file(dev, &dev_attr_pools)) {
		mutex_unlock(&pools_lock);
		kfree(retval);
		return NULL;
	}
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
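/*
 * Boundary example (editor's illustration; the 1536-byte block size and
 * "dev" are hypothetical).  For a device that must not let a single DMA
 * transfer cross a 4 KiB address boundary, pass that limit as @boundary:
 *
 *	pool = dma_pool_create("xfer_bufs", dev, 1536, 8, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * Every block returned by dma_pool_alloc() then lies entirely inside one
 * 4 KiB window (two 1536-byte blocks fit per window; the remainder of the
 * window is left unused).  A @boundary that is smaller than @size or not a
 * power of two makes dma_pool_create() return NULL.
 */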
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
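/*
 * Worked example (editor's note, values chosen for illustration): with
 * size = 384, boundary = 1024 and allocation = 4096, the free-block chain
 * written by pool_initialise_page() is
 *
 *	0 -> 384 -> 1024 -> 1408 -> 2048 -> 2432 -> 3072 -> 3456 -> 4096
 *
 * Each 1024-byte window holds two 384-byte blocks; a third would cross the
 * boundary, so the chain skips ahead to the next window instead.  An offset
 * equal to 'allocation' (4096 here) marks the end of the free list, which
 * is how dma_pool_alloc() recognises a page with no free blocks.
 */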
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags);
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
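/*
 * Allocation sketch (editor's illustration; "pool", "struct my_desc",
 * "ioaddr" and "MY_DESC_HEAD" are hypothetical).  The returned virtual
 * address is for the CPU, while the dma_addr_t reported through @handle is
 * what gets handed to the hardware.  GFP_ATOMIC may be used from contexts
 * that must not sleep, at the cost of failing when no page is free and a
 * new one cannot be allocated:
 *
 *	struct my_desc *desc;
 *	dma_addr_t dma;
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	memset(desc, 0, sizeof(*desc));
 *	writel(lower_32_bits(dma), ioaddr + MY_DESC_HEAD);
 */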
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
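/*
 * Managed-variant sketch (editor's illustration; "my_probe" and the sizes
 * are hypothetical).  A pool created with dmam_pool_create() is torn down
 * by the devres machinery when the driver detaches, so probe() needs no
 * matching dma_pool_destroy() in its error paths:
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("cmds", dev, 64, 64, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		... use the pool for the lifetime of the binding ...
 *		return 0;
 *	}
 */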
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);