/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top, and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 *  DMA windows, will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */

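/*
 * The bouncing is transparent to drivers: once a device has been
 * registered with dmabounce_register_dev() (see below), the ordinary
 * streaming DMA calls bounce automatically whenever a buffer falls
 * outside the device's DMA window.  A sketch, with a hypothetical
 * buffer and length:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... run the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
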
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct list_head node;

	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;
};

static LIST_HEAD(dmabounce_devs);

#ifdef STATS
static void print_alloc_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
		device_info->dev->bus_id,
		device_info->small.allocs, device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs);
}
#endif

/* find the given device in the dmabounce device list */
static inline struct dmabounce_device_info *
find_dmabounce_dev(struct device *dev)
{
	struct dmabounce_device_info *d;

	list_for_each_entry(d, &dmabounce_devs, node)
		if (d->dev == dev)
			return d;

	return NULL;
}

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
	if (device_info->total_allocs % 1000 == 0)
		print_alloc_stats(device_info);
#endif

	list_add(&buf->node, &device_info->safe_buffers);

	return buf;
}

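/*
 * Requests that fit neither pool are handled above with pool set to
 * NULL and the bounce page taken straight from dma_alloc_coherent(),
 * so oversized mappings still work, just without the pool fast path.
 */
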
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b;

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr)
			return b;

	return NULL;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	list_del(&buf->node);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

#ifdef STATS
static void print_map_stats(struct dmabounce_device_info *device_info)
{
	dev_info(device_info->dev,
		"dmabounce: map_op_count=%lu, bounce_count=%lu\n",
		device_info->map_op_count, device_info->bounce_count);
}
#endif

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	}

	consistent_sync(ptr, size, dir);

	return dma_addr;
}

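/*
 * A note on the needs_bounce test in map_single() above: or'ing the
 * addresses of the first and last bytes of the mapping and masking with
 * ~mask is non-zero exactly when some part of the buffer lies above the
 * device's DMA mask.  For example, with a 24-bit mask of 0x00ffffff, a
 * 0x2000 byte buffer at 0x00fff000 gives
 * (0x00fff000 | 0x01000fff) & ~0x00ffffff = 0x01000000, so it bounces.
 */
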
static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			unsigned long ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			ptr = (unsigned long)buf->ptr;
			dmac_clean_range(ptr, ptr + size);
		}
		free_safe_buffer(device_info, buf);
	}
}

static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		consistent_sync(buf->safe, size, dir);
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	dma_addr = map_single(dev, ptr, size, dir);

	local_irq_restore(flags);

	return dma_addr;
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	unmap_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	local_irq_restore(flags);

	return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
		    unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s\n",
			dev->bus_id);
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
#endif

	list_add(&device_info->node, &dmabounce_devs);

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}

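/*
 * Example registration, with hypothetical pool sizes: a controller that
 * mostly issues small descriptor transfers plus the occasional
 * page-sized data buffer might do, at probe time,
 *
 *	ret = dmabounce_register_dev(dev, 512, PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *
 * and call dmabounce_unregister_dev(dev) at remove time.
 */
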
void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	print_alloc_stats(device_info);
	print_map_stats(device_info);
#endif

	list_del(&device_info->node);

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}

EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single);
EXPORT_SYMBOL(dma_sync_sg);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");