/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
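
/*
 * Illustrative sketch (not part of the original driver): from a
 * driver's point of view nothing changes -- the bouncing happens
 * behind the ordinary streaming DMA API.  A hypothetical driver on a
 * dmabounce-registered device would simply do:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the DMA transfer to 'handle' ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * If 'buf' lies outside the device's DMA window, dmabounce_map_page()
 * transparently substitutes a buffer from a safe pool and copies the
 * data across; dmabounce_unmap_page() copies it back on unmap.
 */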
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};
#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif
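
/*
 * When STATS is enabled, the six counters above can be read from
 * userspace, e.g. (the path and the sample values are illustrative):
 *
 *	# cat /sys/devices/.../dmabounce_stats
 *	12 3 0 15 1024 27
 *
 * The fields are: small-pool allocs, large-pool allocs, coherent
 * (oversize) allocs, total allocs, map operations, and bounce count.
 */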
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	/*
	 * Pick the smallest pool that fits; oversize requests fall
	 * back to dma_alloc_coherent() below.
	 */
	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}
/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}
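
/*
 * Example (a hypothetical sketch, not taken from any real platform):
 * a board whose device can only reach the first 64MB of bus space
 * might register a callback like this, complementing the generic
 * DMA-mask check performed above:
 *
 *	static int example_needs_bounce(struct device *dev,
 *					dma_addr_t dma_addr, size_t size)
 *	{
 *		return (dma_addr + size) > SZ_64M;
 *	}
 *
 * Returning nonzero makes dmabounce_map_page() substitute a safe
 * buffer; returning zero lets the mapping pass straight through.
 */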
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return DMA_ERROR_CODE;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
				size_t size, enum dma_data_direction dir,
				unsigned long attrs)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}
/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_ERROR_CODE;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_ERROR_CODE;
	}

	return map_single(dev, page_address(page) + offset, size, dir, attrs);
}
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir, attrs);
}
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}
static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}
static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.set_dma_mask(dev, dma_mask);
}
static const struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= dmabounce_set_mask,
};
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
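
/*
 * Registration is typically done from platform/bus setup code, before
 * any driver maps buffers.  A hypothetical sketch, reusing the
 * example_needs_bounce() callback shown earlier (all sizes are
 * illustrative):
 *
 *	if (dmabounce_register_dev(dev, 512, 4096, example_needs_bounce))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 * The 512- and 4096-byte arguments size the small and large DMA
 * pools; requests larger than both fall back to dma_alloc_coherent().
 */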
void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");