/* pci-dma.c: Dynamic DMA mapping support for the FRV CPUs that have MMUs
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
static void *frv_dma_alloc(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp,
			   unsigned long attrs)
{
	void *ret;

	ret = consistent_alloc(gfp, size, dma_handle);
	if (ret)
		memset(ret, 0, size);

	return ret;
}
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
			 dma_addr_t dma_handle, unsigned long attrs)
{
	consistent_free(vaddr);
}
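/*
 * Illustrative only, not part of the original file: drivers reach
 * frv_dma_alloc()/frv_dma_free() through the generic coherent-allocation
 * helpers.  "dev" and the buffer names are hypothetical:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
 */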
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nents, enum dma_data_direction direction,
			  unsigned long attrs)
{
	struct scatterlist *sg;
	unsigned long dampr2;
	void *vaddr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return nents;

	/* kmap_atomic_primary() reuses mapping register 2, so preserve the
	 * current DAMPR2 value and restore DAMPR2/IAMPR2 afterwards */
	dampr2 = __get_DAMPR(2);

	/* write each page's cached data back to memory so that the device
	 * sees an up-to-date view */
	for_each_sg(sglist, sg, nents, i) {
		vaddr = kmap_atomic_primary(sg_page(sg));

		frv_dcache_writeback((unsigned long) vaddr,
				     (unsigned long) vaddr + PAGE_SIZE);
	}

	kunmap_atomic_primary(vaddr);
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);
	}

	return nents;
}
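/*
 * Illustrative only, not part of the original file: a driver lands in
 * frv_dma_map_sg() via the generic scatter-gather API.  "dev", "buf" and
 * "len" are hypothetical:
 *
 *	struct scatterlist sg[1];
 *
 *	sg_init_one(sg, buf, len);
 *	if (dma_map_sg(dev, sg, 1, DMA_TO_DEVICE) == 0)
 *		return -ENOMEM;
 */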
static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		flush_dcache_page(page);

	return (dma_addr_t) page_to_phys(page) + offset;
}
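/*
 * Illustrative only, not part of the original file: the streaming
 * single-page path ends up here via dma_map_page().  "dev" and "page"
 * are hypothetical:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 */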
static void frv_dma_sync_single_for_device(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static void frv_dma_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction direction)
{
	flush_write_buffers();
}
static int frv_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must sit within a tighter range than
	 * GFP_DMA covers.
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}
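/*
 * Illustrative only, not part of the original file: 0x00ffffff is
 * DMA_BIT_MASK(24), i.e. a 16MiB limit, so a 24-bit mask is the narrowest
 * one this check accepts.  A driver would probe the limit with
 * dma_set_mask() on a hypothetical "dev":
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(24)))
 *		return -EIO;	(rejected by frv_dma_supported())
 */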
const struct dma_map_ops frv_dma_ops = {
	.alloc			= frv_dma_alloc,
	.free			= frv_dma_free,
	.map_page		= frv_dma_map_page,
	.map_sg			= frv_dma_map_sg,
	.sync_single_for_device	= frv_dma_sync_single_for_device,
	.sync_sg_for_device	= frv_dma_sync_sg_for_device,
	.dma_supported		= frv_dma_supported,
};
EXPORT_SYMBOL(frv_dma_ops);
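/*
 * Illustrative sketch, not part of this file: the generic DMA API picks up
 * this ops table through the arch's get_arch_dma_ops() hook (on FRV, in
 * arch/frv/include/asm/dma-mapping.h), roughly:
 *
 *	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &frv_dma_ops;
 *	}
 */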