[linux-2.6/verdex.git] / arch/ppc64/kernel/dma.c
blob ce714c9271344f6f1e7df5ca71bb903928f16d5f
/*
 * Copyright (C) 2004 IBM Corporation
 *
 * Implements the generic device dma API for ppc64. Handles
 * the pci and vio busses
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
/* Include the busses we support */
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>
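
/*
 * Each generic dma_* entry point below looks up the device's bus type,
 * fetches the matching bus-specific dma_mapping_ops, and forwards the
 * call. A device on a bus this file does not handle is treated as a
 * programming error: the wrappers BUG() rather than fail softly.
 */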

static struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	if (dev->bus == &pci_bus_type)
		return &pci_dma_ops;
#ifdef CONFIG_IBMVIO
	if (dev->bus == &vio_bus_type)
		return &vio_dma_ops;
#endif
	return NULL;
}

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->dma_supported(dev, mask);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dma_supported);
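
/*
 * A usage sketch (hypothetical caller, not part of this file): a driver
 * that wants to probe a mask without committing to it can ask first,
 *
 *	if (!dma_supported(dev, 0xffffffffULL))
 *		return -EIO;
 *
 * Most drivers instead go through dma_set_mask() below, which for the
 * pci case both checks the mask and records it on the device.
 */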

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#ifdef CONFIG_IBMVIO
	if (dev->bus == &vio_bus_type)
		return -EIO;
#endif /* CONFIG_IBMVIO */
	BUG();
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
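
/*
 * A minimal sketch of mask negotiation at probe time (hypothetical
 * driver code, not part of this file; pdev is an assumed pci_dev):
 *
 *	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
 *		return -EIO;
 *
 * Only the pci case can succeed here; the vio case above rejects any
 * attempt to change the mask with -EIO.
 */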

void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, unsigned int __nocast flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	BUG();
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		BUG();
}
EXPORT_SYMBOL(dma_free_coherent);
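
/*
 * A coherent-allocation sketch (hypothetical caller, not part of this
 * file): alloc and free must be paired with the same size and handle.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... the device sees ring_dma, the CPU writes through ring ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */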

dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->map_single(dev, cpu_addr, size, direction);
	BUG();
	return (dma_addr_t)0;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		dma_ops->unmap_single(dev, dma_addr, size, direction);
	else
		BUG();
}
EXPORT_SYMBOL(dma_unmap_single);
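
/*
 * A streaming-mapping sketch (hypothetical caller, not part of this
 * file): map a kernel buffer for a device read, then unmap once the
 * transfer completes.
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program the device with busaddr, wait for completion ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 *
 * Note these wrappers return no mapping error to the caller; an
 * unrecognized bus is a BUG(), not a recoverable failure.
 */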

dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->map_single(dev,
				(page_address(page) + offset), size, direction);
	BUG();
	return (dma_addr_t)0;
}
EXPORT_SYMBOL(dma_map_page);
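
/*
 * dma_map_page simply reuses the map_single hook on the page's kernel
 * virtual address. That works on ppc64, which has no highmem: every
 * struct page has a permanent kernel mapping, so page_address() is
 * always usable here.
 */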

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		dma_ops->unmap_single(dev, dma_address, size, direction);
	else
		BUG();
}
EXPORT_SYMBOL(dma_unmap_page);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		return dma_ops->map_sg(dev, sg, nents, direction);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops)
		dma_ops->unmap_sg(dev, sg, nhwentries, direction);
	else
		BUG();
}
EXPORT_SYMBOL(dma_unmap_sg);
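
/*
 * A scatter/gather sketch (hypothetical caller, not part of this file;
 * setup_desc() is a placeholder for driver-specific descriptor code):
 *
 *	int i, n = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *	for (i = 0; i < n; i++)
 *		setup_desc(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *	... run the transfer ...
 *	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *
 * An iommu backend may coalesce entries, so the returned count n can be
 * smaller than nents; unmap is still called with the original nents.
 */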