/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Copyright (C) 2010 Cavium Networks, Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/bootmem.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>		/* for get_order() */

#include <asm/bootinfo.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_PCI
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
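
/*
 * Each DMA BAR layout (PCIe, "big" and "small") gets its own
 * phys<->dma translation pair below.  Within a pair the translations
 * are exact inverses of each other, and octeon_pci_dma_init() installs
 * whichever pair matches octeon_dma_bar_type.
 */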

static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE
	    && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
	else
		return paddr;
}

static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
{
	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
	else
		return daddr;
}
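
/*
 * A worked round trip through the pair above, for a paddr inside the
 * BAR1 window: octeon_hole_phys_to_dma() yields
 *
 *	daddr = paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE
 *
 * which is >= CVMX_PCIE_BAR1_RC_BASE, so octeon_hole_dma_to_phys()
 * undoes it exactly.  Addresses outside the window pass through
 * unchanged in both directions.
 */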

static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	daddr = octeon_hole_dma_to_phys(daddr);

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;

	return daddr;
}
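
/*
 * The 0x410000000ull fixups above and below deal with RAM that the CPU
 * sees high but that maps low for PCI (see the comment in
 * plat_swiotlb_setup()): a paddr in [0x410000000, 0x420000000) is
 * shifted down by 0x400000000 to [0x10000000, 0x20000000) before
 * translation, and the dma_to_phys direction applies the inverse
 * shift.
 */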

static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything in the BAR1 hole or above goes via BAR2 */
	if (paddr >= 0xf0000000ull)
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
					   phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything not in the BAR1 range goes via BAR2 */
	if (paddr >= octeon_bar1_pci_phys
	    && paddr < octeon_bar1_pci_phys + 0x8000000ull)
		paddr = paddr - octeon_bar1_pci_phys;
	else
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
					    dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;
	else
		daddr += octeon_bar1_pci_phys;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}
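
/*
 * For the "small" layout the BAR1 window is 128MB (0x8000000ull) of
 * RAM starting at octeon_bar1_pci_phys, visible to the device at bus
 * address 0; everything else is offset by OCTEON_BAR2_PCI_ADDRESS.
 * That offset is also how octeon_small_dma_to_phys() tells the two
 * regions apart on the way back.
 */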

#endif /* CONFIG_PCI */
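
/*
 * The generic operations below delegate to swiotlb and then issue a
 * write barrier.  The mb() ensures that any CPU stores made while
 * mapping or syncing (e.g. bounce-buffer copies) are globally visible
 * before the caller lets the device touch the buffer.
 */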

static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
					    direction, attrs);
	mb();

	return daddr;
}

static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
	mb();

	return r;
}

static void octeon_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
	mb();
}

static void octeon_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
	mb();
}

static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

	mb();

	return ret;
}
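
/*
 * Zone selection above: a coherent_dma_mask of 24 bits or less
 * (ISA-style devices) forces ZONE_DMA, a mask of 32 bits or less
 * forces ZONE_DMA32 where that zone exists, and wider masks may be
 * satisfied from any zone.  swiotlb_alloc_coherent() falls back to the
 * bounce pool if the resulting pages are still not addressable.
 */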

static void octeon_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

struct octeon_dma_map_ops {
	struct dma_map_ops dma_map_ops;
	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};
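
/*
 * phys_to_dma()/dma_to_phys() below recover this wrapper from the
 * generic ops pointer: get_dma_ops(dev) returns the address of the
 * embedded dma_map_ops member, so container_of() steps back to the
 * enclosing octeon_dma_map_ops and its translation callbacks.
 */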

dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
						      struct octeon_dma_map_ops,
						      dma_map_ops);

	return ops->phys_to_dma(dev, paddr);
}
EXPORT_SYMBOL(phys_to_dma);

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
						      struct octeon_dma_map_ops,
						      dma_map_ops);

	return ops->dma_to_phys(dev, daddr);
}
EXPORT_SYMBOL(dma_to_phys);

static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
	.dma_map_ops = {
		.alloc_coherent = octeon_dma_alloc_coherent,
		.free_coherent = octeon_dma_free_coherent,
		.map_page = octeon_dma_map_page,
		.unmap_page = swiotlb_unmap_page,
		.map_sg = octeon_dma_map_sg,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = octeon_dma_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
		.mapping_error = swiotlb_dma_mapping_error,
		.dma_supported = swiotlb_dma_supported
	},
	.phys_to_dma = octeon_unity_phys_to_dma,
	.dma_to_phys = octeon_unity_dma_to_phys
};

char *octeon_swiotlb;

void __init plat_swiotlb_setup(void)
{
	int i;
	phys_addr_t max_addr;
	phys_addr_t addr_size;
	size_t swiotlbsize;
	unsigned long swiotlb_nslabs;

	max_addr = 0;
	addr_size = 0;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *e = &boot_mem_map.map[i];

		if (e->type != BOOT_MEM_RAM)
			continue;

		/* These addresses map low for PCI. */
		if (e->addr > 0x410000000ull)
			continue;

		addr_size += e->size;

		if (max_addr < e->addr + e->size)
			max_addr = e->addr + e->size;
	}

	swiotlbsize = PAGE_SIZE;

#ifdef CONFIG_PCI
	/*
	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
	 * size to a maximum of 64MB.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		swiotlbsize = addr_size / 4;
		if (swiotlbsize > 64 * (1 << 20))
			swiotlbsize = 64 * (1 << 20);
	} else if (max_addr > 0xf0000000ul) {
		/*
		 * Otherwise only allocate a big iotlb if there is
		 * memory past the BAR1 hole.
		 */
		swiotlbsize = 64 * (1 << 20);
	}
#endif
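	/*
	 * Round the size to whole swiotlb slabs, aligned to a segment
	 * boundary, then convert back to bytes so the bootmem
	 * allocation matches what swiotlb_init_with_tbl() expects.
	 * With the usual swiotlb constants (IO_TLB_SHIFT == 11,
	 * IO_TLB_SEGSIZE == 128), 64MB is 32768 slabs and is already
	 * aligned.
	 */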
	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;

	octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);

	swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1);

	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
}

#ifdef CONFIG_PCI
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
	.dma_map_ops = {
		.alloc_coherent = octeon_dma_alloc_coherent,
		.free_coherent = octeon_dma_free_coherent,
		.map_page = octeon_dma_map_page,
		.unmap_page = swiotlb_unmap_page,
		.map_sg = octeon_dma_map_sg,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = octeon_dma_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
		.mapping_error = swiotlb_dma_mapping_error,
		.dma_supported = swiotlb_dma_supported
	},
};

struct dma_map_ops *octeon_pci_dma_map_ops;

void __init octeon_pci_dma_init(void)
{
	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_BIG:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_SMALL:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
		break;
	default:
		BUG();
	}
	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
}
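
/*
 * Note that octeon_pci_dma_map_ops is only published here; the PCI
 * controller code is responsible for attaching it to devices (so that
 * get_dma_ops() returns it) before the translations selected above can
 * take effect.
 */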

#endif /* CONFIG_PCI */