/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
 * the kernel's original.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <linux/cache.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>

#include <dma-coherence.h>

#include <asm/octeon/pci-octeon.h>
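
/*
 * PCI address at which BAR2 is assumed to be mapped (the BAR itself is
 * set up outside this file).  Devices whose DMA mask is larger than this
 * value can reach all of physical memory through BAR2, so such mappings
 * are simply physical address + BAR2_PCI_ADDRESS.
 */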
#define BAR2_PCI_ADDRESS 0x8000000000ul

struct bar1_index_state {
	int16_t ref_count;	/* Number of PCI mappings using this index */
	uint16_t address_bits;	/* Upper bits of physical address. This is
				   shifted right by 22 bits */
};
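
/*
 * Soft state for the 32 BAR1 index registers.  Each entry remaps one
 * 4MB (1 << 22 byte) naturally aligned block of physical memory into
 * the BAR1 window and is reference counted, so concurrent mappings of
 * the same block share a single hardware entry.
 */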
static DEFINE_SPINLOCK(bar1_lock);
static struct bar1_index_state bar1_state[32];
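
/*
 * Convert a kernel virtual address into a DMA/PCI bus address for the
 * current octeon_dma_bar_type.  Addresses that fall inside a statically
 * mapped BAR are translated arithmetically; anything else gets a
 * dynamically allocated, reference counted BAR1 index.  If no index can
 * be found, the initial error value of -1 is returned.
 */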
dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
{
#ifndef CONFIG_PCI
	/* Without PCI/PCIe this function can be called for Octeon internal
	   devices such as USB. These devices all support 64bit addressing */
	return virt_to_phys(ptr);
#else
	unsigned long flags;
	uint64_t dma_mask;
	int64_t start_index;
	int64_t index;
	dma_addr_t result = -1;
	uint64_t physical = virt_to_phys(ptr);

	/*
	 * Use the DMA masks to determine the allowed memory
	 * region. For us it doesn't limit the actual memory, just the
	 * address visible over PCI.  Devices with limits need to use
	 * lower indexed Bar1 entries.
	 */
	if (dev) {
		dma_mask = dev->coherent_dma_mask;
		if (dev->dma_mask)
			dma_mask = *dev->dma_mask;
	} else {
		dma_mask = 0xfffffffful;
	}

	/*
	 * Platform devices, such as the internal USB, skip all
	 * translation and use Octeon physical addresses directly.
	 */
	if (!dev || dev->bus == &platform_bus_type)
		return physical;
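
	/*
	 * Each BAR layout gets its own translation of the physical
	 * address.  Cases that can be served by the static BAR setup
	 * jump straight to 'done'; anything that needs a dynamically
	 * programmed BAR1 index falls out of the switch.
	 */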
	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE:
		if (unlikely(physical < (16ul << 10)))
			panic("dma_map_single: Not allowed to map first 16KB."
			      " It interferes with BAR0 special area\n");
		else if ((physical + size >= (256ul << 20)) &&
			 (physical < (512ul << 20)))
			panic("dma_map_single: Not allowed to map bootbus\n");
		else if ((physical + size >= 0x400000000ull) &&
			 physical < 0x410000000ull)
			panic("dma_map_single: "
			      "Attempt to map illegal memory address 0x%llx\n",
			      physical);
		else if (physical >= 0x420000000ull)
			panic("dma_map_single: "
			      "Attempt to map illegal memory address 0x%llx\n",
			      physical);
		else if ((physical + size >=
			  (4ull << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
			 && physical < (4ull << 30))
			pr_warning("dma_map_single: Warning: "
				   "Mapping memory address that might "
				   "conflict with devices 0x%llx-0x%llx\n",
				   physical, physical + size - 1);
		/* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
		if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
			result = physical - 0x400000000ull;
		else
			result = physical;
		if (((result + size - 1) & dma_mask) != result + size - 1)
			panic("dma_map_single: Attempt to map address "
			      "0x%llx-0x%llx, which can't be accessed "
			      "according to the dma mask 0x%llx\n",
			      physical, physical + size - 1, dma_mask);
		goto done;
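
	/*
	 * OCTEON_DMA_BAR_TYPE_BIG: physical memory below 2GB (minus the
	 * first 4KB and the bootbus hole) and most of the 2GB-4GB range
	 * is reached through statically configured BARs, so no
	 * per-mapping state is kept for it.  Only physical addresses in
	 * 2GB..2GB+128MB need a dynamically managed BAR1 index from the
	 * code below the switch.
	 */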
	case OCTEON_DMA_BAR_TYPE_BIG:
		/* If the device supports 64bit addressing, then use BAR2 */
		if (dma_mask > BAR2_PCI_ADDRESS) {
			result = physical + BAR2_PCI_ADDRESS;
			goto done;
		}

		if (unlikely(physical < (4ul << 10))) {
			panic("dma_map_single: Not allowed to map first 4KB. "
			      "It interferes with BAR0 special area\n");
		} else if (physical < (256ul << 20)) {
			if (unlikely(physical + size > (256ul << 20)))
				panic("dma_map_single: Requested memory spans "
				      "Bar0 0:256MB and bootbus\n");
			result = physical;
			goto done;
		} else if (unlikely(physical < (512ul << 20))) {
			panic("dma_map_single: Not allowed to map bootbus\n");
		} else if (physical < (2ul << 30)) {
			if (unlikely(physical + size > (2ul << 30)))
				panic("dma_map_single: Requested memory spans "
				      "Bar0 512MB:2GB and BAR1\n");
			result = physical;
			goto done;
		} else if (physical < (2ul << 30) + (128 << 20)) {
			/* Falls through to the dynamic Bar1 code below */
		} else if (physical <
			   (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
			if (unlikely(physical + size >
				     (4ul << 30) -
				     (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
				panic("dma_map_single: Requested memory "
				      "extends past Bar1 (4GB-%luMB)\n",
				      OCTEON_PCI_BAR1_HOLE_SIZE);
			result = physical;
			goto done;
		} else if ((physical >= 0x410000000ull) &&
			   (physical < 0x420000000ull)) {
			if (unlikely(physical + size > 0x420000000ull))
				panic("dma_map_single: Requested memory spans "
				      "nonexistent memory\n");
			/* BAR0 fixed mapping 256MB:512MB ->
			 * 16GB+256MB:16GB+512MB */
			result = physical - 0x400000000ull;
			goto done;
		}
		/* Continued below switch statement */
		break;

	case OCTEON_DMA_BAR_TYPE_SMALL:
		/* If the device supports 64bit addressing, then use BAR2 */
		if (dma_mask > BAR2_PCI_ADDRESS) {
			result = physical + BAR2_PCI_ADDRESS;
			goto done;
		}
		/* Continued below switch statement */
		break;

	default:
		panic("dma_map_single: Invalid octeon_dma_bar_type\n");
	}
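
	/*
	 * Anything that reaches this point needs a dynamically
	 * programmed BAR1 index.  Each index remaps one 4MB-aligned
	 * block of physical memory (hence the '>> 22' arithmetic).
	 * Devices with a narrow DMA mask are restricted to
	 * correspondingly low indexes so the resulting bus address
	 * still fits inside their mask.
	 */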

	/* Don't allow mapping to span multiple Bar entries. The hardware guys
	   won't guarantee that DMA across boards work */
	if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
		panic("dma_map_single: "
		      "Requested memory spans more than one Bar1 entry\n");

	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
		start_index = 31;
	else if (unlikely(dma_mask < (1ul << 27)))
		start_index = (dma_mask >> 22);
	else
		start_index = 31;

	/* Only one processor can access the Bar register at once */
	spin_lock_irqsave(&bar1_lock, flags);

	/* Look through Bar1 for existing mapping that will work */
	for (index = start_index; index >= 0; index--) {
		if ((bar1_state[index].address_bits == physical >> 22) &&
		    (bar1_state[index].ref_count)) {
			/* An existing mapping will work, use it */
			bar1_state[index].ref_count++;
			if (unlikely(bar1_state[index].ref_count < 0))
				panic("dma_map_single: "
				      "Bar1[%d] reference count overflowed\n",
				      (int) index);
			result = (index << 22) | (physical & ((1 << 22) - 1));
			/* Large BAR1 is offset at 2GB */
			if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
				result += 2ul << 30;
			goto done_unlock;
		}
	}

	/* No existing mappings, look for a free entry */
	for (index = start_index; index >= 0; index--) {
		if (unlikely(bar1_state[index].ref_count == 0)) {
			union cvmx_pci_bar1_indexx bar1_index;
			/* We have a free entry, use it */
			bar1_state[index].ref_count = 1;
			bar1_state[index].address_bits = physical >> 22;
			bar1_index.u32 = 0;
			/* Address bits[35:22] sent to L2C */
			bar1_index.s.addr_idx = physical >> 22;
			/* Don't put PCI accesses in L2. */
			bar1_index.s.ca = 1;
			/* Endian Swap Mode */
			bar1_index.s.end_swp = 1;
			/* Set '1' when the selected address range is valid. */
			bar1_index.s.addr_v = 1;
			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
					   bar1_index.u32);
			/* The new entry is ready, use it */
			result = (index << 22) | (physical & ((1 << 22) - 1));
			/* Large BAR1 is offset at 2GB */
			if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
				result += 2ul << 30;
			goto done_unlock;
		}
	}

	pr_err("dma_map_single: "
	       "Can't find empty BAR1 index for physical mapping 0x%llx\n",
	       (unsigned long long) physical);
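
	/*
	 * No free BAR1 index was found: 'result' still holds the initial
	 * error value of -1, which is what the caller gets back.
	 */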

done_unlock:
	spin_unlock_irqrestore(&bar1_lock, flags);
done:
	pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
	return result;
#endif
}
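
/*
 * Release a mapping created by octeon_map_dma_mem().  Only bus addresses
 * that were backed by a dynamically allocated BAR1 index have any state
 * to undo: their reference count is dropped and the hardware index
 * register is cleared once the count reaches zero.
 */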
void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
#ifndef CONFIG_PCI
	/*
	 * Without PCI/PCIe this function can be called for Octeon internal
	 * devices such as USB. These devices all support 64bit addressing.
	 */
	return;
#else
	unsigned long flags;
	uint64_t index;

	/*
	 * Platform devices, such as the internal USB, skip all
	 * translation and use Octeon physical addresses directly.
	 */
	if (dev->bus == &platform_bus_type)
		return;

	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE:
		/* Nothing to do, all mappings are static */
		goto done;

	case OCTEON_DMA_BAR_TYPE_BIG:
		/* Nothing to do for addresses using BAR2 */
		if (dma_addr >= BAR2_PCI_ADDRESS)
			goto done;

		if (unlikely(dma_addr < (4ul << 10)))
			panic("dma_unmap_single: Unexpected DMA address 0x%llx\n",
			      dma_addr);
		else if (dma_addr < (2ul << 30))
			/* Nothing to do for addresses using BAR0 */
			goto done;
		else if (dma_addr < (2ul << 30) + (128ul << 20))
			/* Need to unmap, fall through */
			index = (dma_addr - (2ul << 30)) >> 22;
		else if (dma_addr <
			 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
			goto done;	/* Nothing to do for the rest of BAR1 */
		else
			panic("dma_unmap_single: Unexpected DMA address 0x%llx\n",
			      dma_addr);
		/* Continued below switch statement */
		break;

	case OCTEON_DMA_BAR_TYPE_SMALL:
		/* Nothing to do for addresses using BAR2 */
		if (dma_addr >= BAR2_PCI_ADDRESS)
			goto done;

		index = dma_addr >> 22;
		/* Continued below switch statement */
		break;

	default:
		panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
	}
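
	/*
	 * Only bus addresses that came from a dynamically managed BAR1
	 * entry get here; 'index' selects which of the 32 entries is
	 * being released.
	 */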

	if (unlikely(index > 31))
		panic("dma_unmap_single: "
		      "Attempt to unmap an invalid address (0x%llx)\n",
		      dma_addr);

	spin_lock_irqsave(&bar1_lock, flags);
	bar1_state[index].ref_count--;
	if (bar1_state[index].ref_count == 0)
		octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
	else if (unlikely(bar1_state[index].ref_count < 0))
		panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
		      (int) index);
	spin_unlock_irqrestore(&bar1_lock, flags);
done:
	pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
#endif
}
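
/*
 * Usage sketch (an assumption, not part of this file): the generic MIPS
 * DMA code is expected to reach these helpers through the plat_* hooks
 * in the mach-cavium-octeon <dma-coherence.h> included above, roughly:
 *
 *	static inline dma_addr_t plat_map_dma_mem(struct device *dev,
 *						  void *addr, size_t size)
 *	{
 *		return octeon_map_dma_mem(dev, addr, size);
 *	}
 *
 *	static inline void plat_unmap_dma_mem(struct device *dev,
 *					      dma_addr_t dma_addr)
 *	{
 *		octeon_unmap_dma_mem(dev, dma_addr);
 *	}
 *
 * The hook names and signatures follow the MIPS dma-coherence convention
 * and should be checked against the header actually included here.
 */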