// SPDX-License-Identifier: GPL-2.0
/*
 * io-unit.c: IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/oplib.h>

#include "mm_32.h"
/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif
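
/*
 * Each IOPTE covers one page of DVMA space.  MKIOPTE() encodes a physical
 * address as an entry: the page frame bits (phys >> 4, masked by
 * IOUPTE_PAGE) plus the cacheable, writable and valid permission bits.
 */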
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

static const struct dma_map_ops iounit_dma_ops;
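
/*
 * Probe-time setup for one IO-UNIT: allocate its iounit_struct, set up the
 * three DVMA allocation areas and their rotors, map and clear the 16-page
 * External Page Table (XPT) found in the SBI's third resource, and install
 * the IO-UNIT dma_map_ops on the device.
 */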
static void __init iounit_iommu_init(struct platform_device *op)
{
        struct iounit_struct *iounit;
        iopte_t __iomem *xpt;
        iopte_t __iomem *xptend;

        iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
        if (!iounit) {
                prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
                prom_halt();
        }

        iounit->limit[0] = IOUNIT_BMAP1_START;
        iounit->limit[1] = IOUNIT_BMAP2_START;
        iounit->limit[2] = IOUNIT_BMAPM_START;
        iounit->limit[3] = IOUNIT_BMAPM_END;
        iounit->rotor[1] = IOUNIT_BMAP2_START;
        iounit->rotor[2] = IOUNIT_BMAPM_START;

        xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
        if (!xpt) {
                prom_printf("SUN4D: Cannot map External Page Table.");
                prom_halt();
        }

        op->dev.archdata.iommu = iounit;
        iounit->page_table = xpt;
        spin_lock_init(&iounit->lock);

        xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
        for (; xpt < xptend; xpt++)
                sbus_writel(0, xpt);

        op->dev.dma_ops = &iounit_dma_ops;
}
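
/*
 * Attach an IO-UNIT to every "sbi" node found in the device tree and
 * propagate the archdata to the devices behind each SBI.
 */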
static int __init iounit_init(void)
{
        extern void sun4d_init_sbi_irq(void);
        struct device_node *dp;

        for_each_node_by_name(dp, "sbi") {
                struct platform_device *op = of_find_device_by_node(dp);

                iounit_iommu_init(op);
                of_propagate_archdata(op);
        }

        sun4d_init_sbi_irq();

        return 0;
}

subsys_initcall(iounit_init);
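
/*
 * Reserve enough IOPTE slots in the allocation bitmap to cover @size bytes
 * at kernel address @vaddr, program the corresponding XPT entries and
 * return the DVMA address the device should use for the buffer.
 */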
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
        int i, j, k, npages;
        unsigned long rotor, scan, limit;
        iopte_t iopte;

        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

        /* A tiny bit of magic ingredient :) */
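        /*
         * Each nibble of 'i' names a bitmap area to try, lowest nibble first:
         * 1 = BMAP1 (one-page mappings), 2 = BMAP2 (two-page mappings),
         * 3 = BMAPM (everything larger).  When an area is full the next
         * nibble is tried; running out of nibbles panics below.
         */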
        switch (npages) {
        case 1: i = 0x0231; break;
        case 2: i = 0x0132; break;
        default: i = 0x0213; break;
        }

        IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:   j = (i & 15);
        rotor = iounit->rotor[j - 1];
        limit = iounit->limit[j];
        scan = rotor;
nexti:  scan = find_next_zero_bit(iounit->bmap, limit, scan);
        if (scan + npages > limit) {
                if (limit != rotor) {
                        limit = rotor;
                        scan = iounit->limit[j - 1];
                        goto nexti;
                }
                i >>= 4;
                if (!(i & 15))
                        panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
                goto next;
        }
        for (k = 1, scan++; k < npages; k++)
                if (test_bit(scan++, iounit->bmap))
                        goto nexti;
        iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
        scan -= npages;
        iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
        vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
        for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
                set_bit(scan, iounit->bmap);
                sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
        }
        IOD(("%08lx\n", vaddr));
        return vaddr;
}
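
/*
 * dma_map_ops.map_page: map one buffer for streaming DMA.  Zero-length
 * requests and requests above 256KB are refused with DMA_MAPPING_ERROR.
 */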
static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t len, enum dma_data_direction dir,
                unsigned long attrs)
{
        void *vaddr = page_address(page) + offset;
        struct iounit_struct *iounit = dev->archdata.iommu;
        unsigned long ret, flags;

        /* XXX So what is maxphys for us and how do drivers know it? */
        if (!len || len > 256 * 1024)
                return DMA_MAPPING_ERROR;

        spin_lock_irqsave(&iounit->lock, flags);
        ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
        spin_unlock_irqrestore(&iounit->lock, flags);
        return ret;
}
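
/*
 * dma_map_ops.map_sg: map every scatterlist entry in one pass under the
 * iounit lock, storing the DVMA address and length back into each entry.
 */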
static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct iounit_struct *iounit = dev->archdata.iommu;
        struct scatterlist *sg;
        unsigned long flags;
        int i;

        /* FIXME: Cache some resolved pages - often several sg entries are to the same page */
        spin_lock_irqsave(&iounit->lock, flags);
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
                sg->dma_length = sg->length;
        }
        spin_unlock_irqrestore(&iounit->lock, flags);
        return nents;
}
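
/*
 * dma_map_ops.unmap_page: release a mapping made by iounit_map_page() by
 * clearing its bits in the allocation bitmap.
 */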
static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct iounit_struct *iounit = dev->archdata.iommu;
        unsigned long flags;

        spin_lock_irqsave(&iounit->lock, flags);
        len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
        vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
        IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
        for (len += vaddr; vaddr < len; vaddr++)
                clear_bit(vaddr, iounit->bmap);
        spin_unlock_irqrestore(&iounit->lock, flags);
}
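
/*
 * dma_map_ops.unmap_sg: undo iounit_map_sg() for every scatterlist entry.
 */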
static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct iounit_struct *iounit = dev->archdata.iommu;
        unsigned long flags, vaddr, len;
        struct scatterlist *sg;
        int i;

        spin_lock_irqsave(&iounit->lock, flags);
        for_each_sg(sgl, sg, nents, i) {
                len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
                vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
                IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
                for (len += vaddr; vaddr < len; vaddr++)
                        clear_bit(vaddr, iounit->bmap);
        }
        spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
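/*
 * dma_map_ops.alloc: allocate a coherent DMA buffer.  The pages are mapped
 * into the DVMA range handed out by sparc_dma_alloc_resource() and the
 * matching XPT entries are written, so CPU and device address the same
 * memory.
 */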
static void *iounit_alloc(struct device *dev, size_t len,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        struct iounit_struct *iounit = dev->archdata.iommu;
        unsigned long va, addr, page, end, ret;
        pgprot_t dvma_prot;
        iopte_t __iomem *iopte;

        /* XXX So what is maxphys for us and how do drivers know it? */
        if (!len || len > 256 * 1024)
                return NULL;

        len = PAGE_ALIGN(len);
        va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
        if (!va)
                return NULL;

        addr = ret = sparc_dma_alloc_resource(dev, len);
        if (!addr)
                goto out_free_pages;
        *dma_handle = addr;

        dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
        end = PAGE_ALIGN((addr + len));
        while (addr < end) {
                page = va;
                {
                        pgd_t *pgdp;
                        p4d_t *p4dp;
                        pud_t *pudp;
                        pmd_t *pmdp;
                        pte_t *ptep;
                        long i;

                        pgdp = pgd_offset(&init_mm, addr);
                        p4dp = p4d_offset(pgdp, addr);
                        pudp = pud_offset(p4dp, addr);
                        pmdp = pmd_offset(pudp, addr);
                        ptep = pte_offset_map(pmdp, addr);

                        set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

                        i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

                        iopte = iounit->page_table + i;
                        sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
                }
                addr += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        flush_cache_all();
        flush_tlb_all();

        return (void *)ret;

out_free_pages:
        free_pages(va, get_order(len));
        return NULL;
}
static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        /* XXX Somebody please fill this in */
}
#endif
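
/* dma_map_ops for devices behind a SUN4D IO-UNIT, installed by iounit_iommu_init(). */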
static const struct dma_map_ops iounit_dma_ops = {
#ifdef CONFIG_SBUS
        .alloc                  = iounit_alloc,
        .free                   = iounit_free,
#endif
        .map_page               = iounit_map_page,
        .unmap_page             = iounit_unmap_page,
        .map_sg                 = iounit_map_sg,
        .unmap_sg               = iounit_unmap_sg,
};