/* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek	(jj@sunsite.mff.cuni.cz)
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>
/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif
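
/*
 * Every valid IO-UNIT page table entry is built with MKIOPTE() below: the
 * physical address is stored shifted right by four bits (masked with
 * IOUPTE_PAGE) and tagged cacheable, writable and valid via IOPERM.
 */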
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
void __init
iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
{
	iopte_t *xpt, *xptend;
	struct iounit_struct *iounit;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}
	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;
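
	/*
	 * limit[] marks the boundaries of the bitmap ranges that
	 * iounit_get_area() searches (BMAP1, BMAP2 and BMAPM); rotor[]
	 * remembers where the last allocation in each range finished so the
	 * next search continues from there.
	 */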
	xpt = NULL;
	if(prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
			    sizeof(iommu_promregs)) != -1) {
		prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
		memset(&r, 0, sizeof(r));
		r.flags = iommu_promregs[2].which_io;
		r.start = iommu_promregs[2].phys_addr;
		xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
	}
	if(!xpt) panic("Cannot map External Page Table.");
	sbus->ofdev.dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);
	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
		iopte_val(*xpt++) = 0;
}
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	/* A tiny bit of magic ingredient :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}
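
	/*
	 * Each nibble of 'i' names one of the bitmap ranges; they are tried
	 * least-significant nibble first, so the constants above encode a
	 * per-size preference for where the mapping should land.
	 */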
94 IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr
, size
, npages
));
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long ret, flags;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}
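
/*
 * Illustrative sketch only, not part of this driver: a hypothetical caller
 * would map a kernel buffer for device DMA and release it afterwards.  In
 * the real kernel these routines are reached through the mmu_get_scsi_one/
 * mmu_release_scsi_one entry points installed by ld_mmu_iounit() below
 * rather than called directly.
 *
 *	char *buf = kmalloc(512, GFP_KERNEL);
 *	__u32 dvma = iounit_get_scsi_one(buf, 512, sbus);
 *	... program the device with the 'dvma' bus address ...
 *	iounit_release_scsi_one(dvma, 512, sbus);
 *	kfree(buf);
 */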
static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
		sg[sz].dvma_length = sg[sz].length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}
static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}
static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	unsigned long vaddr, len;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}
#ifdef CONFIG_SBUS
static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;
	struct sbus_bus *sbus;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			for_each_sbus(sbus) {
				struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

				iopte = (iopte_t *)(iounit->page_table + i);
				*iopte = MKIOPTE(__pa(page));
			}
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
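
/*
 * Note: iounit_map_dma_area() wires the same range into both the CPU page
 * tables (via set_pte) and the external page table of every io-unit on the
 * system, so the DVMA address handed back in *pba is usable by the CPU and
 * by SBUS devices alike.
 */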
static void iounit_unmap_dma_area(unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
/* XXX We do not pass sbus device here, bad. */
static struct page *iounit_translate_dvma(unsigned long addr)
{
	struct sbus_bus *sbus = sbus_root;	/* They are all the same */
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
	int i;
	iopte_t *iopte;

	i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
	iopte = (iopte_t *)(iounit->page_table + i);
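	/*
	 * MKIOPTE() stored the physical address shifted right by four bits,
	 * so shifting the IOPTE right by (PAGE_SHIFT - 4) recovers the page
	 * frame number.
	 */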
	return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */
}
#endif
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
#endif
}
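
/*
 * BTFIXUPSET_CALL() patches the generic sparc32 mmu_* entry points at boot
 * so that, on sun4d, they dispatch to the io-unit routines above;
 * mmu_lockarea is patched to simply return its first argument and
 * mmu_unlockarea becomes a no-op.
 */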
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	i = 0x0213;
	spin_lock_irqsave(&iounit->lock, flags);
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}
__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
	int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
	return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
}
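
/*
 * Illustrative sketch only: the two helpers above are used together --
 * reserve a DVMA range first, then point its pages at real memory.  The
 * names 'sbus', 'first_page' and 'second_page' are hypothetical.
 *
 *	__u32 dvma = iounit_map_dma_init(sbus, 2 * PAGE_SIZE);
 *	__u32 ba0  = iounit_map_dma_page(dvma, first_page, sbus);
 *	__u32 ba1  = iounit_map_dma_page(dvma + PAGE_SIZE, second_page, sbus);
 */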