/* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $
 * io-unit.c: IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
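
/*
 * Set up the IO-UNIT for one SBI: allocate its bookkeeping structure,
 * locate and map the External Page Table (XPT) via the PROM "reg"
 * property, and clear every iopte so that no DVMA translation is valid
 * at startup.
 */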
void __init
iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
{
	iopte_t *xpt, *xptend;
	struct iounit_struct *iounit;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = NULL;
	if (prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
			     sizeof(iommu_promregs)) != -1) {
		prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
		memset(&r, 0, sizeof(r));
		r.flags = iommu_promregs[2].which_io;
		r.start = iommu_promregs[2].phys_addr;
		xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
	}
	if (!xpt)
		panic("Cannot map External Page Table.");

	sbus->ofdev.dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
		iopte_val(*xpt++) = 0;
}

/* One has to hold iounit->lock to call this */
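/*
 * Next-fit allocation from the DVMA bitmap.  The bitmap is split into
 * three regions (iounit->limit[0..3] mark their boundaries) and the
 * nibbles of 'i' encode the order in which the regions are tried, so
 * the preferred region depends on the size of the request.  Each region
 * keeps its own rotor so successive allocations continue where the last
 * one stopped instead of always rescanning from the start.
 */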
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredients :) -- each nibble of 'i' names a
	 * bitmap region; the low nibble is tried first. */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap around once: rescan from the start of this region up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* This region is full; fall back to the next nibble of 'i'. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* Need npages consecutive free slots; the bit at 'scan' is already known free. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	/* Mark the slots busy and install one iopte per page (+0x100 advances
	 * the packed physical address, phys >> 4, by one 4K page). */
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
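
/*
 * mmu_get_scsi_{one,sgl} hooks: map a kernel buffer (or each scatterlist
 * entry) into IO-UNIT DVMA space and hand back the bus address the device
 * should use.
 */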
static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long ret, flags;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long) page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
		sg[sz].dvma_length = sg[sz].length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}
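
/*
 * The release side only clears the allocation bitmap; the ioptes
 * themselves are left in place and simply overwritten by the next
 * mapping of the same slots.
 */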
static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	unsigned long vaddr, len;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
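/*
 * Map the kernel virtual range at 'va' into the DVMA window starting at
 * 'addr': each page is re-mapped in the kernel page tables with dvma_prot
 * and the corresponding iopte is installed in every IO-UNIT (the interface
 * does not identify a specific bus).
 */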
static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;
	struct sbus_bus *sbus;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			for_each_sbus(sbus) {
				struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

				iopte = (iopte_t *)(iounit->page_table + i);
				*iopte = MKIOPTE(__pa(page));
			}
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return 0;
}

static void iounit_unmap_dma_area(unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}

/* XXX We do not pass sbus device here, bad. */
static struct page *iounit_translate_dvma(unsigned long addr)
{
	struct sbus_bus *sbus = sbus_root;	/* They are all the same */
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
	int i;
	iopte_t *iopte;

	i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
	iopte = (iopte_t *)(iounit->page_table + i);
	return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4));	/* XXX sun4d guru, help */
}
#endif

static char *iounit_lockarea(char *vaddr, unsigned long len)
{
	/* FIXME: Write this */
	return vaddr;
}

static void iounit_unlockarea(char *vaddr, unsigned long len)
{
	/* FIXME: Write this */
}
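
/*
 * Hook the IO-UNIT implementations into the btfixup-patched mmu_* entry
 * points used by the sparc32 DVMA layer.
 */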
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
#endif
}
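
/*
 * iounit_map_dma_init() reserves a contiguous DVMA window (same next-fit
 * bitmap search as iounit_get_area(), but no ioptes are written yet) and
 * iounit_map_dma_page() then points one page of that window at a kernel
 * buffer.  A caller would do roughly (hypothetical sketch, not taken from
 * this file):
 *
 *	dvma = iounit_map_dma_init(sbus, 2 * PAGE_SIZE);
 *	iounit_map_dma_page(dvma, buf0, sbus);
 *	iounit_map_dma_page(dvma + PAGE_SIZE, buf1, sbus);
 */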
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	i = 0x0213;
	spin_lock_irqsave(&iounit->lock, flags);
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
	int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
	return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
}