/* $Id: srmmu.c,v 1.192 1999/09/10 10:40:40 davem Exp $
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/blk.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/a.out.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/btfixup.h>

/* #define DEBUG_MAP_KERNEL */
/* #define PAGESKIP_DEBUG */
enum mbus_module srmmu_modtype;
unsigned int hwbug_bitmask;

extern unsigned long sparc_iobase_vaddr;

#ifdef __SMP__
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

static int phys_mem_contig;
BTFIXUPDEF_SETHI(page_contig_offset)
BTFIXUPDEF_CALL(void, ctxd_set, ctxd_t *, pgd_t *)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)

#define ctxd_set(ctxp,pgdp) BTFIXUP_CALL(ctxd_set)(ctxp,pgdp)
#define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)

BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
BTFIXUPDEF_CALL(void, flush_chunk, unsigned long)

#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
int flush_page_for_dma_global = 1;
#define flush_chunk(chunk) BTFIXUP_CALL(flush_chunk)(chunk)

BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
static struct srmmu_stats {
	int invall;
	int invmm;
	int invrnge;
	int invpg;
} module_stats;

ctxd_t *srmmu_ctx_table_phys;
ctxd_t *srmmu_context_table;

/* Don't change this without changing access to this
 * in arch/sparc/mm/viking.S
 */
static struct srmmu_trans {
	unsigned long vbase;
	unsigned long pbase;
	unsigned long size;
} srmmu_map[SPARC_PHYS_BANKS];
#define SRMMU_HASHSZ	256

/* Not static, viking.S uses it. */
unsigned long srmmu_v2p_hash[SRMMU_HASHSZ];
static unsigned long srmmu_p2v_hash[SRMMU_HASHSZ];

#define srmmu_ahashfn(addr)	((addr) >> 24)

int viking_mxcc_present = 0;
static spinlock_t srmmu_context_spinlock = SPIN_LOCK_UNLOCKED;
/* Physical memory can be _very_ non-contiguous on the sun4m, especially
 * the SS10/20 class machines and with the latest openprom revisions.
 * So we have to do a quick lookup.
 * We use the same for SS1000/SC2000 as a fallback, when phys memory is
 * non-contiguous.
 */
static inline unsigned long srmmu_v2p(unsigned long vaddr)
{
	unsigned long off = srmmu_v2p_hash[srmmu_ahashfn(vaddr)];

	return (vaddr + off);
}

static inline unsigned long srmmu_p2v(unsigned long paddr)
{
	unsigned long off = srmmu_p2v_hash[srmmu_ahashfn(paddr)];

	if (off != 0xffffffffUL)
		return (paddr - off);
	else
		return 0xffffffffUL;
}
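
/* Illustrative sketch (not part of the original code): each 16MB slice of
 * the address space (srmmu_ahashfn() == addr >> 24) owns one offset entry,
 * so a translation is one table lookup plus an add.  For mapped RAM the two
 * tables are inverses of each other; the function name below is hypothetical.
 */
#if 0
static void example_v2p_selftest(void)
{
	unsigned long va;

	for (va = KERNBASE; va < KERNBASE + 0x400000; va += PAGE_SIZE)
		if (srmmu_p2v(srmmu_v2p(va)) != va)
			prom_printf("v2p/p2v mismatch at %08lx\n", va);
}
#endif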
/* Physical memory on most SS1000/SC2000 can be contiguous, so we handle
 * that case as a special case to make things faster.
 */
/* FIXME: gcc is stupid here and generates very very bad code in this
 * heavily used routine. So we help it a bit. */
static inline unsigned long srmmu_c_v2p(unsigned long vaddr)
{
#if KERNBASE != 0xf0000000
	if (vaddr >= KERNBASE) return vaddr - KERNBASE;
	return vaddr - BTFIXUP_SETHI(page_contig_offset);
#else
	register unsigned long kernbase;

	__asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));
	return vaddr - ((vaddr >= kernbase) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
#endif
}
static inline unsigned long srmmu_c_p2v(unsigned long paddr)
{
#if KERNBASE != 0xf0000000
	if (paddr < (0xfd000000 - KERNBASE)) return paddr + KERNBASE;
	return (paddr + BTFIXUP_SETHI(page_contig_offset));
#else
	register unsigned long kernbase;
	register unsigned long limit;

	__asm__ ("sethi %%hi(0x0d000000), %0" : "=r"(limit));
	__asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));

	return paddr + ((paddr < limit) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
#endif
}
/* On boxes where there is no lots_of_ram, KERNBASE is mapped to PA<0> and the
 * highest PA is below 0x0d000000, so we can optimize even more :)
 */
static inline unsigned long srmmu_s_v2p(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;
}

static inline unsigned long srmmu_s_p2v(unsigned long paddr)
{
	return paddr + PAGE_OFFSET;
}
/* In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

/* Functions really use this, not srmmu_swap directly. */
#define srmmu_set_entry(ptr, newentry) srmmu_swap((unsigned long *) (ptr), (newentry))
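
/* Illustrative sketch (not part of the original code): every pte/pmd/pgd/ctxd
 * store in this file goes through srmmu_set_entry(), so the update is a single
 * atomic "swap" and a concurrent hardware ref/mod-bit write-back cannot be lost
 * between a read and a write of the entry.  The function name is hypothetical.
 */
#if 0
static inline unsigned long example_replace_pte(pte_t *ptep, pte_t newval)
{
	/* Returns the previous raw entry, including any SRMMU_REF/SRMMU_DIRTY
	 * bits the MMU may have set just before the swap.
	 */
	return srmmu_set_entry(ptep, pte_val(newval));
}
#endif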
#ifdef PAGESKIP_DEBUG
#define PGSKIP_DEBUG(from,to) prom_printf("PG_skip %ld->%ld\n", (long)(from), (long)(to)); printk("PG_skip %ld->%ld\n", (long)(from), (long)(to))
#else
#define PGSKIP_DEBUG(from,to) do { } while (0)
#endif
void __init srmmu_frob_mem_map(unsigned long start_mem)
{
	unsigned long bank_start, bank_end = 0;
	unsigned long addr;
	int i;

	/* First, mark all pages as invalid. */
	for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
		mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);

	/* Next, pg[0-3] is sun4c cruft, so we can free it... */
	mem_map[MAP_NR(pg0)].flags &= ~(1<<PG_reserved);
	mem_map[MAP_NR(pg1)].flags &= ~(1<<PG_reserved);
	mem_map[MAP_NR(pg2)].flags &= ~(1<<PG_reserved);
	mem_map[MAP_NR(pg3)].flags &= ~(1<<PG_reserved);

	start_mem = PAGE_ALIGN(start_mem);
	for(i = 0; srmmu_map[i].size; i++) {
		bank_start = srmmu_map[i].vbase;

		/* Making a one or two pages PG_skip holes
		 * is not necessary.  We add one more because
		 * we must set the PG_skip flag on the first
		 * two mem_map[] entries for the hole.  Go and
		 * see the mm/filemap.c:shrink_mmap() loop for
		 * details.
		 */
		if (i && bank_start - bank_end > 3 * PAGE_SIZE) {
			mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
			mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(bank_start);
			mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
			mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map + MAP_NR(bank_start);
			PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(bank_start));
			if (bank_end > KERNBASE && bank_start < KERNBASE) {
				mem_map[0].flags |= (1<<PG_skip);
				mem_map[0].next_hash = mem_map + MAP_NR(bank_start);
				mem_map[1].flags |= (1<<PG_skip);
				mem_map[1].next_hash = mem_map + MAP_NR(bank_start);
				PGSKIP_DEBUG(0, MAP_NR(bank_start));
			}
		}

		bank_end = bank_start + srmmu_map[i].size;
		while(bank_start < bank_end) {
			set_bit(MAP_NR(bank_start) >> 8, sparc_valid_addr_bitmap);
			if((bank_start >= KERNBASE) &&
			   (bank_start < start_mem)) {
				bank_start += PAGE_SIZE;
				continue;
			}
			mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved);
			bank_start += PAGE_SIZE;
		}

		if (bank_end == 0xfd000000)
			bank_end = PAGE_OFFSET;
	}

	if (bank_end < KERNBASE) {
		mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
		mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(KERNBASE);
		mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
		mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map + MAP_NR(KERNBASE);
		PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(KERNBASE));
	} else if (MAP_NR(bank_end) < max_mapnr) {
		mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
		mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
		if (mem_map[0].flags & (1 << PG_skip)) {
			mem_map[MAP_NR(bank_end)].next_hash = mem_map[0].next_hash;
			mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map[0].next_hash;
			PGSKIP_DEBUG(MAP_NR(bank_end), mem_map[0].next_hash - mem_map);
		} else {
			mem_map[MAP_NR(bank_end)].next_hash = mem_map;
			mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map;
			PGSKIP_DEBUG(MAP_NR(bank_end), 0);
		}
	}
}
/* The very generic SRMMU page table operations. */
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_pmd_page(pmd_t pmd)
{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_pte_page(pte_t pte)
{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }

static unsigned long srmmu_c_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_c_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_c_pmd_page(pmd_t pmd)
{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_c_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_c_pte_page(pte_t pte)
{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_c_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }

static unsigned long srmmu_s_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_s_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_s_pmd_page(pmd_t pmd)
{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_s_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_s_pte_page(pte_t pte)
{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_s_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }

static inline void srmmu_pte_clear(pte_t *ptep)	{ set_pte(ptep, __pte(0)); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static inline int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pmd_clear(pmd_t *pmdp)	{ set_pte((pte_t *)pmdp, __pte(0)); }

static inline int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }

static inline int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pgd_clear(pgd_t * pgdp)	{ set_pte((pte_t *)pgdp, __pte(0)); }

static inline int srmmu_pte_write(pte_t pte)		{ return pte_val(pte) & SRMMU_WRITE; }
static inline int srmmu_pte_dirty(pte_t pte)		{ return pte_val(pte) & SRMMU_DIRTY; }
static inline int srmmu_pte_young(pte_t pte)		{ return pte_val(pte) & SRMMU_REF; }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
static inline pte_t srmmu_pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
static inline pte_t srmmu_pte_mkold(pte_t pte)		{ return __pte(pte_val(pte) & ~SRMMU_REF);}
static inline pte_t srmmu_pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_WRITE);}
static inline pte_t srmmu_pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
static inline pte_t srmmu_pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_REF);}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
{ return __pte(((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_c_mk_pte(unsigned long page, pgprot_t pgprot)
{ return __pte(((srmmu_c_v2p(page)) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_s_mk_pte(unsigned long page, pgprot_t pgprot)
{ return __pte(((srmmu_s_v2p(page)) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
}

static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
}

static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
}

static void srmmu_c_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) pgdp) >> 4)));
}

static void srmmu_c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) pmdp) >> 4)));
}

static void srmmu_c_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) ptep) >> 4)));
}

static void srmmu_s_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pgdp) >> 4)));
}

static void srmmu_s_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pmdp) >> 4)));
}

static void srmmu_s_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) ptep) >> 4)));
}
static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot));
}
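
/* Illustrative sketch (not part of the original code): mk_pte() packs a
 * (physical page >> 4) page number together with protection bits, and
 * pte_modify() replaces only the protection part, keeping the page number
 * and the ref/mod state selected by SRMMU_CHG_MASK.  The function name is
 * hypothetical; PAGE_SHARED is simply a convenient existing pgprot value.
 */
#if 0
static inline pte_t example_make_readonly(unsigned long page)
{
	pte_t pte = srmmu_mk_pte(page, PAGE_SHARED);

	return srmmu_pte_modify(pte, __pgprot(pgprot_val(PAGE_SHARED) & ~SRMMU_WRITE));
}
#endif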
/* to find an entry in a top-level page table... */
static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> SRMMU_PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}

static inline pmd_t *srmmu_c_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_c_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

static inline pte_t *srmmu_c_pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) srmmu_c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}

static inline pmd_t *srmmu_s_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_s_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

static inline pte_t *srmmu_s_pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) srmmu_s_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
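
/* Illustrative sketch (not part of the original code): a manual walk of the
 * three-level SRMMU tables using the offset helpers above.  No validity
 * checks are performed here; the function name is hypothetical.
 */
#if 0
static inline pte_t *example_page_table_walk(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgdp = srmmu_pgd_offset(mm, address);
	pmd_t *pmdp = srmmu_pmd_offset(pgdp, address);

	return srmmu_pte_offset(pmdp, address);
}
#endif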
/* This must update the context table entry for this process. */
static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	if(tsk->mm->context != NO_CONTEXT &&
	   tsk->mm->pgd != pgdp) {
		flush_cache_mm(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(tsk->mm);
	}
}
static inline pte_t *srmmu_get_pte_fast(void)
{
	struct page *ret;

	spin_lock(&pte_spinlock);
	if ((ret = (struct page *)pte_quicklist) != NULL) {
		unsigned int mask = (unsigned int)ret->pprev_hash;
		unsigned int tmp, off;

		if (mask & 0xff)
			for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 256);
		else
			for (tmp = 0x100, off = 2048; (mask & tmp) == 0; tmp <<= 1, off += 256);
		(unsigned int)ret->pprev_hash = mask & ~tmp;
		if (!(mask & ~tmp))
			pte_quicklist = (unsigned long *)ret->next_hash;
		ret = (struct page *)(page_address(ret) + off);
		pgtable_cache_size--;
	}
	spin_unlock(&pte_spinlock);
	return (pte_t *)ret;
}
static inline pte_t *srmmu_get_pte_slow(void)
{
	pte_t *ret;
	struct page *page;

	ret = (pte_t *)get_free_page(GFP_KERNEL);
	if (ret) {
		page = mem_map + MAP_NR(ret);
		flush_chunk((unsigned long)ret);
		(unsigned int)page->pprev_hash = 0xfffe;
		spin_lock(&pte_spinlock);
		(unsigned long *)page->next_hash = pte_quicklist;
		pte_quicklist = (unsigned long *)page;
		pgtable_cache_size += 15;
	}
	return ret;
}
static inline pgd_t *srmmu_get_pgd_fast(void)
{
	struct page *ret;

	spin_lock(&pgd_spinlock);
	if ((ret = (struct page *)pgd_quicklist) != NULL) {
		unsigned int mask = (unsigned int)ret->pprev_hash;
		unsigned int tmp, off;

		for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 1024);
		(unsigned int)ret->pprev_hash = mask & ~tmp;
		if (!(mask & ~tmp))
			pgd_quicklist = (unsigned long *)ret->next_hash;
		ret = (struct page *)(page_address(ret) + off);
	}
	spin_unlock(&pgd_spinlock);
	return (pgd_t *)ret;
}
static inline pgd_t *srmmu_get_pgd_slow(void)
{
	pgd_t *ret;
	struct page *page;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (ret) {
		pgd_t *init = pgd_offset(&init_mm, 0);
		memset(ret + (0 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + (0 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
						(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		memset(ret + (1 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + (1 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
						(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		memset(ret + (2 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + (2 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
						(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		memset(ret + (3 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + (3 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
						(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		page = mem_map + MAP_NR(ret);
		flush_chunk((unsigned long)ret);
		(unsigned int)page->pprev_hash = 0xe;
		spin_lock(&pgd_spinlock);
		(unsigned long *)page->next_hash = pgd_quicklist;
		pgd_quicklist = (unsigned long *)page;
		spin_unlock(&pgd_spinlock);
	}
	return ret;
}
static void srmmu_free_pte_slow(pte_t *pte)
{
}

static void srmmu_free_pgd_slow(pgd_t *pgd)
{
}

static inline void srmmu_pte_free(pte_t *pte)
{
	struct page *page = mem_map + MAP_NR(pte);

	spin_lock(&pte_spinlock);
	if (!page->pprev_hash) {
		(unsigned long *)page->next_hash = pte_quicklist;
		pte_quicklist = (unsigned long *)page;
	}
	(unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pte) >> 8) & 15));
	pgtable_cache_size++;
	spin_unlock(&pte_spinlock);
}
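
/* Illustrative note (not part of the original code): one 4K page holds
 * sixteen 256-byte SRMMU pte tables, so the quicklists track free
 * sub-chunks with a 16-bit mask kept in page->pprev_hash.  The helper
 * below is a hypothetical sketch of the chunk-index computation that
 * srmmu_pte_free() performs inline above.
 */
#if 0
static inline int example_pte_chunk_index(pte_t *pte)
{
	/* Byte offset within the page, divided by the 256-byte table size. */
	return (((unsigned long)pte) >> 8) & 15;
}
#endif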
static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
	if(srmmu_pmd_none(*pmd)) {
		pte_t *page = srmmu_get_pte_fast();

		if (page) {
			pmd_set(pmd, page);
			return page + address;
		}
		page = srmmu_get_pte_slow();
		if(srmmu_pmd_none(*pmd)) {
			if(page) {
				spin_unlock(&pte_spinlock);
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		if (page) {
			(unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
			pgtable_cache_size++;
			spin_unlock(&pte_spinlock);
		}
	}
	if(srmmu_pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return ((pte_t *) pmd_page(*pmd)) + address;
}
/* Real three-level page tables on SRMMU. */
static void srmmu_pmd_free(pmd_t * pmd)
{
	return srmmu_pte_free((pte_t *)pmd);
}

static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
{
	address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
	if(srmmu_pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *)srmmu_get_pte_fast();

		if (page) {
			pgd_set(pgd, page);
			return page + address;
		}
		page = (pmd_t *)srmmu_get_pte_slow();
		if(srmmu_pgd_none(*pgd)) {
			if(page) {
				spin_unlock(&pte_spinlock);
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
			return NULL;
		}
		if (page) {
			(unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
			pgtable_cache_size++;
			spin_unlock(&pte_spinlock);
		}
	}
	if(srmmu_pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
static void srmmu_pgd_free(pgd_t *pgd)
{
	struct page *page = mem_map + MAP_NR(pgd);

	spin_lock(&pgd_spinlock);
	if (!page->pprev_hash) {
		(unsigned long *)page->next_hash = pgd_quicklist;
		pgd_quicklist = (unsigned long *)page;
	}
	(unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pgd) >> 10) & 3));
	spin_unlock(&pgd_spinlock);
}

static pgd_t *srmmu_pgd_alloc(void)
{
	pgd_t *ret;

	ret = srmmu_get_pgd_fast();
	if (ret) return ret;
	return srmmu_get_pgd_slow();
}
static void srmmu_set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	struct page *page;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
	spin_lock(&pgd_spinlock);
	address >>= SRMMU_PGDIR_SHIFT;
	for (page = (struct page *)pgd_quicklist; page; page = page->next_hash) {
		pgd_t *pgd = (pgd_t *)page_address(page);
		unsigned int mask = (unsigned int)page->pprev_hash;

		if (mask & 1)
			pgd[address + 0 * SRMMU_PTRS_PER_PGD] = entry;
		if (mask & 2)
			pgd[address + 1 * SRMMU_PTRS_PER_PGD] = entry;
		if (mask & 4)
			pgd[address + 2 * SRMMU_PTRS_PER_PGD] = entry;
		if (mask & 8)
			pgd[address + 3 * SRMMU_PTRS_PER_PGD] = entry;
		if (mask)
			flush_chunk((unsigned long)pgd);
	}
	spin_unlock(&pgd_spinlock);
}
static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
{
	srmmu_set_entry(ptep, pte_val(pteval));
}

static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line, page;

	srmmu_set_entry(ptep, pte_val(pteval));
	page = ((unsigned long)ptep) & PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

static void srmmu_set_pte_nocache_viking(pte_t *ptep, pte_t pteval)
{
	unsigned long vaddr;
	int set;
	int i;

	set = ((unsigned long)ptep >> 5) & 0x7f;
	vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
	srmmu_set_entry(ptep, pte_val(pteval));
	for (i = 0; i < 8; i++) {
		__asm__ __volatile__ ("ld [%0], %%g0" : : "r" (vaddr));
		vaddr += PAGE_SIZE;
	}
}
static void srmmu_quick_kernel_fault(unsigned long address)
{
#ifdef __SMP__
	printk("CPU[%d]: Kernel faults at addr=0x%08lx\n",
	       smp_processor_id(), address);
#else
	printk("Kernel faults at addr=0x%08lx\n", address);
	printk("PTE=%08lx\n", srmmu_hwprobe((address & PAGE_MASK)));
	die_if_kernel("SRMMU bolixed...", current->tss.kregs);
#endif
}
static inline void alloc_context(struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if(ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == current->mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}
static void srmmu_switch_to_context(struct task_struct *tsk)
{
	if(tsk->mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(tsk->mm);
		spin_unlock(&srmmu_context_spinlock);
		ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
	}
	srmmu_set_context(tsk->mm->context);
}

static void srmmu_init_new_context(struct mm_struct *mm)
{
	spin_lock(&srmmu_context_spinlock);
	alloc_context(mm);
	spin_unlock(&srmmu_context_spinlock);

	ctxd_set(&srmmu_context_table[mm->context], mm->pgd);

	if(mm == current->mm)
		srmmu_set_context(mm->context);
}
/* Low level IO area allocation on the SRMMU. */
void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = srmmu_pgd_offset(&init_mm, virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	if(rdonly)
		tmp |= SRMMU_PRIV_RDONLY;
	else
		tmp |= SRMMU_PRIV;
	flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
	flush_tlb_all();
}

void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = srmmu_pgd_offset(&init_mm, virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	set_pte(ptep, mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
	flush_tlb_all();
}
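
/* Illustrative sketch (not part of the original code): typical use of the
 * two helpers above when handing a driver one page of device registers.
 * The physical address and bus type are made up, and sparc_iobase_vaddr is
 * simply reused here as a convenient I/O virtual address.
 */
#if 0
static void example_map_device_regs(void)
{
	/* Map one page read/write (rdonly == 0) in I/O space... */
	srmmu_mapioaddr(0x10000000, sparc_iobase_vaddr, 0xf, 0);
	/* ... touch the registers through sparc_iobase_vaddr, then unmap. */
	srmmu_unmapioaddr(sparc_iobase_vaddr);
}
#endif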
/* This is used in many routines below. */
#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))

/* On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool.  As a side effect we are putting a little too much pressure
 * on the gfp() subsystem.  This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code as we did
 * on the sun4c.
 */
struct task_struct *srmmu_alloc_task_struct(void)
{
	return (struct task_struct *) __get_free_pages(GFP_KERNEL, 1);
}

static void srmmu_free_task_struct(struct task_struct *tsk)
{
	free_pages((unsigned long)tsk, 1);
}
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_chunk(unsigned long chunk);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
/* Workaround, until we find what's going on with Swift. When low on memory,
 * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page, finds the
 * translation is already in the page tables, and then faults again on the
 * same instruction. I really don't understand it, have checked it and contexts
 * are right, flush_tlb_all is done as well, and it faults again... Strange. -jj
 */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	static unsigned long last;

	if (last == address) viking_hwprobe(address);
	last = address;
}

/* Swift flushes.  It has the recommended SRMMU specification flushing
 * facilities, so we can do things in a more fine grained fashion than we
 * could on the tsunami.  Let's watch out for HARDWARE BUGS...
 */

static void swift_flush_cache_all(void)
{
	flush_user_windows();
	swift_idflash_clear();
}

static void swift_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	swift_idflash_clear();
	FLUSH_END
}

static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	swift_idflash_clear();
	FLUSH_END
}

static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if(vma->vm_flags & VM_EXEC)
		swift_flush_icache();
	swift_flush_dcache();
	FLUSH_END
}

/* Not copy-back on swift. */
static void swift_flush_page_to_ram(unsigned long page)
{
}

/* But not IO coherent either. */
static void swift_flush_page_for_dma(unsigned long page)
{
	swift_flush_dcache();
}

/* Again, Swift is non-snooping split I/D cache'd just like tsunami,
 * so have to punt the icache for on-stack signal insns.  Only the
 * icache need be flushed since the dcache is write-through.
 */
static void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	swift_flush_icache();
}

static void swift_flush_chunk(unsigned long chunk)
{
}

static void swift_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
	module_stats.invall++;
}

static void swift_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	module_stats.invmm++;
	FLUSH_END
}

static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	module_stats.invrnge++;
	FLUSH_END
}

static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	module_stats.invpg++;
	FLUSH_END
}
/* The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}
static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}
static void cypress_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_PMD_SIZE;
	}
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}
static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}
/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

static void cypress_flush_chunk(unsigned long chunk)
{
	cypress_flush_page_to_ram(chunk);
}
/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has unified L2 VIPT, from which both instructions and data
 * are stored.  It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
	module_stats.invall++;
}
static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__("
	lda	[%0] %3, %%g5
	sta	%2, [%0] %3
	sta	%%g0, [%1] %4
	sta	%%g5, [%0] %3"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	module_stats.invmm++;
	FLUSH_END
}

static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__("
	lda	[%0] %5, %%g5
	sta	%1, [%0] %5
1:	subcc	%3, %4, %3
	bne	1b
	 sta	%%g0, [%2 + %3] %6
	sta	%%g5, [%0] %5"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	module_stats.invrnge++;
	FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__("
	lda	[%0] %3, %%g5
	sta	%1, [%0] %3
	sta	%%g0, [%2] %4
	sta	%%g5, [%0] %3"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	module_stats.invpg++;
	FLUSH_END
}
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_chunk(unsigned long chunk);
extern void viking_c_flush_chunk(unsigned long chunk);
extern void viking_s_flush_chunk(unsigned long chunk);
extern void viking_mxcc_flush_chunk(unsigned long chunk);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_chunk(unsigned long chunk);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);
static void srmmu_set_pte_nocache_hyper(pte_t *ptep, pte_t pteval)
{
	unsigned long page = ((unsigned long)ptep) & PAGE_MASK;

	srmmu_set_entry(ptep, pte_val(pteval));
	hypersparc_flush_page_to_ram(page);
}

static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4))));
	hypersparc_flush_page_to_ram((unsigned long)ctxp);
	hyper_flush_whole_icache();
}
static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;

	if(pgdp != swapper_pg_dir)
		hypersparc_flush_page_to_ram(page);

	if(tsk->mm->context != NO_CONTEXT &&
	   tsk->mm->pgd != pgdp) {
		flush_cache_mm(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(tsk->mm);
	}
}
static void viking_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	if(pgdp != swapper_pg_dir)
		flush_chunk((unsigned long)pgdp);
	if(tsk->mm->context != NO_CONTEXT &&
	   tsk->mm->pgd != pgdp) {
		flush_cache_mm(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(tsk->mm);
	}
}
static void cypress_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
	unsigned long line;

	if(pgdp == swapper_pg_dir)
		goto skip_flush;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
skip_flush:
	if(tsk->mm->context != NO_CONTEXT &&
	   tsk->mm->pgd != pgdp) {
		flush_cache_mm(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(tsk->mm);
	}
}
static void hypersparc_switch_to_context(struct task_struct *tsk)
{
	if(tsk->mm->context == NO_CONTEXT) {
		ctxd_t *ctxp;

		spin_lock(&srmmu_context_spinlock);
		alloc_context(tsk->mm);
		spin_unlock(&srmmu_context_spinlock);
		ctxp = &srmmu_context_table[tsk->mm->context];
		srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) tsk->mm->pgd) >> 4))));
		hypersparc_flush_page_to_ram((unsigned long)ctxp);
	}
	hyper_flush_whole_icache();
	srmmu_set_context(tsk->mm->context);
}
static void hypersparc_init_new_context(struct mm_struct *mm)
{
	ctxd_t *ctxp;

	spin_lock(&srmmu_context_spinlock);
	alloc_context(mm);
	spin_unlock(&srmmu_context_spinlock);

	ctxp = &srmmu_context_table[mm->context];
	srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) mm->pgd) >> 4))));
	hypersparc_flush_page_to_ram((unsigned long)ctxp);

	if(mm == current->mm) {
		hyper_flush_whole_icache();
		srmmu_set_context(mm->context);
	}
}
static unsigned long mempool;

/* NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static unsigned long kbpage;
/* Some dirty hacks to abstract away the painful boot up init. */
static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
{
	return ((vaddr - KERNBASE) + kbpage);
}

static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4))));
}

static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	set_pte((pte_t *)pmdp, __pte((SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4))));
}

static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
{
	return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
}

static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
{
	return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
}

static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = srmmu_pgd_offset(&init_mm, start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
			srmmu_early_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_early_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
			srmmu_early_pmd_set(pmdp, ptep);
		}
		start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
	}
}
/* This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if(!(start & ~(SRMMU_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			   prompte)
				what = 2;
		}

		pgdp = srmmu_pgd_offset(&init_mm, start);
		if(what == 2) {
			*pgdp = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
			srmmu_early_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_early_pmd_offset(pgdp, start);
		if(what == 1) {
			*pmdp = __pmd(prompte);
			start += SRMMU_PMD_SIZE;
			continue;
		}
		if(srmmu_pmd_none(*pmdp)) {
			ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
			srmmu_early_pmd_set(pmdp, ptep);
		}
		ptep = srmmu_early_pte_offset(pmdp, start);
		*ptep = __pte(prompte);
		start += PAGE_SIZE;
	}
}
#ifdef DEBUG_MAP_KERNEL
#define MKTRACE(foo) prom_printf foo
#else
#define MKTRACE(foo)
#endif

static int lots_of_ram __initdata = 0;
static int srmmu_low_pa __initdata = 0;
static unsigned long end_of_phys_memory __initdata = 0;
void __init srmmu_end_memory(unsigned long memory_size, unsigned long *end_mem_p)
{
	unsigned int sum = 0;
	unsigned long last = 0xff000000;
	long first, cur;
	unsigned long pa;
	unsigned long total = 0;
	int i;

	pa = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
	pa = (pa & SRMMU_PTE_PMASK) << 4;
	if (!sp_banks[0].base_addr && pa == PAGE_SIZE) {
		for(i = 0; sp_banks[i].num_bytes != 0; i++) {
			if (sp_banks[i].base_addr + sp_banks[i].num_bytes > 0x0d000000)
				break;
		}
		if (!sp_banks[i].num_bytes) {
			srmmu_low_pa = 1;
			end_of_phys_memory = SRMMU_PGDIR_ALIGN(sp_banks[i-1].base_addr + sp_banks[i-1].num_bytes);
			*end_mem_p = KERNBASE + end_of_phys_memory;
			if (sp_banks[0].num_bytes >= (6 * 1024 * 1024) || end_of_phys_memory <= 0x06000000) {
				/* Make sure there will be enough memory for the whole mem_map (even if sparse) */
				return;
			}
		}
	}
	for(i = 0; sp_banks[i].num_bytes != 0; i++) {
		pa = sp_banks[i].base_addr;
		first = (pa & (~SRMMU_PGDIR_MASK));
		cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE);
		if (cur < 0) cur = 0;
		if (!first || last != (pa & SRMMU_PGDIR_MASK))
			total += SRMMU_PGDIR_SIZE;
		sum += sp_banks[i].num_bytes;
		if (memory_size) {
			if (sum > memory_size) {
				sp_banks[i].num_bytes -=
					(sum - memory_size);
				cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE);
				if (cur < 0) cur = 0;
				total += SRMMU_PGDIR_ALIGN(cur);
				sp_banks[++i].base_addr = 0xdeadbeef;
				sp_banks[i].num_bytes = 0;
				break;
			}
		}
		total += SRMMU_PGDIR_ALIGN(cur);
		last = (sp_banks[i].base_addr + sp_banks[i].num_bytes - 1) & SRMMU_PGDIR_MASK;
	}
	if (total <= 0x0d000000)
		*end_mem_p = KERNBASE + total;
	else {
		*end_mem_p = 0xfd000000;
		lots_of_ram = 1;
	}
	end_of_phys_memory = total;
}
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = srmmu_pgd_offset(&init_mm, vaddr);
	unsigned long big_pte;

	MKTRACE(("dlm[v<%08lx>-->p<%08lx>]", vaddr, phys_base));
	big_pte = KERNEL_PTE(phys_base >> 4);
	*pgdp = __pgd(big_pte);
}
/* Look in the sp_bank for the given physical page, return the
 * index number the entry was found in, or -1 for not found.
 */
static inline int find_in_spbanks(unsigned long phys_page)
{
	int entry;

	for(entry = 0; sp_banks[entry].num_bytes; entry++) {
		unsigned long start = sp_banks[entry].base_addr;
		unsigned long end = start + sp_banks[entry].num_bytes;

		if((start <= phys_page) && (phys_page < end))
			return entry;
	}
	return -1;
}

/* Find an spbank entry not mapped as of yet, TAKEN_VECTOR is an
 * array of char's, each member indicating if that spbank is mapped
 * yet or not.
 */
static int __init find_free_spbank(char *taken_vector)
{
	int entry;

	for(entry = 0; sp_banks[entry].num_bytes; entry++)
		if(!taken_vector[entry])
			break;
	return entry;
}

static unsigned long map_spbank_last_pa __initdata = 0xff000000;
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
 */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	static int srmmu_bank = 0;

	MKTRACE(("map_spbank %d[v<%08lx>p<%08lx>s<%08lx>]", sp_entry, vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes));
	MKTRACE(("map_spbank2 %d[p%08lx v%08lx-%08lx]", sp_entry, pstart, vstart, vend));
	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	srmmu_map[srmmu_bank].vbase = vbase;
	srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr;
	srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes;
	srmmu_bank++;
	map_spbank_last_pa = pstart - SRMMU_PGDIR_SIZE;
	return vstart;
}
static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}
/* Assumptions: The bank given to the kernel from the prom/bootloader
 * is part of a full bank which is at least 4MB in size and begins at
 * 0xf0000000 (ie. KERNBASE).
 */
static inline void map_kernel(void)
{
	unsigned long raw_pte, physpage;
	unsigned long vaddr, low_base;
	char etaken[SPARC_PHYS_BANKS];
	int entry;

	/* Step 1: Clear out sp_banks taken map. */
	MKTRACE(("map_kernel: clearing etaken vector... "));
	for(entry = 0; entry < SPARC_PHYS_BANKS; entry++)
		etaken[entry] = 0;

	low_base = KERNBASE;

	/* Step 2: Fill in KERNBASE base pgd.  Lots of sanity checking here. */
	raw_pte = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
	if((raw_pte & SRMMU_ET_MASK) != SRMMU_ET_PTE)
		memprobe_error("Wheee, kernel not mapped at all by boot loader.\n");
	physpage = (raw_pte & SRMMU_PTE_PMASK) << 4;
	physpage -= PAGE_SIZE;
	if(physpage & ~(SRMMU_PGDIR_MASK))
		memprobe_error("Wheee, kernel not mapped on 16MB physical boundary.\n");
	entry = find_in_spbanks(physpage);
	if(entry == -1 || (sp_banks[entry].base_addr != physpage))
		memprobe_error("Kernel mapped in non-existent memory.\n");
	MKTRACE(("map_kernel: map_spbank(vbase=%08x, entry<%d>)[%08lx,%08lx]\n", KERNBASE, entry, sp_banks[entry].base_addr, sp_banks[entry].num_bytes));
	if (sp_banks[entry].num_bytes > 0x0d000000) {
		unsigned long orig_base = sp_banks[entry].base_addr;
		unsigned long orig_len = sp_banks[entry].num_bytes;
		unsigned long can_map = 0x0d000000;

		/* Map a partial bank in this case, adjust the base
		 * and the length, but don't mark it used.
		 */
		sp_banks[entry].num_bytes = can_map;
		MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
		vaddr = map_spbank(KERNBASE, entry);
		MKTRACE(("vaddr now %08lx ", vaddr));
		sp_banks[entry].base_addr = orig_base + can_map;
		sp_banks[entry].num_bytes = orig_len - can_map;
		MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
		MKTRACE(("map_kernel: skipping first loop\n"));
		goto loop_skip;
	}
	vaddr = map_spbank(KERNBASE, entry);
	etaken[entry] = 1;
	/* Step 3: Map what we can above KERNBASE. */
	MKTRACE(("map_kernel: vaddr=%08lx, entering first loop\n", vaddr));
	while(1) {
		unsigned long bank_size;

		MKTRACE(("map_kernel: ffsp()"));
		entry = find_free_spbank(&etaken[0]);
		bank_size = sp_banks[entry].num_bytes;
		MKTRACE(("<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
		if(!bank_size)
			break;
		if (srmmu_low_pa)
			vaddr = KERNBASE + sp_banks[entry].base_addr;
		else if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) {
			if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK))
				vaddr -= SRMMU_PGDIR_SIZE;
			vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK));
		}
		if ((vaddr + bank_size - KERNBASE) > 0x0d000000) {
			unsigned long orig_base = sp_banks[entry].base_addr;
			unsigned long orig_len = sp_banks[entry].num_bytes;
			unsigned long can_map = (0xfd000000 - vaddr);

			/* Map a partial bank in this case, adjust the base
			 * and the length, but don't mark it used.
			 */
			sp_banks[entry].num_bytes = can_map;
			MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
			vaddr = map_spbank(vaddr, entry);
			MKTRACE(("vaddr now %08lx ", vaddr));
			sp_banks[entry].base_addr = orig_base + can_map;
			sp_banks[entry].num_bytes = orig_len - can_map;
			MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
			break;
		}

		/* Ok, we can map this one, do it. */
		MKTRACE(("map_spbank(%08lx,entry<%d>) ", vaddr, entry));
		vaddr = map_spbank(vaddr, entry);
		etaken[entry] = 1;
		MKTRACE(("vaddr now %08lx\n", vaddr));
	}

loop_skip:
	/* If not lots_of_ram, assume we did indeed map it all above. */
	if(!lots_of_ram)
		goto check_and_return;
	/* Step 4: Map the rest (if any) right below KERNBASE. */
	MKTRACE(("map_kernel: doing low mappings... "));
	low_base = (KERNBASE - end_of_phys_memory + 0x0d000000);
	MKTRACE(("end_of_phys_memory=%08lx low_base=%08lx\n", end_of_phys_memory, low_base));

	/* Ok, now map 'em. */
	MKTRACE(("map_kernel: Allocate pt skeleton (%08lx, %08x)\n",low_base,KERNBASE));
	srmmu_allocate_ptable_skeleton(low_base, KERNBASE);
	vaddr = low_base;
	map_spbank_last_pa = 0xff000000;
	MKTRACE(("map_kernel: vaddr=%08lx Entering second loop for low maps.\n", vaddr));
	while(1) {
		unsigned long bank_size;

		entry = find_free_spbank(&etaken[0]);
		bank_size = sp_banks[entry].num_bytes;
		MKTRACE(("map_kernel: e<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
		if(!bank_size)
			break;
		if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) {
			if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK))
				vaddr -= SRMMU_PGDIR_SIZE;
			vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK));
		}
		if((vaddr + bank_size) > KERNBASE)
			memprobe_error("Wheee, kernel low mapping overflow.\n");
		MKTRACE(("map_spbank(%08lx, %d) ", vaddr, entry));
		vaddr = map_spbank(vaddr, entry);
		etaken[entry] = 1;
		MKTRACE(("Now, vaddr=%08lx end_of_phys_memory=%08lx\n", vaddr, end_of_phys_memory));
	}
check_and_return:
	/* Step 5: Sanity check, make sure we did it all. */
	MKTRACE(("check_and_return: "));
	for(entry = 0; sp_banks[entry].num_bytes; entry++) {
		MKTRACE(("e[%d]=%d ", entry, etaken[entry]));
		if(!etaken[entry]) {
			MKTRACE(("oops\n"));
			memprobe_error("Some bank did not get mapped.\n");
		}
	}
	MKTRACE(("success\n"));
	init_mm.mmap->vm_start = page_offset = low_base;
	stack_top = page_offset - PAGE_SIZE;
	BTFIXUPSET_SETHI(page_offset, low_base);
	BTFIXUPSET_SETHI(stack_top, page_offset - PAGE_SIZE);
	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, page_offset / SRMMU_PGDIR_SIZE);

	for(entry = 0; srmmu_map[entry].size; entry++) {
		printk("[%d]: v[%08lx,%08lx](%lx) p[%08lx]\n", entry,
		       srmmu_map[entry].vbase,
		       srmmu_map[entry].vbase + srmmu_map[entry].size,
		       srmmu_map[entry].size,
		       srmmu_map[entry].pbase);
	}
	/* Now setup the p2v/v2p hash tables. */
	for(entry = 0; entry < SRMMU_HASHSZ; entry++)
		srmmu_v2p_hash[entry] = ((0xff - entry) << 24);
	for(entry = 0; entry < SRMMU_HASHSZ; entry++)
		srmmu_p2v_hash[entry] = 0xffffffffUL;
	for(entry = 0; srmmu_map[entry].size; entry++) {
		unsigned long addr;

		for(addr = srmmu_map[entry].vbase;
		    addr < (srmmu_map[entry].vbase + srmmu_map[entry].size);
		    addr += (1 << 24))
			srmmu_v2p_hash[srmmu_ahashfn(addr)] =
				srmmu_map[entry].pbase - srmmu_map[entry].vbase;
		for(addr = srmmu_map[entry].pbase;
		    addr < (srmmu_map[entry].pbase + srmmu_map[entry].size);
		    addr += (1 << 24))
			srmmu_p2v_hash[srmmu_ahashfn(addr)] =
				srmmu_map[entry].pbase - srmmu_map[entry].vbase;
	}

	BTFIXUPSET_SETHI(page_contig_offset, page_offset - (0xfd000000 - KERNBASE));
	if (srmmu_low_pa)
		phys_mem_contig = 0;
	else {
		phys_mem_contig = 1;
		for(entry = 0; srmmu_map[entry].size; entry++)
			if (srmmu_map[entry].pbase != srmmu_c_v2p (srmmu_map[entry].vbase)) {
				phys_mem_contig = 0;
				break;
			}
	}
	if (phys_mem_contig) {
		printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes.\n");
		BTFIXUPSET_CALL(pte_page, srmmu_c_pte_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_page, srmmu_c_pmd_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_page, srmmu_c_pgd_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mk_pte, srmmu_c_mk_pte, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pte_offset, srmmu_c_pte_offset, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_offset, srmmu_c_pmd_offset, BTFIXUPCALL_NORM);
		if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set)
			BTFIXUPSET_CALL(ctxd_set, srmmu_c_ctxd_set, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_set, srmmu_c_pgd_set, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_set, srmmu_c_pmd_set, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_v2p, srmmu_c_v2p, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_p2v, srmmu_c_p2v, BTFIXUPCALL_NORM);
		if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk)
			BTFIXUPSET_CALL(flush_chunk, viking_c_flush_chunk, BTFIXUPCALL_NORM);
	} else if (srmmu_low_pa) {
		printk ("SRMMU: Compact physical memory. Using straightforward VA<->PA translations.\n");
		BTFIXUPSET_CALL(pte_page, srmmu_s_pte_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_page, srmmu_s_pmd_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_page, srmmu_s_pgd_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mk_pte, srmmu_s_mk_pte, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pte_offset, srmmu_s_pte_offset, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_offset, srmmu_s_pmd_offset, BTFIXUPCALL_NORM);
		if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set)
			BTFIXUPSET_CALL(ctxd_set, srmmu_s_ctxd_set, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_set, srmmu_s_pgd_set, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_set, srmmu_s_pmd_set, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_v2p, srmmu_s_v2p, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_p2v, srmmu_s_p2v, BTFIXUPCALL_NORM);
		if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk)
			BTFIXUPSET_CALL(flush_chunk, viking_s_flush_chunk, BTFIXUPCALL_NORM);
	}

	return; /* SUCCESS! */
}
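/* When physical memory turns out to be contiguous (or at least starts low),
 * the generic hash-based helpers are patched over with the cheaper
 * srmmu_c_*()/srmmu_s_*() variants above, so the common page-table
 * operations avoid the hash lookup entirely.
 */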
/* Paging initialization on the Sparc Reference MMU. */
extern unsigned long free_area_init(unsigned long, unsigned long);
extern unsigned long sparc_context_init(unsigned long, int);

extern int physmem_mapped_contig;
extern int linux_num_cpus;

void (*poke_srmmu)(void) __initdata = NULL;
unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long ptables_start;
	int i, cpunode;
	char node_str[128];

	sparc_iobase_vaddr = 0xfd000000;	/* 16MB of IOSPACE on all sun4m's. */
	physmem_mapped_contig = 0;		/* for init.c:taint_real_pages() */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536;		/* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}
	ptables_start = mempool = PAGE_ALIGN(start_mem);
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
	kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
	kbpage -= PAGE_SIZE;

	srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
	srmmu_allocate_ptable_skeleton(sparc_iobase_vaddr, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	mempool = PAGE_ALIGN(mempool);
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	srmmu_context_table = sparc_init_alloc(&mempool, num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
	for(i = 0; i < num_contexts; i++)
		ctxd_set(&srmmu_context_table[i], swapper_pg_dir);

	start_mem = PAGE_ALIGN(mempool);
	if(BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page) {
		unsigned long start = ptables_start;
		unsigned long end = start_mem;

		while(start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	}
	srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
	flush_tlb_all();
	poke_srmmu();

	start_mem = sparc_context_init(start_mem, num_contexts);
	start_mem = free_area_init(start_mem, end_mem);
#ifdef CONFIG_BLK_DEV_INITRD
	/* If initial ramdisk was specified with physical address,
	   translate it here, as the p2v translation in srmmu
	   is not straightforward. */
	if (initrd_start && initrd_start < KERNBASE) {
		initrd_start = srmmu_p2v(initrd_start);
		initrd_end = srmmu_p2v(initrd_end);
		if (initrd_end <= initrd_start)
			initrd_start = 0;
	}
#endif

	return PAGE_ALIGN(start_mem);
}
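/* The initrd fixup above can only be done this late: srmmu_p2v() presumably
 * needs the p2v hash tables, and those are only filled in once the kernel
 * mappings have been set up earlier in this function.
 */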
static int srmmu_mmu_info(char *buf)
{
	return sprintf(buf,
		"MMU type\t: %s\n"
		"invall\t\t: %d\n"
		"invmm\t\t: %d\n"
		"invrnge\t\t: %d\n"
		"invpg\t\t: %d\n"
		"contexts\t: %d\n",
		srmmu_name,
		module_stats.invall,
		module_stats.invmm,
		module_stats.invrnge,
		module_stats.invpg,
		num_contexts);
}
static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

static void srmmu_destroy_context(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
		/* XXX This could be drastically improved.
		 * XXX We are only called from __exit_mm and it just did
		 * XXX cache/tlb mm flush and right after this will (re-)
		 * XXX SET_PAGE_DIR to swapper_pg_dir.  -DaveM
		 */
		ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
		free_context(mm->context);
		mm->context = NO_CONTEXT;
	}
}
static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) {
		struct vm_area_struct *vmaring;
		struct file *file;
		struct inode *inode;
		unsigned long flags, offset, vaddr, start;
		int alias_found = 0;
		pgd_t *pgdp;
		pmd_t *pmdp;
		pte_t *ptep;

		__save_and_cli(flags);

		file = vma->vm_file;
		if (!file)
			goto done;
		inode = file->f_dentry->d_inode;
		offset = (address & PAGE_MASK) - vma->vm_start;
		spin_lock(&inode->i_shared_lock);
		vmaring = inode->i_mmap;
		do {
			/* Do not mistake ourselves as another mapping. */
			if(vmaring == vma)
				continue;

			vaddr = vmaring->vm_start + offset;
			if ((vaddr ^ address) & vac_badbits) {
				alias_found++;
				start = vmaring->vm_start;
				while (start < vmaring->vm_end) {
					pgdp = srmmu_pgd_offset(vmaring->vm_mm, start);
					if(!pgdp) goto next;
					pmdp = srmmu_pmd_offset(pgdp, start);
					if(!pmdp) goto next;
					ptep = srmmu_pte_offset(pmdp, start);
					if(!ptep) goto next;

					if((pte_val(*ptep) & SRMMU_ET_MASK) == SRMMU_VALID) {
						printk("Fixing USER/USER alias [%ld:%08lx]\n",
						       vmaring->vm_mm->context, start);
						flush_cache_page(vmaring, start);
						set_pte(ptep, __pte((pte_val(*ptep) &
								     ~SRMMU_CACHE)));
						flush_tlb_page(vmaring, start);
					}
				next:
					start += PAGE_SIZE;
				}
			}
		} while ((vmaring = vmaring->vm_next_share) != NULL);
		spin_unlock(&inode->i_shared_lock);

		if(alias_found && ((pte_val(pte) & SRMMU_CACHE) != 0)) {
			pgdp = srmmu_pgd_offset(vma->vm_mm, address);
			pmdp = srmmu_pmd_offset(pgdp, address);
			ptep = srmmu_pte_offset(pmdp, address);
			flush_cache_page(vma, address);
			set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
			flush_tlb_page(vma, address);
		}
	done:
		__restore_flags(flags);
	}
}
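/* Illustrative example (addresses made up): with a 64KB virtually indexed
 * cache, vac_badbits covers the page-sized index bits, so two
 * VM_SHARED|VM_WRITE mappings of the same file offset at, say, 0x00010000
 * and 0x00018000 would index different cache lines for the same data.
 * Rather than trying to keep such aliases coherent, the code above simply
 * clears SRMMU_CACHE on the aliased pages.
 */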
static void hypersparc_destroy_context(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
		ctxd_t *ctxp;

		/* HyperSparc is copy-back, any data for this
		 * process in a modified cache line is stale
		 * and must be written back to main memory now
		 * else we eat shit later big time.
		 */
		flush_cache_mm(mm);

		ctxp = &srmmu_context_table[mm->context];
		srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) swapper_pg_dir) >> 4))));
		hypersparc_flush_page_to_ram((unsigned long)ctxp);

		flush_tlb_mm(mm);
		free_context(mm->context);
		mm->context = NO_CONTEXT;
	}
}
/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}
static void __init init_vac_layout(void)
{
	int nd, cache_lines;
	char node_str[128];
#ifdef __SMP__
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
			vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
#ifdef __SMP__
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			cpu++;
			if(cpu == smp_num_cpus)
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef __SMP__
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
	vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}
static void __init poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}
static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";

	init_vac_layout();

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_hyper, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */

	BTFIXUPSET_CALL(ctxd_set, hypersparc_ctxd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(switch_to_context, hypersparc_switch_to_context, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(init_new_context, hypersparc_init_new_context, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(destroy_context, hypersparc_destroy_context, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_update_rootmmu_dir, hypersparc_update_rootmmu_dir, BTFIXUPCALL_NORM);
	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}
static void __init poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
						(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}
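/* Before the Cypress cache is (re)enabled, the loops above scrub the data
 * cache through ASI_M_DATAC_TAG: with the cache off the tags are simply
 * cleared, while with the cache on each dirty-and-valid tag is apparently
 * forced out by the dummy load from 0xf0020000 + faddr before caching is
 * turned back on.
 */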
static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_cypress, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_chunk, cypress_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */

	BTFIXUPSET_CALL(flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(sparc_update_rootmmu_dir, cypress_update_rootmmu_dir, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
	poke_srmmu = poke_cypress;
}
static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}
static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else if(mrev == 0xd) {
		srmmu_modtype = Cypress_vD;
		hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
	} else {
		srmmu_modtype = Cypress;
	}
	init_cypress_common();
}
static void __init poke_swift(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Clear any crap from the cache or else... */
	swift_idflash_clear();
	mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */

	/* The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}
#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/* Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamaged stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts".
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/* You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	/* Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_chunk(unsigned long chunk)
{
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
	module_stats.invall++;
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	module_stats.invmm++;
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	module_stats.invrnge++;
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	module_stats.invpg++;
	FLUSH_END
}
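/* Note that every TLB operation above falls back to a whole-TLB flush via
 * srmmu_flush_whole_tlb(); the per-type module_stats counters are what make
 * the relative cost of mm/range/page flushes visible in the MMU statistics.
 */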
static void __init poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg (ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */

	srmmu_set_mmureg(mreg);
}
static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_chunk, turbosparc_flush_chunk, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_turbosparc;
}
static void __init poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}
static void __init init_tsunami(void)
{
	/* Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */
	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */

	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;
}
static void __init poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch = 0;

	if(viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/* We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if(smp_catch++) {
			/* Must disable mixed-cmd mode here for
			 * other cpu's.
			 */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);

#ifdef __SMP__
	/* Avoid unnecessary cross calls. */
	BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
	BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
	BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
	BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
	BTFIXUPCOPY_CALL(flush_page_to_ram, local_flush_page_to_ram);
	BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
	BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
#endif
}
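/* When built for SMP, poke_viking() redirects these flush entry points to
 * their local_* variants: per the comment above, broadcasting them as cross
 * calls buys nothing on the Viking, so each CPU simply flushes locally.
 */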
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if(mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;

		BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_viking, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(sparc_update_rootmmu_dir, viking_update_rootmmu_dir, BTFIXUPCALL_NORM);

		BTFIXUPSET_CALL(flush_chunk, viking_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */

		/* We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
		/* Also, this is so far the only chip which actually uses
		   the page argument to flush_page_for_dma */
		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		BTFIXUPSET_CALL(flush_chunk, viking_mxcc_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}
/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for HyperSparc or Cypress. */
	if(mod_typ == 1) {
		switch(mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		}
		return;
	}

	/* Now Fujitsu TurboSparc. It might happen that it is
	   in Swift emulation mode, so we will check later... */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if(psr_typ == 0 && psr_vers == 4) {
		int cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if(psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}
static int srmmu_check_pgt_cache(int low, int high)
{
	struct page *page, *page2;
	int freed = 0;

	if (pgtable_cache_size > high) {
		spin_lock(&pte_spinlock);
		for (page2 = NULL, page = (struct page *)pte_quicklist; page;) {
			if ((unsigned int)page->pprev_hash == 0xffff) {
				if (page2)
					page2->next_hash = page->next_hash;
				else
					(struct page *)pte_quicklist = page->next_hash;
				page->next_hash = NULL;
				page->pprev_hash = NULL;
				pgtable_cache_size -= 16;
				__free_page(page);
				freed++;
				if (page2)
					page = page2->next_hash;
				else
					page = (struct page *)pte_quicklist;
				if (pgtable_cache_size <= low)
					break;
				continue;
			}
			page2 = page;
			page = page->next_hash;
		}
		spin_unlock(&pte_spinlock);
	}
	if (pgd_cache_size > high / 4) {
		spin_lock(&pgd_spinlock);
		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			if ((unsigned int)page->pprev_hash == 0xf) {
				if (page2)
					page2->next_hash = page->next_hash;
				else
					(struct page *)pgd_quicklist = page->next_hash;
				page->next_hash = NULL;
				page->pprev_hash = NULL;
				pgd_cache_size -= 4;
				__free_page(page);
				freed++;
				if (page2)
					page = page2->next_hash;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= low / 4)
					break;
				continue;
			}
			page2 = page;
			page = page->next_hash;
		}
		spin_unlock(&pgd_spinlock);
	}
	return freed;
}
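/* The pprev_hash field appears to be reused here as a free-chunk bitmap: a
 * pte page is only handed back once all 16 of its sub-tables are free
 * (0xffff) and a pgd page once all 4 of its quadrants are free (0xf), which
 * matches the -= 16 and -= 4 accounting above.
 */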
extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

extern unsigned long srmmu_fault;

#define PATCH_BRANCH(insn, dest) do { \
		iaddr = &(insn); \
		daddr = &(dest); \
		*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
	} while(0);

static void __init patch_window_trap_handlers(void)
{
	unsigned long *iaddr, *daddr;

	PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
	PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
	PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
	PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}
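/* Boot-time branch patching: each *_mmu_patchme location and the three
 * trap-table slots are single instructions that get overwritten with a
 * branch to the SRMMU-specific stack-check or fault handler, so the shared
 * trap entry code does not have to test for the MMU type at run time.
 */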
#ifdef __SMP__
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
}
#endif
/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);
	extern void ___xchg32_sun4md(void);

	/* First the constants */
	BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT);
	BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE);
	BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK);
	BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
	BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
	BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);

	BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE);
	BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
	BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);

	BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
	BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
	pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
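	/* In effect, the BTFIXUPSET_* calls here do not assign variables at
	 * run time; they record values that the btfixup pass later patches
	 * straight into the call sites (sethi/simm13 immediates, call
	 * targets, or no-ops), which is why MMU-specific constants can be
	 * chosen this late in boot without an indirection cost.
	 */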
#ifndef __SMP__
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
	BTFIXUPSET_CALL(get_pte_fast, srmmu_get_pte_fast, BTFIXUPCALL_RETINT(0));
	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0));
	BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte_cacheable, BTFIXUPCALL_SWAPO0O1);
	BTFIXUPSET_CALL(init_new_context, srmmu_init_new_context, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(switch_to_context, srmmu_switch_to_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_update_rootmmu_dir, srmmu_update_rootmmu_dir, BTFIXUPCALL_NORM);

	BTFIXUPSET_SETHI(none_mask, 0xF0000000);

	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
	BTFIXUPSET_CALL(pgd_offset, srmmu_pgd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free_kernel, srmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_free_kernel, srmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_kernel, srmmu_pte_alloc, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc_kernel, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc, srmmu_pte_alloc, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_free, srmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_free, srmmu_pgd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_alloc, srmmu_pgd_alloc, BTFIXUPCALL_NORM);

	BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
	BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
	BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
	BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
	BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_v2p, srmmu_v2p, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_p2v, srmmu_p2v, BTFIXUPCALL_NORM);

	/* Task struct and kernel stack allocating/freeing. */
	BTFIXUPSET_CALL(alloc_task_struct, srmmu_alloc_task_struct, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_task_struct, srmmu_free_task_struct, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(quick_kernel_fault, srmmu_quick_kernel_fault, BTFIXUPCALL_NORM);

	/* SRMMU specific. */
	BTFIXUPSET_CALL(ctxd_set, srmmu_ctxd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);

	get_srmmu_type();
	patch_window_trap_handlers();
#ifdef __SMP__
	/* El switcheroo... */

	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
	BTFIXUPCOPY_CALL(local_flush_page_to_ram, flush_page_to_ram);
	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
	if (sparc_cpu_model != sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
#endif
	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
	if (sparc_cpu_model == sun4d)