/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/smp.h>
#include <asm/io.h>
/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
#include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;
const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
int flush_page_for_dma_global = 1;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;
/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
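
/*
 * Worked example of the granularity above, assuming 4 KB pages (so
 * SRMMU_NOCACHE_BITMAP_SHIFT == 8): one bitmap bit covers 1 << 8 = 256
 * bytes, i.e. 64 four-byte PTEs, and a full page of nocache memory
 * consumes PAGE_SIZE >> 8 = 16 bitmap bits.
 */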
void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;
static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }
/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
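
/*
 * Layout note (assuming 4 KB pages and the 64-entry hardware tables
 * implied by SRMMU_REAL_PTRS_PER_PTE): a software pmd_t bundles
 * PTRS_PER_PTE / SRMMU_REAL_PTRS_PER_PTE == 16 hardware page-table
 * pointers in pmdv[], each pointing 64 PTEs (256 bytes) further into
 * the same page-table page; pmd_set() below replicates the PTP
 * accordingly.
 */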
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
	}
}
void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
	}
}
/* Find an entry in the third-level page table... */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static void *__srmmu_get_nocache(int size, int align)
{
	int offset;
	unsigned long addr;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
		       size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
		       size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return NULL;
	}

	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
	return (void *)addr;
}
void *srmmu_get_nocache(int size, int align)
{
	void *tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset(tmp, 0, size);

	return tmp;
}
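
/*
 * Illustrative pairing only (not a call site in this file): a zeroed,
 * naturally aligned page-table page would come and go via
 *
 *	pte_t *p = srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 *	...
 *	srmmu_free_nocache(p, PTE_SIZE);
 */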
void srmmu_free_nocache(void *addr, int size)
{
	unsigned long vaddr;
	int offset;

	vaddr = (unsigned long)addr;
	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}
static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);
/* Return how much physical memory we have. */
static unsigned long __init probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}
/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
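
/*
 * Example of the sizing rule, assuming SRMMU_NOCACHE_ALCRATIO == 64
 * and 4 KB pages: with 64 MB of RAM, sysmemavail == 65536, giving
 * 65536 / 64 / 1024 * 256 == 256 nocache pages, i.e. a 1 MB pool.
 */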
static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap =
		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
				SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}
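
/*
 * From this point the pool is reachable two ways: through the cached
 * linear kernel mapping (which is what __nocache_fix() resolves to
 * while the new page tables are not yet live) and through the
 * SRMMU_NOCACHE_VADDR window mapped above, which stays uncached unless
 * srmmu_cache_pagetables is set.
 */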
pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}
/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}
void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */

	/* free non-cached virtual address */
	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
}
/* context handling - a dynamically sized pool is used */
#define NO_CONTEXT	-1

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

static struct ctx_list *ctx_list_pool;
static struct ctx_list ctx_free;
static struct ctx_list ctx_used;

/* At boot time we determine the number of contexts */
static int num_contexts;
static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}
static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}

#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
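
/*
 * alloc_context() hands out a context from the free list when one is
 * available; otherwise it steals the oldest entry on the used list
 * (skipping old_mm), flushing the victim's cache and TLB before the
 * context number is reassigned.
 */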
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}
static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}
static void __init sparc_context_init(int numctx)
{
	int ctx;
	unsigned long size;

	size = numctx * sizeof(struct ctx_list);
	ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);

	for (ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = NULL;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for (ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
}
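
/*
 * Context assignment is lazy: an mm gets a hardware context only the
 * first time it is switched to, and destroy_context() later returns
 * that context to the free list.
 */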
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if (mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}
/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}
void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}
static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif
/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);
/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = __srmmu_get_nocache(PTE_SIZE,
						   PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
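
/*
 * Note on srmmu_probe() below (per the SRMMU reference MMU probe
 * encoding; not verified on every chip): in the ASI_M_FLUSH_PROBE
 * address space the type field in the address selects the operation,
 * and or-ing in 0x400 requests a "probe entire" lookup that returns
 * the matching PTE, or 0 if nothing is mapped.
 */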
/* These flush types are not available on all chips... */
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {
		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, 0);
	}
	return retval;
}
/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	unsigned long probed;
	unsigned long addr;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what;	/* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */

	while (start <= end) {
		if (start == 0)
			break;	/* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		probed = srmmu_probe(start);
		if (!probed) {
			/* continue probing until we find an entry */
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;
		addr = start - PAGE_SIZE;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
						   SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/* We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			unsigned long *val;
			x = (start >> PMD_SHIFT) & 15;
			val = &pmdp->pmdv[x];
			*(unsigned long *)__nocache_fix(val) = probed;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(probed);
		start += PAGE_SIZE;
	}
}
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}
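
/*
 * Example: placing KERNEL_PTE(phys_base >> 4) straight into a PGD slot
 * makes that single entry translate a whole SRMMU_PGDIR_SIZE (16 MB)
 * region, so the kernel's large mappings consume no pmd or pte tables
 * at all.
 */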
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}
static void __init map_kernel(void)
{
	int i;

	do_large_mapping(PAGE_OFFSET, phys_base);

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}
void (*poke_srmmu)(void) = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);
void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	init_mm.context = (unsigned long) NO_CONTEXT;
	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536;	/* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}
void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}
void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}
/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}
static void poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}
static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all	= hypersparc_flush_cache_all,
	.cache_mm	= hypersparc_flush_cache_mm,
	.cache_page	= hypersparc_flush_cache_page,
	.cache_range	= hypersparc_flush_cache_range,
	.tlb_all	= hypersparc_flush_tlb_all,
	.tlb_mm		= hypersparc_flush_tlb_mm,
	.tlb_page	= hypersparc_flush_tlb_page,
	.tlb_range	= hypersparc_flush_tlb_range,
	.page_to_ram	= hypersparc_flush_page_to_ram,
	.sig_insns	= hypersparc_flush_sig_insns,
	.page_for_dma	= hypersparc_flush_page_for_dma,
};
static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}
static void poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all	= swift_flush_cache_all,
	.cache_mm	= swift_flush_cache_mm,
	.cache_page	= swift_flush_cache_page,
	.cache_range	= swift_flush_cache_range,
	.tlb_all	= swift_flush_tlb_all,
	.tlb_mm		= swift_flush_tlb_mm,
	.tlb_page	= swift_flush_tlb_page,
	.tlb_range	= swift_flush_tlb_range,
	.page_to_ram	= swift_flush_page_to_ram,
	.sig_insns	= swift_flush_sig_insns,
	.page_for_dma	= swift_flush_page_for_dma,
};
#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_probe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}
static void poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	/* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all	= turbosparc_flush_cache_all,
	.cache_mm	= turbosparc_flush_cache_mm,
	.cache_page	= turbosparc_flush_cache_page,
	.cache_range	= turbosparc_flush_cache_range,
	.tlb_all	= turbosparc_flush_tlb_all,
	.tlb_mm		= turbosparc_flush_tlb_mm,
	.tlb_page	= turbosparc_flush_tlb_page,
	.tlb_range	= turbosparc_flush_tlb_range,
	.page_to_ram	= turbosparc_flush_page_to_ram,
	.sig_insns	= turbosparc_flush_sig_insns,
	.page_for_dma	= turbosparc_flush_page_for_dma,
};
static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}
static void poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all	= tsunami_flush_cache_all,
	.cache_mm	= tsunami_flush_cache_mm,
	.cache_page	= tsunami_flush_cache_page,
	.cache_range	= tsunami_flush_cache_range,
	.tlb_all	= tsunami_flush_tlb_all,
	.tlb_mm		= tsunami_flush_tlb_mm,
	.tlb_page	= tsunami_flush_tlb_page,
	.tlb_range	= tsunami_flush_tlb_range,
	.page_to_ram	= tsunami_flush_page_to_ram,
	.sig_insns	= tsunami_flush_sig_insns,
	.page_for_dma	= tsunami_flush_page_for_dma,
};
static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}
static void poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		 * viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}
static struct sparc32_cachetlb_ops viking_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= viking_flush_tlb_all,
	.tlb_mm		= viking_flush_tlb_mm,
	.tlb_page	= viking_flush_tlb_page,
	.tlb_range	= viking_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
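
/*
 * Shape of that workaround, as a sketch only (the real sun4dsmp_*
 * flush routines live elsewhere, and the lock name here is
 * hypothetical):
 *
 *	spin_lock(&sun4d_tlb_flush_lock);	// one broadcast at a time
 *	...issue the local flush, which the hardware broadcasts...
 *	spin_unlock(&sun4d_tlb_flush_lock);
 */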
#ifdef CONFIG_SMP
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= sun4dsmp_flush_tlb_all,
	.tlb_mm		= sun4dsmp_flush_tlb_mm,
	.tlb_page	= sun4dsmp_flush_tlb_page,
	.tlb_range	= sun4dsmp_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#endif
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		viking_ops.page_for_dma = viking_flush_page;
#ifdef CONFIG_SMP
		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
#endif
		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;
		srmmu_cache_pagetables = 1;
	}

	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&viking_ops;
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
			&viking_sun4d_smp_ops;
#endif

	poke_srmmu = poke_viking;
}
/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		default:
			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
			prom_halt();
			break;
		}
		return;
	}

	/* Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}
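
/*
 * The smp_*() wrappers below all follow one pattern: cross-call the
 * chip's local operation to the other cpus (xc0..xc3 carry zero to
 * three arguments), then run the same local_ops hook on this cpu.
 */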
#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) local_ops->page_for_dma, page);
	local_ops->page_for_dma(page);
}
static void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) local_ops->cache_all);
	local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) local_ops->tlb_all);
	local_ops->tlb_all();
}
static void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
		local_ops->cache_mm(mm);
	}
}
static void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_ops->tlb_mm(mm);
	}
}
static void smp_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->cache_range,
			    (unsigned long) vma, start, end);
		local_ops->cache_range(vma, start, end);
	}
}
static void smp_flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->tlb_range,
			    (unsigned long) vma, start, end);
		local_ops->tlb_range(vma, start, end);
	}
}
static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->cache_page,
			    (unsigned long) vma, page);
		local_ops->cache_page(vma, page);
	}
}
static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->tlb_page,
			    (unsigned long) vma, page);
		local_ops->tlb_page(vma, page);
	}
}
static void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the one's
	 * who have just dirtied their cache with the pages contents
	 * in kernel space, therefore we only run this on local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) local_ops->page_to_ram, page);
#endif
	local_ops->page_to_ram(page);
}
static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;

	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) local_ops->sig_insns,
		    (unsigned long) mm, insn_addr);
	local_ops->sig_insns(mm, insn_addr);
}
static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
	.cache_all	= smp_flush_cache_all,
	.cache_mm	= smp_flush_cache_mm,
	.cache_page	= smp_flush_cache_page,
	.cache_range	= smp_flush_cache_range,
	.tlb_all	= smp_flush_tlb_all,
	.tlb_mm		= smp_flush_tlb_mm,
	.tlb_page	= smp_flush_tlb_page,
	.tlb_range	= smp_flush_tlb_range,
	.page_to_ram	= smp_flush_page_to_ram,
	.sig_insns	= smp_flush_sig_insns,
	.page_for_dma	= smp_flush_page_for_dma,
};
#endif
1766 /* Load up routines and constants for sun4m and sun4d mmu */
1767 void __init
load_mmu(void)
1769 extern void ld_mmu_iommu(void);
1770 extern void ld_mmu_iounit(void);
1776 /* El switcheroo... */
1777 local_ops
= sparc32_cachetlb_ops
;
1779 if (sparc_cpu_model
== sun4d
|| sparc_cpu_model
== sparc_leon
) {
1780 smp_cachetlb_ops
.tlb_all
= local_ops
->tlb_all
;
1781 smp_cachetlb_ops
.tlb_mm
= local_ops
->tlb_mm
;
1782 smp_cachetlb_ops
.tlb_range
= local_ops
->tlb_range
;
1783 smp_cachetlb_ops
.tlb_page
= local_ops
->tlb_page
;
1786 if (poke_srmmu
== poke_viking
) {
1787 /* Avoid unnecessary cross calls. */
1788 smp_cachetlb_ops
.cache_all
= local_ops
->cache_all
;
1789 smp_cachetlb_ops
.cache_mm
= local_ops
->cache_mm
;
1790 smp_cachetlb_ops
.cache_range
= local_ops
->cache_range
;
1791 smp_cachetlb_ops
.cache_page
= local_ops
->cache_page
;
1793 smp_cachetlb_ops
.page_to_ram
= local_ops
->page_to_ram
;
1794 smp_cachetlb_ops
.sig_insns
= local_ops
->sig_insns
;
1795 smp_cachetlb_ops
.page_for_dma
= local_ops
->page_for_dma
;
1798 /* It really is const after this point. */
1799 sparc32_cachetlb_ops
= (const struct sparc32_cachetlb_ops
*)
1803 if (sparc_cpu_model
== sun4d
)
1808 if (sparc_cpu_model
== sun4d
)
1810 else if (sparc_cpu_model
== sparc_leon
)