/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
15 #include <linux/memblock.h>
16 #include <asm/fixmap.h>
17 #include <asm/code-patching.h>
/* Size in bytes of the IMMR mapping, derived from the number of fixmap pages
 * reserved for it.
 */
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

/* Non-zero when large-TLB (block) mappings are disabled — presumably set
 * from the kernel command line; TODO confirm against the early init code.
 */
extern int __map_without_ltlbs;
26 * Return PA for this VA if it is in IMMR area, or 0
28 phys_addr_t
v_block_mapped(unsigned long va
)
30 unsigned long p
= PHYS_IMMR_BASE
;
32 if (__map_without_ltlbs
)
34 if (va
>= VIRT_IMMR_BASE
&& va
< VIRT_IMMR_BASE
+ IMMR_SIZE
)
35 return p
+ va
- VIRT_IMMR_BASE
;
40 * Return VA for a given PA or 0 if not mapped
42 unsigned long p_block_mapped(phys_addr_t pa
)
44 unsigned long p
= PHYS_IMMR_BASE
;
46 if (__map_without_ltlbs
)
48 if (pa
>= p
&& pa
< p
+ IMMR_SIZE
)
49 return VIRT_IMMR_BASE
+ pa
- p
;
53 #define LARGE_PAGE_SIZE_8M (1<<23)
56 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
58 void __init
MMU_init_hw(void)
60 /* PIN up to the 3 first 8Mb after IMMR in DTLB table */
62 unsigned long ctr
= mfspr(SPRN_MD_CTR
) & 0xfe000000;
63 unsigned long flags
= 0xf0 | MD_SPS16K
| _PAGE_SHARED
| _PAGE_DIRTY
;
64 #ifdef CONFIG_PIN_TLB_IMMR
69 unsigned long addr
= 0;
70 unsigned long mem
= total_lowmem
;
72 for (; i
< 32 && mem
>= LARGE_PAGE_SIZE_8M
; i
++) {
73 mtspr(SPRN_MD_CTR
, ctr
| (i
<< 8));
74 mtspr(SPRN_MD_EPN
, (unsigned long)__va(addr
) | MD_EVALID
);
75 mtspr(SPRN_MD_TWC
, MD_PS8MEG
| MD_SVALID
);
76 mtspr(SPRN_MD_RPN
, addr
| flags
| _PAGE_PRESENT
);
77 addr
+= LARGE_PAGE_SIZE_8M
;
78 mem
-= LARGE_PAGE_SIZE_8M
;
83 static void mmu_mapin_immr(void)
85 unsigned long p
= PHYS_IMMR_BASE
;
86 unsigned long v
= VIRT_IMMR_BASE
;
87 unsigned long f
= pgprot_val(PAGE_KERNEL_NCG
);
90 for (offset
= 0; offset
< IMMR_SIZE
; offset
+= PAGE_SIZE
)
91 map_page(v
+ offset
, p
+ offset
, f
);
/* Address of instructions to patch */
#ifndef CONFIG_PIN_TLB_IMMR
extern unsigned int DTLBMiss_jmp;
#endif
extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;

/*
 * Patch the 16-bit immediate of the compare instruction at *addr so that
 * it compares against the virtual address corresponding to 'mapped'.
 */
void mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
{
	unsigned int instr = *addr;

	/* Clear the old immediate before inserting the new limit;
	 * OR-ing without the mask would corrupt the patched instruction.
	 */
	instr &= 0xffff0000;
	instr |= (unsigned long)__va(mapped) >> 16;
	patch_instruction(addr, instr);
}
109 unsigned long __init
mmu_mapin_ram(unsigned long top
)
111 unsigned long mapped
;
113 if (__map_without_ltlbs
) {
116 #ifndef CONFIG_PIN_TLB_IMMR
117 patch_instruction(&DTLBMiss_jmp
, PPC_INST_NOP
);
120 mapped
= top
& ~(LARGE_PAGE_SIZE_8M
- 1);
123 mmu_patch_cmp_limit(&DTLBMiss_cmp
, mapped
);
124 mmu_patch_cmp_limit(&FixupDAR_cmp
, mapped
);
126 /* If the size of RAM is not an exact power of two, we may not
127 * have covered RAM in its entirety with 8 MiB
128 * pages. Consequently, restrict the top end of RAM currently
129 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
130 * coverage with normal-sized pages (or other reasons) do not
131 * attempt to allocate outside the allowed range.
134 memblock_set_current_limit(mapped
);
139 void setup_initial_memory_limit(phys_addr_t first_memblock_base
,
140 phys_addr_t first_memblock_size
)
142 /* We don't currently support the first MEMBLOCK not mapping 0
143 * physical on those processors
145 BUG_ON(first_memblock_base
!= 0);
147 /* 8xx can only access 24MB at the moment */
148 memblock_set_current_limit(min_t(u64
, first_memblock_size
, 0x01800000));
152 * Set up to use a given MMU context.
153 * id is context number, pgd is PGD pointer.
155 * We place the physical address of the new task page directory loaded
156 * into the MMU base register, and set the ASID compare register with
159 void set_context(unsigned long id
, pgd_t
*pgd
)
161 s16 offset
= (s16
)(__pa(swapper_pg_dir
));
163 #ifdef CONFIG_BDI_SWITCH
164 pgd_t
**ptr
= *(pgd_t
***)(KERNELBASE
+ 0xf0);
166 /* Context switch the PTE pointer for the Abatron BDI2000.
167 * The PGDIR is passed as second argument.
172 /* Register M_TW will contain base address of level 1 table minus the
173 * lower part of the kernel PGDIR base address, so that all accesses to
174 * level 1 table are done relative to lower part of kernel PGDIR base
177 mtspr(SPRN_M_TW
, __pa(pgd
) - offset
);
180 mtspr(SPRN_M_CASID
, id
);
185 void flush_instruction_cache(void)
188 mtspr(SPRN_IC_CST
, IDC_INVALL
);