/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
15 #include <linux/memblock.h>
16 #include <asm/fixmap.h>
17 #include <asm/code-patching.h>
/* Size of the IMMR block, derived from the number of fixmap pages. */
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

/* When non-zero, large-page (LTLB) block mappings are not used;
 * defined elsewhere — NOTE(review): presumably set from the command line.
 */
extern int __map_without_ltlbs;

/* Top of RAM covered by 8M block mappings; 0 when none are in use. */
static unsigned long block_mapped_ram;
28 * Return PA for this VA if it is in an area mapped with LTLBs.
29 * Otherwise, returns 0
31 phys_addr_t
v_block_mapped(unsigned long va
)
33 unsigned long p
= PHYS_IMMR_BASE
;
35 if (__map_without_ltlbs
)
37 if (va
>= VIRT_IMMR_BASE
&& va
< VIRT_IMMR_BASE
+ IMMR_SIZE
)
38 return p
+ va
- VIRT_IMMR_BASE
;
39 if (va
>= PAGE_OFFSET
&& va
< PAGE_OFFSET
+ block_mapped_ram
)
45 * Return VA for a given PA mapped with LTLBs or 0 if not mapped
47 unsigned long p_block_mapped(phys_addr_t pa
)
49 unsigned long p
= PHYS_IMMR_BASE
;
51 if (__map_without_ltlbs
)
53 if (pa
>= p
&& pa
< p
+ IMMR_SIZE
)
54 return VIRT_IMMR_BASE
+ pa
- p
;
55 if (pa
< block_mapped_ram
)
56 return (unsigned long)__va(pa
);
60 #define LARGE_PAGE_SIZE_8M (1<<23)
63 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
65 void __init
MMU_init_hw(void)
67 /* PIN up to the 3 first 8Mb after IMMR in DTLB table */
68 #ifdef CONFIG_PIN_TLB_DATA
69 unsigned long ctr
= mfspr(SPRN_MD_CTR
) & 0xfe000000;
70 unsigned long flags
= 0xf0 | MD_SPS16K
| _PAGE_PRIVILEGED
| _PAGE_DIRTY
;
71 #ifdef CONFIG_PIN_TLB_IMMR
76 unsigned long addr
= 0;
77 unsigned long mem
= total_lowmem
;
79 for (; i
< 32 && mem
>= LARGE_PAGE_SIZE_8M
; i
++) {
80 mtspr(SPRN_MD_CTR
, ctr
| (i
<< 8));
81 mtspr(SPRN_MD_EPN
, (unsigned long)__va(addr
) | MD_EVALID
);
82 mtspr(SPRN_MD_TWC
, MD_PS8MEG
| MD_SVALID
| M_APG2
);
83 mtspr(SPRN_MD_RPN
, addr
| flags
| _PAGE_PRESENT
);
84 addr
+= LARGE_PAGE_SIZE_8M
;
85 mem
-= LARGE_PAGE_SIZE_8M
;
90 static void __init
mmu_mapin_immr(void)
92 unsigned long p
= PHYS_IMMR_BASE
;
93 unsigned long v
= VIRT_IMMR_BASE
;
94 unsigned long f
= pgprot_val(PAGE_KERNEL_NCG
);
97 for (offset
= 0; offset
< IMMR_SIZE
; offset
+= PAGE_SIZE
)
98 map_kernel_page(v
+ offset
, p
+ offset
, f
);
/* Address of instructions to patch */
#ifndef CONFIG_PIN_TLB_IMMR
extern unsigned int DTLBMiss_jmp;
#endif
extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
#ifndef CONFIG_PIN_TLB_TEXT
extern unsigned int ITLBMiss_cmp;
#endif
110 static void __init
mmu_patch_cmp_limit(unsigned int *addr
, unsigned long mapped
)
112 unsigned int instr
= *addr
;
115 instr
|= (unsigned long)__va(mapped
) >> 16;
116 patch_instruction(addr
, instr
);
119 unsigned long __init
mmu_mapin_ram(unsigned long top
)
121 unsigned long mapped
;
123 if (__map_without_ltlbs
) {
126 #ifndef CONFIG_PIN_TLB_IMMR
127 patch_instruction(&DTLBMiss_jmp
, PPC_INST_NOP
);
129 #ifndef CONFIG_PIN_TLB_TEXT
130 mmu_patch_cmp_limit(&ITLBMiss_cmp
, 0);
133 mapped
= top
& ~(LARGE_PAGE_SIZE_8M
- 1);
136 mmu_patch_cmp_limit(&DTLBMiss_cmp
, mapped
);
137 mmu_patch_cmp_limit(&FixupDAR_cmp
, mapped
);
139 /* If the size of RAM is not an exact power of two, we may not
140 * have covered RAM in its entirety with 8 MiB
141 * pages. Consequently, restrict the top end of RAM currently
142 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
143 * coverage with normal-sized pages (or other reasons) do not
144 * attempt to allocate outside the allowed range.
147 memblock_set_current_limit(mapped
);
149 block_mapped_ram
= mapped
;
154 void __init
setup_initial_memory_limit(phys_addr_t first_memblock_base
,
155 phys_addr_t first_memblock_size
)
157 /* We don't currently support the first MEMBLOCK not mapping 0
158 * physical on those processors
160 BUG_ON(first_memblock_base
!= 0);
162 /* 8xx can only access 24MB at the moment */
163 memblock_set_current_limit(min_t(u64
, first_memblock_size
, 0x01800000));
167 * Set up to use a given MMU context.
168 * id is context number, pgd is PGD pointer.
170 * We place the physical address of the new task page directory loaded
171 * into the MMU base register, and set the ASID compare register with
174 void set_context(unsigned long id
, pgd_t
*pgd
)
176 s16 offset
= (s16
)(__pa(swapper_pg_dir
));
178 #ifdef CONFIG_BDI_SWITCH
179 pgd_t
**ptr
= *(pgd_t
***)(KERNELBASE
+ 0xf0);
181 /* Context switch the PTE pointer for the Abatron BDI2000.
182 * The PGDIR is passed as second argument.
187 /* Register M_TW will contain base address of level 1 table minus the
188 * lower part of the kernel PGDIR base address, so that all accesses to
189 * level 1 table are done relative to lower part of kernel PGDIR base
192 mtspr(SPRN_M_TW
, __pa(pgd
) - offset
);
195 mtspr(SPRN_M_CASID
, id
);
200 void flush_instruction_cache(void)
203 mtspr(SPRN_IC_CST
, IDC_INVALL
);