/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>

#include "mmu_decl.h"
struct hash_pte *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];
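/*
 * BATS holds the raw upper/lower register images that are loaded into
 * the hardware IBATs/DBATs elsewhere; bat_addrs is the software-side
 * record of what each BAT maps, consulted by the helpers below.
 */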
/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_mapped_by_bats(unsigned long va)
{
	int b;

	for (b = 0; b < 4; ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}
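/*
 * For example, a BAT recorded as start = 0xc0000000, limit = 0xc0ffffff,
 * phys = 0 translates va 0xc0123456 to pa 0x00123456.
 */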
/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_mapped_by_bats(phys_addr_t pa)
{
	int b;

	for (b = 0; b < 4; ++b)
		if (pa >= bat_addrs[b].phys
		    && pa < (bat_addrs[b].limit - bat_addrs[b].start)
			    + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}
unsigned long __init mmu_mapin_ram(unsigned long top)
{
	unsigned long tot, bl, done;
	unsigned long max_size = (256<<20);

	if (__map_without_bats) {
		printk(KERN_DEBUG "RAM mapped without BATs\n");
		return 0;
	}

	/* Set up BAT2 and if necessary BAT3 to cover RAM. */

	/* Make sure we don't map a block larger than the
	   smallest alignment of the physical address. */
	tot = top;
	for (bl = 128<<10; bl < max_size; bl <<= 1) {
		if (bl * 2 > tot)
			break;
	}

	setbat(2, PAGE_OFFSET, 0, bl, PAGE_KERNEL_X);
	done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
	if ((done < tot) && !bat_addrs[3].limit) {
		/* use BAT3 to cover a bit more */
		tot -= done;
		for (bl = 128<<10; bl < max_size; bl <<= 1)
			if (bl * 2 > tot)
				break;
		setbat(3, PAGE_OFFSET+done, done, bl, PAGE_KERNEL_X);
		done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
	}

	return done;
}
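/*
 * Example: for top = 96MB the first loop stops at bl = 64MB, BAT2 maps
 * the first 64MB, and the BAT3 pass then picks bl = 32MB, leaving all
 * 96MB BAT-mapped and done = 96MB.
 */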
/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, int flags)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
		bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
		bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
		if (flags & _PAGE_USER)
			bat[1].batu |= 1;	/* Vp = 1 */
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			bat[0].batu = bat[0].batl = 0;
		} else {
			/* make IBAT same as DBAT */
			bat[0] = bat[1];
		}
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW)?
			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
		bat->batu = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->batl = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
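/*
 * Example encodings of bl as computed above: the minimum 128kB block
 * gives bl = 0, the maximum 256MB block gives bl = 0x7ff (all eleven
 * BL bits set).
 */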
/*
 * Preload a translation in the hash table
 */
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	pmd_t *pmd;

	if (Hash == 0)
		return;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
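/*
 * Preloading means the first access to ea finds a valid HPTE already in
 * place rather than faulting into the hash-miss path in hashtable.S.
 */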
/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int hmask, mb, mb2;
	unsigned int n_hpteg, lg_n_hpteg;

	extern unsigned int hash_page_patch_A[];
	extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
	extern unsigned int hash_page[];
	extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		/*
		 * Put a blr (procedure return) instruction at the
		 * start of hash_page, since we can still get DSI
		 * exceptions on a 603.
		 */
		hash_page[0] = 0x4e800020;
		flush_icache_range((unsigned long) &hash_page[0],
				   (unsigned long) &hash_page[1]);
		return;
	}
	if (ppc_md.progress) ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */
	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;
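	/*
	 * Example: 256MB of RAM with 4kB pages gives
	 * n_hpteg = 256MB / (4kB * 8) = 8192 (already a power of 2),
	 * so Hash_size = 8192 << 6 = 512kB.
	 */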
	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress) ppc_md.progress("hash:find piece", 0x322);
	Hash = __va(memblock_alloc(Hash_size, Hash_size));
	cacheable_memzero(Hash, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);

	printk("Total memory = %lldMB; using %ldkB for hash table (at %p)\n",
	       (unsigned long long)(total_memory >> 20), Hash_size >> 10, Hash);
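	/*
	 * The table is allocated size-aligned, so the low bits of
	 * __pa(Hash) are clear and SDR1 holds the table origin in its
	 * upper half and the HTABMASK in its low bits; for the 512kB
	 * example above, SDR1_LOW_BITS = (8192 - 1) >> 10 = 7.
	 */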
	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	if (ppc_md.progress) ppc_md.progress("hash:patch", 0x345);
	Hash_mask = n_hpteg - 1;
	hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		mb2 = 16 - LG_HPTEG_SIZE;

	hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
		| ((unsigned int)(Hash) >> 16);
	hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
	hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
	hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
	hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
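	/*
	 * The ~0xffff patches rewrite the 16-bit immediate of the
	 * instructions that form the table address and mask (presumably
	 * an addis/ori style sequence in hashtable.S); 0x7c0 is the
	 * mask-begin (MB) field of a rlwinm-class instruction, bits 6-10
	 * of the instruction word, hence the << 6.
	 */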
	/*
	 * Ensure that the locations we've patched have been written
	 * out from the data cache and invalidated in the instruction
	 * cache, on those machines with split caches.
	 */
	flush_icache_range((unsigned long) &hash_page_patch_A[0],
			   (unsigned long) &hash_page_patch_C[1]);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
		| ((unsigned int)(Hash) >> 16);
	flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
	flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
	flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
	flush_icache_range((unsigned long) &flush_hash_patch_A[0],
			   (unsigned long) &flush_hash_patch_B[1]);
	if (ppc_md.progress) ppc_md.progress("hash:done", 0x205);
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 601 can only access 16MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
	else /* Anything else has 256M mapped */
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
}