/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>

#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
}
__early_param("cachepolicy=", early_cachepolicy);
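
/*
 * Illustrative usage (not part of the original source): the policy is
 * selected from the kernel command line at boot, e.g.
 *
 *	cachepolicy=writethrough
 *
 * which matches the "writethrough" entry in cache_policies[] above.
 */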

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
__early_param("nocache", early_nocache);

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
__early_param("nowb", early_nowrite);

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif
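
/*
 * Illustrative usage (a sketch, not from the original source): a caller
 * wanting to set the C (dcache enable) bit consistently in both saved
 * control register copies and the live register might do:
 *
 *	adjust_cr(CR_C, CR_C);
 *
 * and clear it again with adjust_cr(CR_C, 0).
 */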

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
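
/*
 * Illustrative usage (a sketch, not from the original source): mapping
 * code that needs the protection bits for a device mapping might do:
 *
 *	const struct mem_type *type = get_mem_type(MT_DEVICE);
 *	pgprot_t prot;
 *
 *	if (type)
 *		prot = __pgprot(type->prot_pte);
 *
 * The NULL check matters: get_mem_type() returns NULL for an
 * out-of-range type index.
 */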

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
#ifdef CONFIG_SMP
	cachepolicy = CPOLICY_WRITEALLOC;
#endif

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
	/*
	 * Only use write-through for non-SMP systems
	 */
	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3())
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		vecs_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}
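
/*
 * Worked example (illustrative, with made-up addresses): mapping SZ_1M
 * of physical 0x50000000 at virtual 0xe0000000 has addr, end and phys
 * all section-aligned, so the alignment test above takes the section
 * path and writes a single L1 entry:
 *
 *	*pmd = __pmd(0x50000000 | type->prot_sect);
 *
 * If any of the three were misaligned, the range would instead be
 * mapped with individual L2 entries via alloc_init_pte().
 */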

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
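
/*
 * Worked example (illustrative, with a made-up pfn): with 4K pages
 * (PAGE_SHIFT == 12), a map_desc with md->pfn == 0x180000 describes
 * physical address 0x1_80000000.  Then
 *
 *	(md->pfn >> (32 - PAGE_SHIFT)) & 0xF  ==  (0x180000 >> 20) & 0xF  ==  0x1
 *
 * so bit 32 of the 36-bit physical address lands in bit 20 of the PMD
 * value, as the supersection descriptor format requires.
 */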

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
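
/*
 * Illustrative usage (a sketch, not from the original source; the
 * "foo" names and addresses are hypothetical): machine support code
 * typically declares a static table and hands it to iotable_init()
 * from its map_io hook:
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf1000000,
 *			.pfn		= __phys_to_pfn(0x40000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init foo_map_io(void)
 *	{
 *		iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 *	}
 */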

static unsigned long __initdata vmalloc_reserve = SZ_128M;

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static void __init early_vmalloc(char **arg)
{
	vmalloc_reserve = memparse(*arg, arg);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}
}
__early_param("vmalloc=", early_vmalloc);
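
/*
 * Illustrative usage (not from the original source): booting with
 *
 *	vmalloc=256M
 *
 * reserves a 256MB vmalloc area; values below 16MB, or large enough
 * to leave less than 32MB of lowmem, are clamped as above.
 */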

#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)

static void __init sanity_check_meminfo(void)
{
	int i, j;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (__va(bank->start) < VMALLOC_MIN &&
		    bank->size > VMALLOC_MIN - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else if (cache_is_vipt_aliasing()) {
				printk(KERN_CRIT "HIGHMEM is not yet supported "
					"with VIPT aliasing cache, "
					"ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= VMALLOC_MIN - __va(bank->start);
				bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
				j++;
			}
			bank->size = VMALLOC_MIN - __va(bank->start);
		}
#else
		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= VMALLOC_MIN ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
			       "(vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > VMALLOC_MIN ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = VMALLOC_MIN - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
			       "to -%.8lx (vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1,
			       bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		j++;
	}
	meminfo.nr_banks = j;
}
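
/*
 * Worked example (illustrative, with made-up numbers): with PAGE_OFFSET
 * 0xc0000000, PHYS_OFFSET 0, VMALLOC_MIN 0xd0000000 and a single 512MB
 * bank at physical 0x00000000, __va(bank->start) is 0xc0000000 and only
 * 256MB fit below VMALLOC_MIN.  Under CONFIG_HIGHMEM the bank is split
 * in two:
 *
 *	bank[0]: start 0x00000000, size 256MB	(lowmem)
 *	bank[1]: start 0x10000000, size 256MB	(highmem)
 *
 * Without CONFIG_HIGHMEM, the bank would simply be truncated to 256MB.
 */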

static inline void prepare_page_table(void)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
			BOOTMEM_DEFAULT);
#else
	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
			BOOTMEM_DEFAULT);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

	/* H1940 and RX3715 need to reserve this for suspend */

	if (machine_is_h1940() || machine_is_rx3715()) {
		reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
				BOOTMEM_DEFAULT);
		reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
				BOOTMEM_DEFAULT);
	}

	if (machine_is_palmld() || machine_is_palmtx()) {
		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
				BOOTMEM_EXCLUSIVE);
		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
				BOOTMEM_EXCLUSIVE);
	}

	if (machine_is_treo680()) {
		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
				BOOTMEM_EXCLUSIVE);
		reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
				BOOTMEM_EXCLUSIVE);
	}

	if (machine_is_palmt5())
		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
				BOOTMEM_EXCLUSIVE);

	/*
	 * U300 - This platform family can share physical memory
	 * between two ARM cpus, one running Linux and the other
	 * running another OS.
	 */
	if (machine_is_u300()) {
#ifdef CONFIG_MACH_U300_SINGLE_RAM
#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&	\
	CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
		res_size = 0x00100000;
#endif
#endif
	}

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
				BOOTMEM_DEFAULT);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
	BUG_ON(!pmd_none(*pmd) || !pte);
	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
	pkmap_page_table = pte + PTRS_PER_PTE;
#endif
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	bootmem_init();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/*
	 * allocate the zero page.  Note that this always succeeds and
	 * returns a zeroed result.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
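
/*
 * Illustrative note (a sketch, not from the original source): the
 * architecture reboot path is expected to call this just before turning
 * the MMU off, roughly:
 *
 *	setup_mm_for_reboot(mode);
 *	cpu_reset(reset_address);	// reset_address is hypothetical
 *
 * Each pgd slot is filled with two 1MB section mappings (pmd[0] and
 * pmd[1]) covering the 2MB span of one pgd entry, giving the identity
 * mapping the comment above describes.
 */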