/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
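
/*
 * cachepolicy indexes the cache_policies[] table below; it can be
 * overridden at boot with the "cachepolicy=" early parameter, and
 * "ecc=on" sets ecc_mask to PMD_PROTECTION (see early_ecc() below).
 */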

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = 0,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = PTE_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = PTE_CACHEABLE,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        flush_cache_all();
        set_cr(cr_alignment);
}
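
/*
 * Illustrative example (not in the original source): booting with
 * "cachepolicy=writethrough" on the command line makes the loop above
 * match the "writethrough" entry, record it as the current policy and
 * clear that entry's bits from cr_alignment/cr_no_alignment.
 */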

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

        init_pgd = pgd_offset_k(0);

        if (!vectors_high()) {
                /*
                 * This lock is here just to satisfy pmd_alloc and pte_lock
                 */
                spin_lock(&mm->page_table_lock);

                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors.
                 */
                new_pmd = pmd_alloc(mm, new_pgd, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pmd = pmd_offset(init_pgd, 0);
                init_pte = pte_offset_map_nested(init_pmd, 0);
                set_pte(new_pte, *init_pte);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);

                spin_unlock(&mm->page_table_lock);
        }

        /*
         * Copy over the kernel and IO PGD entries
         */
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
               (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        return new_pgd;

no_pte:
        spin_unlock(&mm->page_table_lock);
        pmd_free(new_pmd);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pmd:
        spin_unlock(&mm->page_table_lock);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pgd:
        return NULL;
}
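
/*
 * Note: the order-2 __get_free_pages() call above provides the 16kB
 * (four contiguous pages) that a full ARM first-level table requires.
 * Only the kernel/IO part is copied from init_mm; the first user entry
 * is populated separately so the vector page is always mapped when the
 * vectors live at the low address.
 */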

void free_pgd_slow(pgd_t *pgd)
{
        pmd_t *pmd;
        struct page *pte;

        if (!pgd)
                return;

        /* pgd is always present and good */
        pmd = pmd_offset(pgd, 0);
        if (pmd_none(*pmd))
                goto free;

        pte = pmd_page(*pmd);
        pmd_clear(pmd);
        dec_page_state(nr_page_table_pages);
        pte_free(pte);
        pmd_free(pmd);
free:
        free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
        pmd_t *pmdp;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (virt & (1 << 20))
                pmdp++;

        *pmdp = __pmd(phys | prot);
        flush_pmd_entry(pmdp);
}
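
/*
 * A pgdir entry spans 2MB while a hardware section is 1MB, hence the
 * pmdp++ above when the virtual address falls in the odd megabyte;
 * callers therefore advance in PGDIR_SIZE/2 steps.
 */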

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
        int i;

        for (i = 0; i < 16; i += 1) {
                alloc_init_section(virt, phys & SUPERSECTION_MASK,
                                   prot | PMD_SECT_SUPER);

                virt += (PGDIR_SIZE / 2);
                phys += (PGDIR_SIZE / 2);
        }
}
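
/*
 * A supersection maps 16MB through a single TLB entry, but the
 * hardware expects the same descriptor repeated in all 16 consecutive
 * slots, which is why the loop writes phys & SUPERSECTION_MASK with
 * PMD_SECT_SUPER sixteen times.
 */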

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
        pmd_t *pmdp;
        pte_t *ptep;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);

        if (pmd_none(*pmdp)) {
                unsigned long pmdval;
                ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
                                               sizeof(pte_t));

                pmdval = __pa(ptep) | prot_l1;
                pmdp[0] = __pmd(pmdval);
                pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
                flush_pmd_entry(pmdp);
        }
        ptep = pte_offset_kernel(pmdp, virt);

        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
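
/*
 * The single bootmem allocation above is sized for both tables that
 * the comment before this function mentions (one Linux pte table, one
 * hardware pte table); pmdp[0] and pmdp[1] point at two consecutive
 * 256-entry hardware tables, hence the 256 * sizeof(pte_t) offset.
 */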

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
        pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
}

struct mem_types {
        unsigned int    prot_pte;
        unsigned int    prot_l1;
        unsigned int    prot_sect;
        unsigned int    domain;
};

static struct mem_types mem_types[] __initdata = {
        [MT_DEVICE] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
                                PMD_SECT_TEX(1),
                .domain    = DOMAIN_IO,
        }
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        int cpu_arch = cpu_architecture();
        int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
        if (cachepolicy > CPOLICY_BUFFERED)
                cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
        if (cachepolicy > CPOLICY_WRITETHROUGH)
                cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }

        if (cpu_arch <= CPU_ARCH_ARMv5) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        /*
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
                /*
                 * bit 4 becomes XN which we must clear for the
                 * kernel memory mapping.
                 */
                mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
                mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;

                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
        }

        cp = &cache_policies[cachepolicy];

        if (cpu_arch >= CPU_ARCH_ARMv5) {
                mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
                mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
        } else {
                mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
        }

        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;

        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
                protection_map[i] = __pgprot(v);
        }

        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | L_PTE_WRITE |
                                 L_PTE_EXEC | cp->pte);

        switch (cp->pmd) {
        case PMD_SECT_WT:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
                break;
        }
        printk("Memory policy: ECC %sabled, Data cache %s\n",
                ecc_mask ? "en" : "dis", cp->policy);
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
        unsigned long virt, length;
        int prot_sect, prot_l1, domain;
        pgprot_t prot_pte;
        long off;

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%08lx at 0x%08lx in user region\n",
                       md->physical, md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       md->physical, md->virtual);
        }

        domain    = mem_types[md->type].domain;
        prot_pte  = __pgprot(mem_types[md->type].prot_pte);
        prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
        prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

        virt   = md->virtual;
        off    = md->physical - virt;
        length = md->length;

        if (mem_types[md->type].prot_l1 == 0 &&
            (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       md->physical, md->virtual);
                return;
        }

        while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, prot_l1, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }

        /* N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to ensure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
                /* Align to supersection boundary */
                while ((virt & ~SUPERSECTION_MASK || (virt + off) &
                        ~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
                        alloc_init_section(virt, virt + off, prot_sect);

                        virt   += (PGDIR_SIZE / 2);
                        length -= (PGDIR_SIZE / 2);
                }

                while (length >= SUPERSECTION_SIZE) {
                        alloc_init_supersection(virt, virt + off, prot_sect);

                        virt   += SUPERSECTION_SIZE;
                        length -= SUPERSECTION_SIZE;
                }
        }

        /*
         * A section mapping covers half a "pgdir" entry.
         */
        while (length >= (PGDIR_SIZE / 2)) {
                alloc_init_section(virt, virt + off, prot_sect);

                virt   += (PGDIR_SIZE / 2);
                length -= (PGDIR_SIZE / 2);
        }

        while (length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, prot_l1, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }
}
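
/*
 * Mapping strategy used above: map single pages until the region is
 * section-aligned, then use supersections (ARMv6, domain 0 only) and
 * 1MB sections for the bulk of the region, and finish with pages for
 * any remainder shorter than half a pgdir entry.
 */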

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
        unsigned long pmdval;
        pgd_t *pgd;
        pmd_t *pmd;
        int i;
        int cpu_arch = cpu_architecture();

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
                pmdval = (i << PGDIR_SHIFT) |
                         PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
                         PMD_TYPE_SECT;
                if (cpu_arch <= CPU_ARCH_ARMv5)
                        pmdval |= PMD_BIT4;
                pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
}
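
/*
 * Each loop iteration above fills both halves of one pgdir slot with
 * identity-mapped 1MB section descriptors (the second at +1MB), which
 * is what makes the MMU-off transition predictable.
 */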

extern void _stext, _etext;

/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
        struct map_desc *init_maps, *p, *q;
        unsigned long address = 0;
        int i;

        build_mem_type_table();

        init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
        p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
        p->virtual    = (unsigned long)&_stext & PMD_MASK;
        p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
        p->type       = MT_ROM;
        p ++;
#endif

        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0)
                        continue;

                p->physical   = mi->bank[i].start;
                p->virtual    = __phys_to_virt(p->physical);
                p->length     = mi->bank[i].size;
                p->type       = MT_MEMORY;
                p ++;
        }

#ifdef FLUSH_BASE
        p->physical   = FLUSH_BASE_PHYS;
        p->virtual    = FLUSH_BASE;
        p->length     = PGDIR_SIZE;
        p->type       = MT_CACHECLEAN;
        p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
        p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
        p->virtual    = FLUSH_BASE_MINICACHE;
        p->length     = PGDIR_SIZE;
        p->type       = MT_MINICLEAN;
        p ++;
#endif

        /*
         * Go through the initial mappings, but clear out any
         * pgdir entries that are not in the description.
         */
        q = init_maps;
        do {
                if (address < q->virtual || q == p) {
                        clear_mapping(address);
                        address += PGDIR_SIZE;
                } else {
                        create_mapping(q);

                        address = q->virtual + q->length;
                        address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

                        q ++;
                }
        } while (address != 0);

        /*
         * Create a mapping for the machine vectors at the high-vectors
         * location (0xffff0000).  If we aren't using high-vectors, also
         * create a mapping at the low-vectors virtual address.
         */
        init_maps->physical   = virt_to_phys(init_maps);
        init_maps->virtual    = 0xffff0000;
        init_maps->length     = PAGE_SIZE;
        init_maps->type       = MT_HIGH_VECTORS;
        create_mapping(init_maps);

        if (!vectors_high()) {
                init_maps->virtual = 0;
                init_maps->type = MT_LOW_VECTORS;
                create_mapping(init_maps);
        }
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}
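
/*
 * Illustrative use only (the names and addresses below are made up,
 * not from this file): a machine's map_io routine typically hands a
 * static table of map_desc entries to iotable_init(), e.g.
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{ .physical = 0x40000000, .virtual = 0xf4000000,
 *		  .length   = 0x00100000, .type = MT_DEVICE },
 *	};
 *	iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 */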

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn);
        end_pg = pfn_to_page(end_pfn);

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order.  This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0 || mi->bank[i].node != node)
                        continue;

                bank_start = mi->bank[i].start >> PAGE_SHIFT;
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks.  "
                                "Not freeing memmap.\n");
                        break;
                }

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
                                           mi->bank[i].size) >> PAGE_SHIFT;
        }
}
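
/*
 * bank_start and prev_bank_end are page frame numbers: when a hole is
 * found between two in-order banks, the struct page entries covering
 * that hole are handed back to bootmem by free_memmap() above.
 */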

/*
 * The mem_map array can get very big.  Free
 * the unused area of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
        int node;

        for_each_online_node(node)
                free_unused_memmap_node(node, mi);
}