/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4
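/*
 * The CPOLICY_* values index cache_policies[] below; "cachepolicy" holds
 * the currently selected entry and defaults to write-back.
 */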
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
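/*
 * One entry per supported cache policy: the name accepted on the command
 * line, the control register bits that must be cleared for it, and the
 * section (PMD) and page (PTE) attribute bits it implies.
 */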
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}
static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}
__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
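/*
 * Example: booting with "cachepolicy=writethrough ecc=on" selects the
 * writethrough entry of cache_policies[] and sets ecc_mask to
 * PMD_PROTECTION, so level 1 descriptors get the protection bit where
 * the CPU supports it.
 */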
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
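/*
 * PGD entries below FIRST_KERNEL_PGD_NR describe user space; entries from
 * FIRST_KERNEL_PGD_NR upwards describe the kernel and I/O mappings, which
 * is why get_pgd_slow() only copies entries from that index onwards.
 */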
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}
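/*
 * Note: the ARM level 1 table has 4096 four-byte entries (one per 1MB of
 * the 4GB virtual space), i.e. 16KB, hence the order-2 page allocation
 * below.
 */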
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}
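/*
 * Each Linux pmd entry covers 2MB and holds two 1MB hardware section
 * descriptors; the (virt & (1 << 20)) test above selects the odd one.
 * A supersection maps 16MB by repeating one descriptor in 16 consecutive
 * 1MB slots, which is what the loop below does.
 */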
/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
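/*
 * Memory type table: one entry per MT_* mapping type, giving the page
 * (PTE), level 1 and section protection bits plus the ARM domain to use.
 * build_mem_type_table() adjusts these defaults for the CPU in use.
 */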
struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	}
};
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	cp = &cache_policies[cachepolicy];
	user_pgprot = cp->pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;

		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;
	}

	if (cpu_arch >= CPU_ARCH_ARMv5) {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
	} else {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | cp->pte);

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
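/*
 * vectors_high() reflects the CR_V bit of the control register: when set,
 * the CPU takes exceptions at 0xffff0000, otherwise at address 0.  This is
 * why create_mapping() tolerates a mapping at vectors_base() even though
 * it lies in the user address range.
 */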
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%016llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
				"mapping for 0x%016llx at 0x%08lx\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}
		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
			& ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
				"0x%016llx at 0x%08lx invalid alignment\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}

	virt   = md->virtual;
	off   -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
			        (virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt   += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}
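/*
 * The loops above build the mapping with individual pages until virt and
 * phys reach a 1MB boundary, then with supersections and sections while
 * at least 1MB remains, and any tail smaller than 1MB falls back to
 * individual pages again.
 */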
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
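/*
 * Typical use (hypothetical board support code; the names below are only
 * illustrative): a machine's ->map_io() callback hands a static table of
 * map_desc entries to iotable_init(), e.g.
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual = 0xf8000000,
 *			.pfn	 = __phys_to_pfn(0x40000000),
 *			.length	 = SZ_1M,
 *			.type	 = MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 */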