/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}
static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
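
/*
 * "ecc=on|off" early parameter: select the ECC protection bit that
 * build_mem_type_table() later folds into the level 1 descriptors
 * via ecc_mask.
 */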
static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}
__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
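
/*
 * "noalign" kernel parameter: clear the alignment fault enable (A) bit
 * in the CP15 control register so unaligned accesses are not trapped.
 */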
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
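
/*
 * Return the first-level (pmd) entry covering a virtual address, either
 * within the given page directory or within the kernel's.
 */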
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
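
/*
 * Free a first-level page table allocated by get_pgd_slow(), including
 * the vectors page table that may hang off its first entry.
 */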
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}
/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}
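
/*
 * A supersection maps 16MB; the architecture requires the same descriptor
 * to be repeated in all 16 consecutive 1MB slots, which is why the loop
 * above advances virt but keeps phys unchanged.
 */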
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
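
/*
 * Per-memory-type protection bits, indexed by the MT_* constants used in
 * struct map_desc; create_mapping() looks protections up here.
 */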
struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	},
	[MT_NONSHARED_DEVICE] = {
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	}
};
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot;
	int cpu_arch = cpu_architecture();
	int i;
#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
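
	/*
	 * Pre-ARMv6 CPUs expect bit 4 of the level 1 descriptor to be set,
	 * so fold PMD_BIT4 into every memory type that defines level 1 or
	 * section bits.
	 */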
	if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}
	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;

		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
	}
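
	/*
	 * Propagate the chosen user cacheability (and, on ARMv6, ASID/shared)
	 * bits into the generic vm protection_map.
	 */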
	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}
	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
	if (cpu_arch >= CPU_ARCH_ARMv5) {
#ifndef CONFIG_SMP
		/*
		 * Only use write-through for non-SMP systems
		 */
		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
#endif
	} else {
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}
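
/*
 * vectors_base(): virtual address at which the exception vectors are
 * mapped - 0xffff0000 when high vectors are enabled, 0 otherwise.
 */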
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);
	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}
	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}
	domain    = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
				"mapping for 0x%08llx at 0x%08lx\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
			& ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
				"0x%08llx at 0x%08lx invalid alignment\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}
	virt   = md->virtual;
	off   -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}
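
	/*
	 * Map leading pages one at a time until both the virtual and the
	 * physical address are section (1MB) aligned.
	 */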
	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
		&& domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
				(virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt   += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}
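
	/*
	 * Anything not covered by supersections above falls through to the
	 * ordinary section and page loops below.
	 */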
	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}