x86/mm/pat: Don't report PAT on CPUs that don't support it
[linux/fpc-iii.git] / arch / arm / mm / pgd.c
blobc1c1a5c67da1418c8822fda4b282578175bf6ec3
/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"
#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE the level-1 table holds only PTRS_PER_PGD entries, so a
 * plain kmalloc allocation is sufficient.
 */
#define __pgd_alloc()	kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
/*
 * Classic (non-LPAE) ARM needs an order-2 page allocation (16K with 4K
 * pages) for the level-1 table — see the "16k page for level 1" note at
 * pgd_alloc().
 */
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif
31 * need to get a 16k page for level 1
33 pgd_t *pgd_alloc(struct mm_struct *mm)
35 pgd_t *new_pgd, *init_pgd;
36 pud_t *new_pud, *init_pud;
37 pmd_t *new_pmd, *init_pmd;
38 pte_t *new_pte, *init_pte;
40 new_pgd = __pgd_alloc();
41 if (!new_pgd)
42 goto no_pgd;
44 memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
47 * Copy over the kernel and IO PGD entries
49 init_pgd = pgd_offset_k(0);
50 memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
51 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
53 clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
55 #ifdef CONFIG_ARM_LPAE
57 * Allocate PMD table for modules and pkmap mappings.
59 new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
60 MODULES_VADDR);
61 if (!new_pud)
62 goto no_pud;
64 new_pmd = pmd_alloc(mm, new_pud, 0);
65 if (!new_pmd)
66 goto no_pmd;
67 #endif
69 if (!vectors_high()) {
71 * On ARM, first page must always be allocated since it
72 * contains the machine vectors. The vectors are always high
73 * with LPAE.
75 new_pud = pud_alloc(mm, new_pgd, 0);
76 if (!new_pud)
77 goto no_pud;
79 new_pmd = pmd_alloc(mm, new_pud, 0);
80 if (!new_pmd)
81 goto no_pmd;
83 new_pte = pte_alloc_map(mm, new_pmd, 0);
84 if (!new_pte)
85 goto no_pte;
87 #ifndef CONFIG_ARM_LPAE
89 * Modify the PTE pointer to have the correct domain. This
90 * needs to be the vectors domain to avoid the low vectors
91 * being unmapped.
93 pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
94 pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
95 #endif
97 init_pud = pud_offset(init_pgd, 0);
98 init_pmd = pmd_offset(init_pud, 0);
99 init_pte = pte_offset_map(init_pmd, 0);
100 set_pte_ext(new_pte + 0, init_pte[0], 0);
101 set_pte_ext(new_pte + 1, init_pte[1], 0);
102 pte_unmap(init_pte);
103 pte_unmap(new_pte);
106 return new_pgd;
108 no_pte:
109 pmd_free(mm, new_pmd);
110 mm_dec_nr_pmds(mm);
111 no_pmd:
112 pud_free(mm, new_pud);
113 no_pud:
114 __pgd_free(new_pgd);
115 no_pgd:
116 return NULL;
119 void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
121 pgd_t *pgd;
122 pud_t *pud;
123 pmd_t *pmd;
124 pgtable_t pte;
126 if (!pgd_base)
127 return;
129 pgd = pgd_base + pgd_index(0);
130 if (pgd_none_or_clear_bad(pgd))
131 goto no_pgd;
133 pud = pud_offset(pgd, 0);
134 if (pud_none_or_clear_bad(pud))
135 goto no_pud;
137 pmd = pmd_offset(pud, 0);
138 if (pmd_none_or_clear_bad(pmd))
139 goto no_pmd;
141 pte = pmd_pgtable(*pmd);
142 pmd_clear(pmd);
143 pte_free(mm, pte);
144 atomic_long_dec(&mm->nr_ptes);
145 no_pmd:
146 pud_clear(pud);
147 pmd_free(mm, pmd);
148 mm_dec_nr_pmds(mm);
149 no_pud:
150 pgd_clear(pgd);
151 pud_free(mm, pud);
152 no_pgd:
153 #ifdef CONFIG_ARM_LPAE
155 * Free modules/pkmap or identity pmd tables.
157 for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
158 if (pgd_none_or_clear_bad(pgd))
159 continue;
160 if (pgd_val(*pgd) & L_PGD_SWAPPER)
161 continue;
162 pud = pud_offset(pgd, 0);
163 if (pud_none_or_clear_bad(pud))
164 continue;
165 pmd = pmd_offset(pud, 0);
166 pud_clear(pud);
167 pmd_free(mm, pmd);
168 mm_dec_nr_pmds(mm);
169 pgd_clear(pgd);
170 pud_free(mm, pud);
172 #endif
173 __pgd_free(pgd_base);