arch/x86/mm/ident_map.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */
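
/*
 * A minimal usage sketch, not part of this file: it assumes the
 * struct x86_mapping_info layout from <asm/init.h> and helpers such as
 * get_zeroed_page(), __PAGE_KERNEL_LARGE_EXEC and SZ_1G that a caller
 * along the lines of the kexec code typically has available.
 *
 *      static void *alloc_pgt_page(void *context)
 *      {
 *              return (void *)get_zeroed_page(GFP_KERNEL);
 *      }
 *
 *      struct x86_mapping_info info = {
 *              .alloc_pgt_page = alloc_pgt_page,
 *              .page_flag      = __PAGE_KERNEL_LARGE_EXEC,
 *              .offset         = 0,    // identity: virtual == physical
 *      };
 *      pgd_t *pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
 *
 *      // Map physical [0, 1 GiB) at the same virtual addresses.
 *      if (!pgd || kernel_ident_mapping_init(&info, pgd, 0, SZ_1G))
 *              return -ENOMEM;
 *
 * kernel_ident_mapping_free() additionally needs .free_pgt_page set.
 */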

static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
{
        pte_t *pte = pte_offset_kernel(pmd, 0);

        info->free_pgt_page(pte, info->context);
}

static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_present(pmd[i]))
                        continue;

                if (pmd_leaf(pmd[i]))
                        continue;

                free_pte(info, &pmd[i]);
        }

        info->free_pgt_page(pmd, info->context);
}

static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
{
        pud_t *pud = pud_offset(p4d, 0);
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                if (!pud_present(pud[i]))
                        continue;

                if (pud_leaf(pud[i]))
                        continue;

                free_pmd(info, &pud[i]);
        }

        info->free_pgt_page(pud, info->context);
}

static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
{
        p4d_t *p4d = p4d_offset(pgd, 0);
        int i;

        for (i = 0; i < PTRS_PER_P4D; i++) {
                if (!p4d_present(p4d[i]))
                        continue;

                free_pud(info, &p4d[i]);
        }

        /* With p4d folded, the p4d table is the pgd page itself and is freed by the caller. */
        if (pgtable_l5_enabled())
                info->free_pgt_page(p4d, info->context);
}

void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
{
        int i;

        for (i = 0; i < PTRS_PER_PGD; i++) {
                if (!pgd_present(pgd[i]))
                        continue;

                free_p4d(info, &pgd[i]);
        }

        info->free_pgt_page(pgd, info->context);
}

static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
                           unsigned long addr, unsigned long end)
{
        addr &= PMD_MASK;
        for (; addr < end; addr += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(addr);

                if (pmd_present(*pmd))
                        continue;

                set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
        }
}

static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
                          unsigned long addr, unsigned long end)
{
        unsigned long next;

        for (; addr < end; addr = next) {
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                bool use_gbpage;

                next = (addr & PUD_MASK) + PUD_SIZE;
                if (next > end)
                        next = end;

                /* if this is already a gbpage, this portion is already mapped */
                if (pud_leaf(*pud))
                        continue;

                /* Is using a gbpage allowed? */
                use_gbpage = info->direct_gbpages;

                /* Don't use gbpage if it maps more than the requested region. */
                /* at the beginning: */
                use_gbpage &= ((addr & ~PUD_MASK) == 0);
                /* ... or at the end: */
                use_gbpage &= ((next & ~PUD_MASK) == 0);

                /* Never overwrite existing mappings */
                use_gbpage &= !pud_present(*pud);
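
                /*
                 * Worked example (illustration, assuming the usual x86-64
                 * geometry where PUD_SIZE is 1 GiB, so ~PUD_MASK == 0x3fffffff):
                 * addr = 0x40000000 and next = 0x80000000 are both 1 GiB
                 * aligned and keep use_gbpage true, while addr = 0x40200000
                 * or next = 0x7fe00000 clears it, so the partial gigabyte is
                 * filled with 2 MiB PMD entries below instead.
                 */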

                if (use_gbpage) {
                        pud_t pudval;

                        pudval = __pud((addr - info->offset) | info->page_flag);
                        set_pud(pud, pudval);
                        continue;
                }

                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, 0);
                        ident_pmd_init(info, pmd, addr, next);
                        continue;
                }

                pmd = (pmd_t *)info->alloc_pgt_page(info->context);
                if (!pmd)
                        return -ENOMEM;
                ident_pmd_init(info, pmd, addr, next);
                set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
        }

        return 0;
}

static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
                          unsigned long addr, unsigned long end)
{
        unsigned long next;
        int result;

        for (; addr < end; addr = next) {
                p4d_t *p4d = p4d_page + p4d_index(addr);
                pud_t *pud;

                next = (addr & P4D_MASK) + P4D_SIZE;
                if (next > end)
                        next = end;

                if (p4d_present(*p4d)) {
                        pud = pud_offset(p4d, 0);
                        result = ident_pud_init(info, pud, addr, next);
                        if (result)
                                return result;

                        continue;
                }

                pud = (pud_t *)info->alloc_pgt_page(info->context);
                if (!pud)
                        return -ENOMEM;

                result = ident_pud_init(info, pud, addr, next);
                if (result)
                        return result;

                set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
        }

        return 0;
}

int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
                              unsigned long pstart, unsigned long pend)
{
        unsigned long addr = pstart + info->offset;
        unsigned long end = pend + info->offset;
        unsigned long next;
        int result;

        /* Set the default pagetable flags if not supplied */
        if (!info->kernpg_flag)
                info->kernpg_flag = _KERNPG_TABLE;

        /* Filter out unsupported __PAGE_KERNEL_* bits: */
        info->kernpg_flag &= __default_kernel_pte_mask;

        for (; addr < end; addr = next) {
                pgd_t *pgd = pgd_page + pgd_index(addr);
                p4d_t *p4d;

                next = (addr & PGDIR_MASK) + PGDIR_SIZE;
                if (next > end)
                        next = end;

                if (pgd_present(*pgd)) {
                        p4d = p4d_offset(pgd, 0);
                        result = ident_p4d_init(info, p4d, addr, next);
                        if (result)
                                return result;
                        continue;
                }

                p4d = (p4d_t *)info->alloc_pgt_page(info->context);
                if (!p4d)
                        return -ENOMEM;

                result = ident_p4d_init(info, p4d, addr, next);
                if (result)
                        return result;

                if (pgtable_l5_enabled()) {
                        set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
                } else {
                        /*
                         * With p4d folded, pgd is equal to p4d.
                         * The pgd entry has to point to the pud page table in this case.
                         */
                        pud_t *pud = pud_offset(p4d, 0);

                        set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
                }
        }

        return 0;
}