// SPDX-License-Identifier: GPL-2.0
/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 *
 * Copyright (C) 2015-2016  Yinghai Lu
 * Copyright (C)      2016  Kees Cook
 */

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))

/* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION

#include "error.h"
#include "misc.h"
/* These actually do the work of building the kernel identity maps. */
#include <linux/pgtable.h>
#include <asm/cmpxchg.h>
#include <asm/trap_pf.h>
#include <asm/trapnr.h>
#include <asm/init.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"

#define _SETUP
#include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
#undef _SETUP
extern unsigned long get_cmd_line_ptr(void);

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
/* Used to track our page table allocation area. */
struct alloc_pgt_data {
	unsigned char *pgt_buf;
	unsigned long pgt_buf_size;
	unsigned long pgt_buf_offset;
};
/*
 * Allocates space for a page table entry, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
	unsigned char *entry;

	/* Validate there is space available for a new page. */
	if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
		debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
		return NULL;
	}

	entry = pages->pgt_buf + pages->pgt_buf_offset;
	pages->pgt_buf_offset += PAGE_SIZE;

	return entry;
}
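
/*
 * Note: this is a simple bump allocator over pgt_buf - pages are handed
 * out PAGE_SIZE at a time and never freed. On exhaustion only the debug
 * output above is produced and NULL is returned, so callers have to cope
 * with allocation failure.
 */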
/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long top_level_pgt;

phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time not build time.
 */
static struct x86_mapping_info mapping_info;
/*
 * Adds the specified range to the identity mappings.
 */
static void add_identity_map(unsigned long start, unsigned long end)
{
	int ret;

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
					start, end);
	if (ret)
		error("Error: kernel_ident_mapping_init() failed\n");
}
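
/*
 * Usage sketch (all callers are further down in this file): any byte range
 * may be passed in and is widened to 2M boundaries before the mapping is
 * built, e.g.
 *
 *	add_identity_map((unsigned long)_head, (unsigned long)_end);
 *
 * identity-maps the whole kernel image.
 */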
/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void *rmode)
{
	unsigned long cmdline;

	/* Exclude the encryption mask from __PHYSICAL_MASK */
	physical_mask &= ~sme_me_mask;

	/* Init mapping_info with run-time function/buffer pointers. */
	mapping_info.alloc_pgt_page = alloc_pgt_page;
	mapping_info.context = &pgt_data;
	mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
	mapping_info.kernpg_flag = _KERNPG_TABLE;

	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 *
	 * With 5-level paging, we use '_pgtable' to allocate the p4d page
	 * table; the top-level page table is allocated separately.
	 *
	 * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
	 * cases. On 4-level paging it's equal to 'top_level_pgt'.
	 */
	top_level_pgt = read_cr3_pa();
	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
	} else {
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
	}

	/*
	 * New page-table is set up - map the kernel image, boot_params and the
	 * command line. The uncompressed kernel requires boot_params and the
	 * command line to be mapped in the identity mapping. Map them
	 * explicitly here in case the compressed kernel does not touch them,
	 * or does not touch all the pages covering them.
	 */
	add_identity_map((unsigned long)_head, (unsigned long)_end);
	boot_params = rmode;
	add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
	cmdline = get_cmd_line_ptr();
	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);

	/* Load the new page-table. */
	sev_verify_cbit(top_level_pgt);
	write_cr3(top_level_pgt);
}
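
/*
 * Replace a large 2M PMD mapping with a page table of 512 4K PTEs covering
 * the same range, and return a pointer to the PTE for '__address' inside
 * the new table so the caller can adjust the 4K mapping it cares about.
 */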
static pte_t *split_large_pmd(struct x86_mapping_info *info,
			      pmd_t *pmdp, unsigned long __address)
{
	unsigned long page_flags;
	unsigned long address;
	pte_t *pte;
	pmd_t pmd;
	int i;

	pte = (pte_t *)info->alloc_pgt_page(info->context);
	if (!pte)
		return NULL;

	address     = __address & PMD_MASK;
	/* No large page - clear PSE flag */
	page_flags  = info->page_flag & ~_PAGE_PSE;

	/* Populate the PTEs */
	for (i = 0; i < PTRS_PER_PMD; i++) {
		set_pte(&pte[i], __pte(address | page_flags));
		address += PAGE_SIZE;
	}

	/*
	 * Ideally we need to clear the large PMD first and do a TLB
	 * flush before we write the new PMD. But the 2M range of the
	 * PMD might contain the code we execute and/or the stack
	 * we are on, so we can't do that. But that should be safe here
	 * because we are going from large to small mappings and we are
	 * also the only user of the page-table, so there is no chance
	 * of a TLB multihit.
	 */
	pmd = __pmd((unsigned long)pte | info->kernpg_flag);
	set_pmd(pmdp, pmd);
	/* Flush TLB to establish the new PMD */
	write_cr3(top_level_pgt);

	return pte + pte_index(__address);
}
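
/*
 * Flush the 4K page containing 'address' from the CPU caches, one
 * cache-line at a time. Used below when the encryption attribute
 * (_PAGE_ENC) of a page is about to change.
 */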
static void clflush_page(unsigned long address)
{
	unsigned int flush_size;
	char *cl, *start, *end;

	/*
	 * Hardcode cl-size to 64 - CPUID can't be used here because that might
	 * cause another #VC exception and the GHCB is not ready to use yet.
	 */
	flush_size = 64;
	start      = (char *)(address & PAGE_MASK);
	end        = start + PAGE_SIZE;

	/*
	 * First make sure there are no pending writes on the cache-lines to
	 * flush.
	 */
	asm volatile("mfence" : : : "memory");

	for (cl = start; cl != end; cl += flush_size)
		clflush(cl);
}
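
/*
 * Set and/or clear the given flags in the PTE that maps 'address',
 * splitting a large 2M mapping into 4K PTEs first when necessary.
 * Returns 0 on success, or a negative error code if no PTE could be
 * allocated for the split.
 */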
static int set_clr_page_flags(struct x86_mapping_info *info,
			      unsigned long address,
			      pteval_t set, pteval_t clr)
{
	pgd_t *pgdp = (pgd_t *)top_level_pgt;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;

	/*
	 * First make sure there is a PMD mapping for 'address'.
	 * It should already exist, but keep things generic.
	 *
	 * To map the page just read from it and fault it in if there is no
	 * mapping yet. add_identity_map() can't be called here because that
	 * would unconditionally map the address on PMD level, destroying any
	 * PTE-level mappings that might already exist. Use assembly here so
	 * the access won't be optimized away.
	 */
	asm volatile("mov %[address], %%r9"
		     :: [address] "g" (*(unsigned long *)address)
		     : "r9", "memory");

	/*
	 * The page is mapped at least with PMD size - so skip checks and walk
	 * directly to the PMD.
	 */
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);

	if (pmd_large(*pmdp))
		ptep = split_large_pmd(info, pmdp, address);
	else
		ptep = pte_offset_kernel(pmdp, address);

	if (!ptep)
		return -ENOMEM;

	/*
	 * Changing the encryption attributes of a page requires flushing it
	 * from the caches.
	 */
	if ((set | clr) & _PAGE_ENC)
		clflush_page(address);

	/* Update PTE */
	pte = *ptep;
	pte = pte_set_flags(pte, set);
	pte = pte_clear_flags(pte, clr);
	set_pte(ptep, pte);

	/* Flush TLB after changing encryption attribute */
	write_cr3(top_level_pgt);

	return 0;
}
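
/*
 * Thin wrappers around set_clr_page_flags() for flipping the encryption
 * and present bits of individual 4K pages in the identity map. The
 * decrypted/encrypted helpers are presumably what the SEV support code
 * uses for pages shared with the hypervisor, such as the GHCB.
 */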
int set_page_decrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
}

int set_page_encrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
}

int set_page_non_present(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
}
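
/* Print diagnostics for an unrecoverable boot-time page-fault and stop. */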
static void do_pf_error(const char *msg, unsigned long error_code,
			unsigned long address, unsigned long ip)
{
	error_putstr(msg);

	error_putstr("\nError Code: ");
	error_puthex(error_code);
	error_putstr("\nCR2: 0x");
	error_puthex(address);
	error_putstr("\nRIP relative to _head: 0x");
	error_puthex(ip - (unsigned long)_head);
	error_putstr("\n");

	error("Stopping.\n");
}
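
/*
 * Boot-time #PF handler: faults on addresses that are not yet identity
 * mapped are resolved by mapping the surrounding 2M region on demand, so
 * only memory that is actually touched ends up in the page-tables.
 */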
void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = native_read_cr2();
	unsigned long end;
	bool ghcb_fault;

	ghcb_fault = sev_es_check_ghcb_fault(address);

	address   &= PMD_MASK;
	end        = address + PMD_SIZE;

	/*
	 * Check for unexpected error codes. Unexpected are:
	 *	- Faults on present pages
	 *	- User faults
	 *	- Reserved bits set
	 */
	if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
		do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
	else if (ghcb_fault)
		do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);

	/*
	 * Error code is sane - now identity map the 2M region around
	 * the faulting address.
	 */
	add_identity_map(address, end);
}
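
/*
 * Overall flow, as this file is expected to be wired up by the rest of the
 * decompressor: initialize_identity_maps() is called early to build the
 * initial page-tables, do_boot_page_fault() is installed as the #PF
 * handler, and from then on unmapped accesses are transparently identity
 * mapped 2M at a time.
 */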