/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by, and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}
enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret == 3 && !strncmp(arg, "off", 3)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

	if (ret == 2 && !strncmp(arg, "on", 2)) {
		pti_mode = PTI_FORCE_ON;
		pti_print_if_secure("force enabled on command line.");
		goto enable;
	}

	if (ret == 4 && !strncmp(arg, "auto", 4)) {
		pti_mode = PTI_AUTO;
		goto autosel;
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
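/*
 * Usage note (added commentary, derived from the checks above): the
 * recognized boot parameters are:
 *
 *	pti=off  (or the legacy "nopti") - force-disable isolation
 *	pti=on                           - force-enable, even on CPUs
 *					   without X86_BUG_CPU_MELTDOWN
 *	pti=auto                         - the default: enable only on
 *					   affected CPUs
 */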
pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
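/*
 * For reference, a sketch of the helpers used above (they live in the
 * arch headers, not in this file; shown as they looked in this kernel
 * era, so treat the details as an assumption, not authoritative):
 *
 *	// PGDs are allocated as an order-1 page: kernel half first,
 *	// user half exactly one 4k page later, so the entry/exit CR3
 *	// switch is a single flip of bit PAGE_SHIFT.
 *	static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
 *	{
 *		return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
 *	}
 *
 *	// A PGD slot maps userspace iff it sits in the lower half of
 *	// the PGD page, i.e. covers addresses below the kernel split.
 *	static inline bool pgdp_maps_userspace(void *__ptr)
 *	{
 *		unsigned long ptr = (unsigned long)__ptr;
 *
 *		return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
 *	}
 */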
/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}
/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}
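/*
 * Illustrative (hypothetical) use of the walk helpers above from __init
 * code; 'kernel_pmd' is a stand-in for a PMD looked up in the kernel
 * tables:
 *
 *	pmd_t *target_pmd = pti_user_pagetable_walk_pmd(addr);
 *	if (target_pmd)
 *		*target_pmd = *kernel_pmd;	// share the last-level table
 *
 * pti_clone_pmds() below follows exactly this pattern, plus flag
 * filtering.
 */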
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif
static void __init
pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;
		pud = pud_offset(p4d, addr);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;

		target_pmd = pti_user_pagetable_walk_pmd(addr);
		if (WARN_ON(!target_pmd))
			return;

		/*
		 * Only clone present PMDs.  This ensures only setting
		 * _PAGE_GLOBAL on present PMDs.  This should only be
		 * called on well-known addresses anyway, so a non-
		 * present PMD would be a surprise.
		 */
		if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
			return;

		/*
		 * Setting 'target_pmd' below creates a mapping in both
		 * the user and kernel page tables.  It is effectively
		 * global, so set it as global in both copies.  Note:
		 * the X86_FEATURE_PGE check is not _required_ because
		 * the CPU ignores _PAGE_GLOBAL when PGE is not
		 * supported.  The check keeps consistency with
		 * code that only sets this bit when supported.
		 */
		if (boot_cpu_has(X86_FEATURE_PGE))
			*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

		/*
		 * Copy the PMD.  That is, the kernelmode and usermode
		 * tables will share the last-level page tables of this
		 * address range.
		 */
		*target_pmd = pmd_clear_flags(*pmd, clear);
	}
}
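/*
 * Note (added commentary): because only the PMD *entry* is copied, the
 * kernel and user views share the page-table pages underneath it.  A
 * later permission change to those shared PTEs (e.g. set_memory_ro() on
 * the entry text) is therefore visible through both CR3 values at once.
 */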
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}
/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 */
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}
/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}
/*
 * Clone the populated PMDs of the entry and irqentry text and force it RO.
 */
static void __init pti_clone_entry_text(void)
{
	pti_clone_pmds((unsigned long) __entry_text_start,
		       (unsigned long) __irqentry_text_end,
		       _PAGE_RW);
}
/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}
/*
 * This is the only user for these and it is not arch-generic
 * like the other set_memory.h functions.  Just extern them.
 */
extern int set_memory_nonglobal(unsigned long addr, int numpages);
extern int set_memory_global(unsigned long addr, int numpages);
/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_hpage_align;
	unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pmds(start, end_clone, _PAGE_RW);

	/*
	 * pti_clone_pmds() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
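/*
 * Worked example of the alignment above (illustrative addresses): with
 * _end == 0xffffffff826e4000 and PMD_PAGE_SIZE == 2MiB, ALIGN() rounds
 * end up to 0xffffffff82800000, so the PTEs under the final, partially
 * used 2MiB identity-map PMD lose _PAGE_GLOBAL as well.
 */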
/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}
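/*
 * Call-site note (an assumption from the same kernel era, not stated in
 * this file): pti_init() is invoked from mm_init() in init/main.c after
 * init_espfix_bsp(), so the ESPFIX P4D is already populated by the time
 * pti_setup_espfix64() clones it.
 */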