// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>
#include <asm/setup.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return per_cpu(_cea_offset, cpu);
}

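/*
 * With KASLR enabled, give each possible CPU a randomized, unique slot
 * in the entry-area map via rejection sampling: draw a candidate slot
 * and retry while any already-placed CPU occupies it.  One page of
 * CPU_ENTRY_AREA_MAP_SIZE is reserved for the read-only IDT (see the
 * BUILD_BUG_ON() in setup_cpu_entry_area_ptes()), hence the -PAGE_SIZE
 * when computing the number of usable slots.
 */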
static __init void init_cea_offsets(void)
{
	unsigned int max_cea;
	unsigned int i, j;

	if (!kaslr_enabled()) {
		for_each_possible_cpu(i)
			per_cpu(_cea_offset, i) = i;
		return;
	}

	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;

	/* O(sodding terrible) */
	for_each_possible_cpu(i) {
		unsigned int cea;

again:
		cea = get_random_u32_below(max_cea);

		for_each_possible_cpu(j) {
			if (cea_offset(j) == cea)
				goto again;

			if (i == j)
				break;
		}

		per_cpu(_cea_offset, i) = cea;
	}
}
#else /* !X86_64 */

DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return cpu;
}
static inline void init_cea_offsets(void) { }
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

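/*
 * Install a single PTE into the entry-area mapping: point the virtual
 * address @cea_vaddr at physical address @pa with protections @flags.
 */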
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

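/*
 * Map @pages pages of the per-CPU object at @ptr into the entry area
 * starting at @cea_vaddr, one PTE at a time.
 */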
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

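/*
 * The debug store backs Intel-only features (PEBS and BTS), so this is
 * a no-op for other vendors.
 */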
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

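/*
 * 64-bit: the cea_map_stack() helper below maps one named exception
 * stack from its per-CPU backing store into the matching slot of the
 * entry area; the guard pages between slots stay unmapped so that a
 * stack overflow faults immediately.
 */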
#ifdef CONFIG_X86_64

#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exceptions stack mappings in the per cpu area are protected
	 * by guard pages so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
			cea_map_stack(VC);
			cea_map_stack(VC2);
		}
	}
}
#else
static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 */
	pgprot_t gdt_prot = PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif
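
	/*
	 * Make sure the KASAN shadow covering this CPU's slice of the
	 * entry area is populated before any PTEs are installed into it.
	 */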
	kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
					early_cpu_to_node(cpu));

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	/*
	 * VMX changes the host TR limit to 0x67 after a VM exit. This is
	 * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
	 * that this is correct.
	 */
	BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
	BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

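/*
 * 32-bit only: walk the whole entry-area range one PMD at a time and
 * make sure a PTE page is populated for it in swapper_pg_dir; 64-bit
 * needs no such pass.
 */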
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* The +1 is for the readonly IDT: */
	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

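/*
 * Boot-time entry point: pick the per-CPU slot offsets, prepare the
 * page tables (32-bit) and map the entry area of every possible CPU.
 */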
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	init_cea_offsets();

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}