// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

#ifdef CONFIG_X86_32
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
#endif
/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
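/*
 * Note: each possible CPU thus gets a fixed linear slot: CPU 0's area starts
 * at CPU_ENTRY_AREA_PER_CPU, CPU 1's exactly CPU_ENTRY_AREA_SIZE bytes above
 * it, and so on. The concrete constants are configuration dependent; this is
 * a pure stride computation, no lookup or allocation happens here.
 */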
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables. All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}
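/*
 * Helper to map 'pages' pages of a per-CPU object into the cpu_entry_area,
 * one PTE per page: per_cpu_ptr_to_phys() resolves each per-CPU page to its
 * physical address, so the same memory is simply aliased at the fixed
 * cpu_entry_area virtual address.
 */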
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per-CPU
	 * memory like debug store buffers.
	 */
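	/*
	 * (Installing non-present PAGE_NONE PTEs is enough to allocate the
	 * intermediate page-table levels up front; the actual buffers can
	 * then be mapped in later, once they have been allocated, by just
	 * updating these PTEs.)
	 */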
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}
#ifdef CONFIG_X86_64

#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)
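/*
 * For example, cea_map_stack(DF) expands to a call that maps the per-CPU
 * estacks->DF_stack pages to cea->estacks.DF_stack with PAGE_KERNEL
 * protection, using the enclosing function's 'cea', 'estacks' and 'npages'
 * locals.
 */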
static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per-CPU area are protected
	 * by guard pages, so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif
/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy. If the
	 * GDT is read-only, that will triple fault. The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif
	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);
	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
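	/*
	 * The XOR trick below asserts that offsetof() and offsetofend() of
	 * x86_tss have identical page-number bits, i.e. that the hardware
	 * TSS does not straddle a page boundary.
	 */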
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	/*
	 * VMX changes the host TR limit to 0x67 after a VM exit. This is
	 * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
	 * that this is correct.
	 */
	BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
	BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);
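	/*
	 * (A segment limit is inclusive, i.e. size - 1, so a TR limit of
	 * 0x67 and the 0x68-byte struct x86_hw_tss asserted above describe
	 * the same hardware TSS.)
	 */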
	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* The +1 is for the readonly IDT: */
	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
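	/* (The start >= CPU_ENTRY_AREA_BASE test ends the loop once start wraps.) */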
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}