// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
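
/*
 * On 64-bit kernels, the IST exception stacks are also placed in the
 * entry area: N_EXCEPTION_STACKS stacks, of which all but the debug
 * stack are EXCEPTION_STKSZ bytes while the debug stack is DEBUG_STKSZ
 * bytes; that is what the array size below encodes.
 */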
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif

struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
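
/*
 * Note that this only computes the per-CPU virtual address: each possible
 * CPU owns one CPU_ENTRY_AREA_SIZE slot starting at CPU_ENTRY_AREA_PER_CPU,
 * e.g. CPU 2's area begins at CPU_ENTRY_AREA_PER_CPU + 2 * CPU_ENTRY_AREA_SIZE.
 * The actual mappings behind those addresses are established by
 * setup_cpu_entry_area() below.
 */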

void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
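
/*
 * Callers pass a target address inside the CPU's entry area, the backing
 * per-CPU object and its size in pages; e.g. the TSS below is mapped with
 * sizeof(struct tss_struct) / PAGE_SIZE pages and tss_prot.
 * per_cpu_ptr_to_phys() resolves each per-CPU page to its physical address
 * before the PTE is written.
 */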

static void percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}
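
/*
 * The PAGE_NONE entries above intentionally map nothing; walking the range
 * with cea_set_pte() only forces the intermediate page tables for the debug
 * store buffers to be allocated up front, so that the real buffer pages can
 * later be installed by rewriting just the leaf PTEs.
 */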

/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches, and we
	 * obviously expect the CPU to be able to write to it.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
#endif

	percpu_setup_debug_store(cpu);
}

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}