// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif
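
/*
 * Compute the virtual address of @cpu's entry area: the per-CPU areas are
 * laid out back to back starting at CPU_ENTRY_AREA_PER_CPU, one
 * CPU_ENTRY_AREA_SIZE sized slot per possible CPU.
 */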
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
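
/*
 * Install a single PTE in the entry area, mapping @cea_vaddr to the
 * physical address @pa with protection @flags.
 */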
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;

	set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
}
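
/*
 * Map @pages pages of the per-cpu allocation starting at @ptr into the
 * entry area at @cea_vaddr, one PTE at a time.
 */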
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
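
/*
 * Map the per-cpu debug store (BTS/PEBS) into the entry area on Intel CPUs.
 * The buffer space itself only gets PAGE_NONE placeholder PTEs here so that
 * the covering PMDs are populated up front.
 */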
static void percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	extern char _entry_trampoline[];

	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy. If the
	 * GDT is read-only, that will triple fault. The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif
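
	/* Now install the per-CPU mappings into the entry area. */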
	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);

	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif

	percpu_setup_debug_store(cpu);
}
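
/*
 * Make sure the page tables covering the cpu_entry_area address range
 * exist before the per-cpu mappings are installed.
 */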
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}
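
/* Set up the page tables and map the entry area for every possible CPU. */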
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}