// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
#include <asm/sev.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;
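
/*
 * Switch the CPU to the trampoline page table. Used on paths that exit
 * to real mode (e.g. reboot), where low memory must be identity-mapped.
 */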
void load_trampoline_pgtable(void)
{
#ifdef CONFIG_X86_32
	load_cr3(initial_page_table);
#else
	/*
	 * This function is called before exiting to real-mode and that will
	 * fail with CR4.PCIDE still set.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4_clear_bits(X86_CR4_PCIDE);

	write_cr3(real_mode_header->trampoline_pgd);
#endif

	/*
	 * The CR3 write above will not flush global TLB entries.
	 * Stale, global entries from previous page tables may still be
	 * present. Flush those stale entries.
	 *
	 * This ensures that memory accessed while running with
	 * trampoline_pgd is *actually* mapped into trampoline_pgd.
	 */
	__flush_tlb_all();
}

void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	size_t size = real_mode_size_needed();

	if (!size)
		return;
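
	/*
	 * Sanity check: this must run early, while memblock is still the
	 * allocator; once slab is up it is too late to reliably grab
	 * memory below 1M.
	 */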
	WARN_ON(slab_is_available());

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
	if (!mem)
		pr_info("No sub-1M memory is available for the trampoline\n");
	else
		set_real_mode_mem(mem);

	/*
	 * Unconditionally reserve the entire first 1M, see comment in
	 * setup_arch().
	 */
	memblock_reserve(0, SZ_1M);
}
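
/*
 * Propagate memory-encryption state into the trampoline header: with
 * SME active the APs must come up with encryption enabled, and SEV-ES
 * guest APs cannot handle #VC exceptions until they are set up.
 */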
static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		th->flags |= TH_FLAGS_SME_ACTIVE;

	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
		/*
		 * Skip the call to verify_cpu() in secondary_startup_64 as it
		 * will cause #VC exceptions when the AP can't handle them yet.
		 */
		th->start = (u64) secondary_startup_64_no_verify;

		if (sev_es_setup_ap_jump_table(real_mode_header))
			panic("Failed to get/update SEV-ES AP Jump Table");
	}
#endif
}
static void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
	int i;
#endif

	base = (unsigned char *)real_mode_header;

	/*
	 * If SME is active, the trampoline area will need to be in
	 * decrypted memory in order to bring up other processors
	 * successfully. This is not needed for SEV.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

	memcpy(base, real_mode_blob, size);
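
	/*
	 * Real-mode segment arithmetic: a segment value is the physical
	 * base address divided by 16, hence the shift below.
	 */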
	phys_base = __pa(base);
	real_mode_seg = phys_base >> 4;

	rel = (u32 *) real_mode_relocs;
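
	/*
	 * real_mode_relocs holds two counted lists of offsets into the
	 * blob: first the 16-bit segment fixups, then the 32-bit linear
	 * fixups. Each list starts with its entry count.
	 */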
	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
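	/*
	 * A GDT limit is the offset of its last byte: __BOOT_DS is the
	 * last boot descriptor, and +7 covers all eight of its bytes.
	 */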
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = mmu_cr4_features;

	trampoline_header->flags = 0;
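
	/*
	 * The trampoline lock serializes APs through the 16-bit stub,
	 * since they all share its single real-mode stack.
	 */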
	trampoline_lock = &trampoline_header->lock;
	*trampoline_lock = 0;

	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

	/* Map the real mode stub as virtual == physical */
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;

	/*
	 * Include the entirety of the kernel mapping into the trampoline
	 * PGD. This way, all mappings present in the normal kernel page
	 * tables are usable while running on trampoline_pgd.
	 */
	for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
		trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif

	sme_sev_setup_real_mode(trampoline_header);
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also, the trampoline code will be executed by APs, so we
 * need to mark it executable by do_pre_smp_initcalls() at the latest;
 * thus run it as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);
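
	/*
	 * Layout: the whole blob is first marked non-executable, the
	 * header through ro_end additionally read-only, and then the
	 * text range is re-marked executable for the APs.
	 */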
	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

void __init init_real_mode(void)
{
	if (!real_mode_header)
		panic("Real mode trampoline was not allocated");

	setup_real_mode();
	set_real_mode_permissions();
}
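
/*
 * Indirected through x86_platform so platforms without a real-mode
 * trampoline (e.g. Xen PV guests) can stub this out.
 */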
static int __init do_init_real_mode(void)
{
	x86_platform.realmode_init();
	return 0;
}
early_initcall(do_init_real_mode);