// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/set_memory.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	size_t size = real_mode_size_needed();

	if (!size)
		return;

	WARN_ON(slab_is_available());

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem) {
		pr_info("No sub-1M memory is available for the trampoline\n");
		return;
	}

	memblock_reserve(mem, size);
	set_real_mode_mem(mem);
	crash_reserve_low_1M();
}
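
/*
 * setup_real_mode() copies the real-mode blob into the area reserved
 * above, applies its relocation entries and fills in the trampoline
 * header and page-table entries consumed by the AP startup code.
 */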
static void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
#endif
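
	/*
	 * real_mode_header was set by set_real_mode_mem() to the virtual
	 * address of the sub-1M area reserved in reserve_real_mode(); the
	 * blob is copied there below.
	 */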
	base = (unsigned char *)real_mode_header;

	/*
	 * If SME is active, the trampoline area will need to be in
	 * decrypted memory in order to bring up other processors
	 * successfully. This is not needed for SEV.
	 */
	if (sme_active())
		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

	memcpy(base, real_mode_blob, size);

	phys_base = __pa(base);
	real_mode_seg = phys_base >> 4;

	rel = (u32 *) real_mode_relocs;
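
	/*
	 * real_mode_relocs holds two runs of entries, each a 32-bit count
	 * followed by that many 32-bit offsets into the blob: first the
	 * 16-bit segment fixups, then the 32-bit linear-address fixups.
	 */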

	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = mmu_cr4_features;

	trampoline_header->flags = 0;
	if (sme_active())
		trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;

	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
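	/*
	 * Entry 0 identity-maps the low physical range the trampoline runs
	 * from (trampoline_pgd_entry is prepared during early boot), and
	 * entry 511 mirrors the kernel's own top-level entry so the kernel
	 * half of the address space stays mapped while an AP switches to
	 * the real page tables.
	 */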
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
	trampoline_pgd[511] = init_top_pgt[511].pgd;
#endif
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also trampoline code will be executed by APs so we
 * need to mark it executable at do_pre_smp_initcalls() at least,
 * thus run it as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);
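
	/*
	 * The whole area is first made non-executable, the read-only part
	 * (everything up to ro_end) is write-protected, and then the .text
	 * portion alone is flipped back to executable so the APs can run
	 * the trampoline code.
	 */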
	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

static int __init init_real_mode(void)
{
	if (!real_mode_header)
		panic("Real mode trampoline was not allocated");

	setup_real_mode();
	set_real_mode_permissions();

	return 0;
}
early_initcall(init_real_mode);