1 /* SPDX-License-Identifier: GPL-2.0-only */
6 #include <cpu/x86/smm.h>
9 #define CACHELINE_SIZE 64
14 static inline void mfence(void)
16 /* mfence came with the introduction of SSE2. */
18 __asm__
__volatile__("mfence\t\n": : :"memory");
20 __asm__
__volatile__("lock; addl $0,0(%%esp)": : : "memory");
/* The sequence of the callbacks are in calling order. */
struct mp_ops {
	/*
	 * Optionally provide a callback prior to kicking off MP
	 * startup. This callback is done prior to loading the SIPI
	 * vector but after gathering the MP state information. Please
	 * see the sequence below.
	 */
	void (*pre_mp_init)(void);
	/*
	 * Return the number of logical x86 execution contexts that
	 * need to be brought out of SIPI state as well as have SMM
	 * handlers installed.
	 */
	int (*get_cpu_count)(void);
	/*
	 * Optionally fill in permanent SMM region and save state size. If
	 * this callback is not present no SMM handlers will be installed.
	 * The perm_smsize is the size available to house the permanent SMM
	 * handler.
	 */
	void (*get_smm_info)(uintptr_t *perm_smbase, size_t *perm_smsize,
			     size_t *smm_save_state_size);
	/*
	 * Optionally fill in pointer to microcode and indicate if the APs
	 * can load the microcode in parallel.
	 */
	void (*get_microcode_info)(const void **microcode, int *parallel);
	/*
	 * Optionally provide a callback prior to the APs starting SMM
	 * relocation or CPU driver initialization. However, note that
	 * this callback is called after SMM handlers have been loaded.
	 */
	void (*pre_mp_smm_init)(void);
	/*
	 * Optional function to use to trigger SMM to perform relocation. If
	 * not provided, smm_initiate_relocation() is used.
	 * This function is called on each CPU.
	 * On platforms that select CONFIG(X86_SMM_SKIP_RELOCATION_HANDLER) to
	 * not relocate in SMM, this function can be used to relocate CPUs.
	 */
	void (*per_cpu_smm_trigger)(void);
	/*
	 * This function is called while each CPU is in the SMM relocation
	 * handler. Its primary purpose is to adjust the SMBASE for the
	 * permanent handler. The parameters passed are the current cpu
	 * running the relocation handler, current SMBASE of relocation handler,
	 * and the pre-calculated staggered CPU SMBASE address of the permanent
	 * SMM handler.
	 * This function is only called with !CONFIG(X86_SMM_SKIP_RELOCATION_HANDLER) set.
	 */
	void (*relocation_handler)(int cpu, uintptr_t curr_smbase,
				   uintptr_t staggered_smbase);
	/*
	 * Optionally provide a callback that is called after the APs
	 * and the BSP have gone through the initialization sequence.
	 */
	void (*post_mp_init)(void);
};
/*
 * The mp_ops argument is used to drive the multiprocess initialization. Unless
 * otherwise stated each callback is called on the BSP only. The sequence of
 * operations is the following:
 * 1. pre_mp_init()
 * 2. get_cpu_count()
 * 3. get_smm_info()
 * 4. get_microcode_info()
 * 5. adjust_cpu_apic_entry() for each number of get_cpu_count()
 * 6. pre_mp_smm_init()
 * 7. per_cpu_smm_trigger() in parallel for all cpus which calls
 *    relocation_handler() in SMM.
 * 8. mp_initialize_cpu() for each cpu
 * 9. post_mp_init()
 */
enum cb_err mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops);
/* NOTE(review): enum wrapper restored around the two surviving member
   comments; confirm the constant's definition against upstream. */
enum {
	/* Function runs on all cores (both BSP and APs) */
	MP_RUN_ON_ALL_CPUS,
	/* Need to specify cores (only on APs) numbers */
};
/*
 * After APs are up and PARALLEL_MP_AP_WORK is enabled one can issue work
 * to all the APs to perform. Currently the BSP is the only CPU that is allowed
 * to issue work. i.e. the APs should not call any of these functions.
 *
 * Input parameter expire_us <= 0 to specify an infinite timeout.
 * logical_cpu_num = MP_RUN_ON_ALL_CPUS to execute function over all cores (BSP
 * + APs) else specified AP number using logical_cpu_num.
 */
enum cb_err mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
			  long expire_us);
119 * Runs func on all APs excluding BSP, with a provision to run calls in parallel
120 * or serially per AP.
122 enum cb_err
mp_run_on_all_aps(void (*func
)(void *), void *arg
, long expire_us
,
/* Like mp_run_on_aps() but also runs func on BSP. */
enum cb_err mp_run_on_all_cpus(void (*func)(void *), void *arg);
/* Like mp_run_on_all_cpus but make sure all APs finish executing the
   function call. The time limit on a function call is 1 second per AP. */
enum cb_err mp_run_on_all_cpus_synchronously(void (*func)(void *), void *arg);
/*
 * Park all APs to prepare for OS boot. This is handled automatically
 * by the coreboot infrastructure.
 */
enum cb_err mp_park_aps(void);
/*
 * SMM helpers to use with initializing CPUs.
 */

/* Send SMI to self without any serialization. */
void smm_initiate_relocation_parallel(void);
/* Send SMI to self with single execution. */
void smm_initiate_relocation(void);
147 #endif /* _X86_MP_H_ */