/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"

#include "sysemu/runstate.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"

#include <mach/mach_time.h>

#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "trace/trace-target_arm_hvf.h"
#include "migration/vmstate.h"

#include "gdbstub/enums.h"
#define MDSCR_EL1_SS_SHIFT  0
#define MDSCR_EL1_MDE_SHIFT 15
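
/*
 * MDSCR_EL1 bit 0 (SS) turns on hardware single-stepping and bit 15 (MDE)
 * enables breakpoint/watchpoint debug exceptions; both get set by
 * hvf_arch_update_guest_debug() (not shown here) when the gdbstub attaches.
 */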
static const uint16_t dbgbcr_regs[] = {
    HV_SYS_REG_DBGBCR0_EL1,
    HV_SYS_REG_DBGBCR1_EL1,
    HV_SYS_REG_DBGBCR2_EL1,
    HV_SYS_REG_DBGBCR3_EL1,
    HV_SYS_REG_DBGBCR4_EL1,
    HV_SYS_REG_DBGBCR5_EL1,
    HV_SYS_REG_DBGBCR6_EL1,
    HV_SYS_REG_DBGBCR7_EL1,
    HV_SYS_REG_DBGBCR8_EL1,
    HV_SYS_REG_DBGBCR9_EL1,
    HV_SYS_REG_DBGBCR10_EL1,
    HV_SYS_REG_DBGBCR11_EL1,
    HV_SYS_REG_DBGBCR12_EL1,
    HV_SYS_REG_DBGBCR13_EL1,
    HV_SYS_REG_DBGBCR14_EL1,
    HV_SYS_REG_DBGBCR15_EL1,
};
static const uint16_t dbgbvr_regs[] = {
    HV_SYS_REG_DBGBVR0_EL1,
    HV_SYS_REG_DBGBVR1_EL1,
    HV_SYS_REG_DBGBVR2_EL1,
    HV_SYS_REG_DBGBVR3_EL1,
    HV_SYS_REG_DBGBVR4_EL1,
    HV_SYS_REG_DBGBVR5_EL1,
    HV_SYS_REG_DBGBVR6_EL1,
    HV_SYS_REG_DBGBVR7_EL1,
    HV_SYS_REG_DBGBVR8_EL1,
    HV_SYS_REG_DBGBVR9_EL1,
    HV_SYS_REG_DBGBVR10_EL1,
    HV_SYS_REG_DBGBVR11_EL1,
    HV_SYS_REG_DBGBVR12_EL1,
    HV_SYS_REG_DBGBVR13_EL1,
    HV_SYS_REG_DBGBVR14_EL1,
    HV_SYS_REG_DBGBVR15_EL1,
};
static const uint16_t dbgwcr_regs[] = {
    HV_SYS_REG_DBGWCR0_EL1,
    HV_SYS_REG_DBGWCR1_EL1,
    HV_SYS_REG_DBGWCR2_EL1,
    HV_SYS_REG_DBGWCR3_EL1,
    HV_SYS_REG_DBGWCR4_EL1,
    HV_SYS_REG_DBGWCR5_EL1,
    HV_SYS_REG_DBGWCR6_EL1,
    HV_SYS_REG_DBGWCR7_EL1,
    HV_SYS_REG_DBGWCR8_EL1,
    HV_SYS_REG_DBGWCR9_EL1,
    HV_SYS_REG_DBGWCR10_EL1,
    HV_SYS_REG_DBGWCR11_EL1,
    HV_SYS_REG_DBGWCR12_EL1,
    HV_SYS_REG_DBGWCR13_EL1,
    HV_SYS_REG_DBGWCR14_EL1,
    HV_SYS_REG_DBGWCR15_EL1,
};
static const uint16_t dbgwvr_regs[] = {
    HV_SYS_REG_DBGWVR0_EL1,
    HV_SYS_REG_DBGWVR1_EL1,
    HV_SYS_REG_DBGWVR2_EL1,
    HV_SYS_REG_DBGWVR3_EL1,
    HV_SYS_REG_DBGWVR4_EL1,
    HV_SYS_REG_DBGWVR5_EL1,
    HV_SYS_REG_DBGWVR6_EL1,
    HV_SYS_REG_DBGWVR7_EL1,
    HV_SYS_REG_DBGWVR8_EL1,
    HV_SYS_REG_DBGWVR9_EL1,
    HV_SYS_REG_DBGWVR10_EL1,
    HV_SYS_REG_DBGWVR11_EL1,
    HV_SYS_REG_DBGWVR12_EL1,
    HV_SYS_REG_DBGWVR13_EL1,
    HV_SYS_REG_DBGWVR14_EL1,
    HV_SYS_REG_DBGWVR15_EL1,
};
static inline int hvf_arm_num_brps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1;
}

static inline int hvf_arm_num_wrps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1;
}
void hvf_arm_init_debug(void)
{
    hv_vcpu_config_t config;
    config = hv_vcpu_config_create();

    max_hw_bps = hvf_arm_num_brps(config);
    hw_breakpoints =
        g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps);

    max_hw_wps = hvf_arm_num_wrps(config);
    hw_watchpoints =
        g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
}
#define HVF_SYSREG(crn, crm, op0, op1, op2) \
        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

#define SYSREG_OP0_SHIFT      20
#define SYSREG_OP0_MASK       0x3
#define SYSREG_OP0(sysreg)    ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT      14
#define SYSREG_OP1_MASK       0x7
#define SYSREG_OP1(sysreg)    ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT      10
#define SYSREG_CRN_MASK       0xf
#define SYSREG_CRN(sysreg)    ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT      1
#define SYSREG_CRM_MASK       0xf
#define SYSREG_CRM(sysreg)    ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT      17
#define SYSREG_OP2_MASK       0x7
#define SYSREG_OP2(sysreg)    ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << SYSREG_OP0_SHIFT) | \
     (op1 << SYSREG_OP1_SHIFT) | \
     (crn << SYSREG_CRN_SHIFT) | \
     (crm << SYSREG_CRM_SHIFT) | \
     (op2 << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
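
/*
 * This layout deliberately mirrors the ISS encoding of an
 * EC_SYSTEMREGISTERTRAP exception syndrome: Op0 lives at bits [21:20],
 * Op2 at [19:17], Op1 at [16:14], CRn at [13:10], Rt at [9:5] and CRm at
 * [4:1]. That is why "syndrome & SYSREG_MASK" in hvf_vcpu_exec() yields a
 * SYSREG() value directly; e.g. a trapped "msr oslar_el1, x0" decodes to
 * SYSREG_OSLAR_EL1 == SYSREG(2, 0, 1, 0, 4).
 */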
#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

#define SYSREG_ICC_AP0R0_EL1     SYSREG(3, 0, 12, 8, 4)
#define SYSREG_ICC_AP0R1_EL1     SYSREG(3, 0, 12, 8, 5)
#define SYSREG_ICC_AP0R2_EL1     SYSREG(3, 0, 12, 8, 6)
#define SYSREG_ICC_AP0R3_EL1     SYSREG(3, 0, 12, 8, 7)
#define SYSREG_ICC_AP1R0_EL1     SYSREG(3, 0, 12, 9, 0)
#define SYSREG_ICC_AP1R1_EL1     SYSREG(3, 0, 12, 9, 1)
#define SYSREG_ICC_AP1R2_EL1     SYSREG(3, 0, 12, 9, 2)
#define SYSREG_ICC_AP1R3_EL1     SYSREG(3, 0, 12, 9, 3)
#define SYSREG_ICC_ASGI1R_EL1    SYSREG(3, 0, 12, 11, 6)
#define SYSREG_ICC_BPR0_EL1      SYSREG(3, 0, 12, 8, 3)
#define SYSREG_ICC_BPR1_EL1      SYSREG(3, 0, 12, 12, 3)
#define SYSREG_ICC_CTLR_EL1      SYSREG(3, 0, 12, 12, 4)
#define SYSREG_ICC_DIR_EL1       SYSREG(3, 0, 12, 11, 1)
#define SYSREG_ICC_EOIR0_EL1     SYSREG(3, 0, 12, 8, 1)
#define SYSREG_ICC_EOIR1_EL1     SYSREG(3, 0, 12, 12, 1)
#define SYSREG_ICC_HPPIR0_EL1    SYSREG(3, 0, 12, 8, 2)
#define SYSREG_ICC_HPPIR1_EL1    SYSREG(3, 0, 12, 12, 2)
#define SYSREG_ICC_IAR0_EL1      SYSREG(3, 0, 12, 8, 0)
#define SYSREG_ICC_IAR1_EL1      SYSREG(3, 0, 12, 12, 0)
#define SYSREG_ICC_IGRPEN0_EL1   SYSREG(3, 0, 12, 12, 6)
#define SYSREG_ICC_IGRPEN1_EL1   SYSREG(3, 0, 12, 12, 7)
#define SYSREG_ICC_PMR_EL1       SYSREG(3, 0, 4, 6, 0)
#define SYSREG_ICC_RPR_EL1       SYSREG(3, 0, 12, 11, 3)
#define SYSREG_ICC_SGI0R_EL1     SYSREG(3, 0, 12, 11, 7)
#define SYSREG_ICC_SGI1R_EL1     SYSREG(3, 0, 12, 11, 5)
#define SYSREG_ICC_SRE_EL1       SYSREG(3, 0, 12, 12, 5)

#define SYSREG_MDSCR_EL1      SYSREG(2, 0, 0, 2, 2)
#define SYSREG_DBGBVR0_EL1    SYSREG(2, 0, 0, 0, 4)
#define SYSREG_DBGBCR0_EL1    SYSREG(2, 0, 0, 0, 5)
#define SYSREG_DBGWVR0_EL1    SYSREG(2, 0, 0, 0, 6)
#define SYSREG_DBGWCR0_EL1    SYSREG(2, 0, 0, 0, 7)
#define SYSREG_DBGBVR1_EL1    SYSREG(2, 0, 0, 1, 4)
#define SYSREG_DBGBCR1_EL1    SYSREG(2, 0, 0, 1, 5)
#define SYSREG_DBGWVR1_EL1    SYSREG(2, 0, 0, 1, 6)
#define SYSREG_DBGWCR1_EL1    SYSREG(2, 0, 0, 1, 7)
#define SYSREG_DBGBVR2_EL1    SYSREG(2, 0, 0, 2, 4)
#define SYSREG_DBGBCR2_EL1    SYSREG(2, 0, 0, 2, 5)
#define SYSREG_DBGWVR2_EL1    SYSREG(2, 0, 0, 2, 6)
#define SYSREG_DBGWCR2_EL1    SYSREG(2, 0, 0, 2, 7)
#define SYSREG_DBGBVR3_EL1    SYSREG(2, 0, 0, 3, 4)
#define SYSREG_DBGBCR3_EL1    SYSREG(2, 0, 0, 3, 5)
#define SYSREG_DBGWVR3_EL1    SYSREG(2, 0, 0, 3, 6)
#define SYSREG_DBGWCR3_EL1    SYSREG(2, 0, 0, 3, 7)
#define SYSREG_DBGBVR4_EL1    SYSREG(2, 0, 0, 4, 4)
#define SYSREG_DBGBCR4_EL1    SYSREG(2, 0, 0, 4, 5)
#define SYSREG_DBGWVR4_EL1    SYSREG(2, 0, 0, 4, 6)
#define SYSREG_DBGWCR4_EL1    SYSREG(2, 0, 0, 4, 7)
#define SYSREG_DBGBVR5_EL1    SYSREG(2, 0, 0, 5, 4)
#define SYSREG_DBGBCR5_EL1    SYSREG(2, 0, 0, 5, 5)
#define SYSREG_DBGWVR5_EL1    SYSREG(2, 0, 0, 5, 6)
#define SYSREG_DBGWCR5_EL1    SYSREG(2, 0, 0, 5, 7)
#define SYSREG_DBGBVR6_EL1    SYSREG(2, 0, 0, 6, 4)
#define SYSREG_DBGBCR6_EL1    SYSREG(2, 0, 0, 6, 5)
#define SYSREG_DBGWVR6_EL1    SYSREG(2, 0, 0, 6, 6)
#define SYSREG_DBGWCR6_EL1    SYSREG(2, 0, 0, 6, 7)
#define SYSREG_DBGBVR7_EL1    SYSREG(2, 0, 0, 7, 4)
#define SYSREG_DBGBCR7_EL1    SYSREG(2, 0, 0, 7, 5)
#define SYSREG_DBGWVR7_EL1    SYSREG(2, 0, 0, 7, 6)
#define SYSREG_DBGWCR7_EL1    SYSREG(2, 0, 0, 7, 7)
#define SYSREG_DBGBVR8_EL1    SYSREG(2, 0, 0, 8, 4)
#define SYSREG_DBGBCR8_EL1    SYSREG(2, 0, 0, 8, 5)
#define SYSREG_DBGWVR8_EL1    SYSREG(2, 0, 0, 8, 6)
#define SYSREG_DBGWCR8_EL1    SYSREG(2, 0, 0, 8, 7)
#define SYSREG_DBGBVR9_EL1    SYSREG(2, 0, 0, 9, 4)
#define SYSREG_DBGBCR9_EL1    SYSREG(2, 0, 0, 9, 5)
#define SYSREG_DBGWVR9_EL1    SYSREG(2, 0, 0, 9, 6)
#define SYSREG_DBGWCR9_EL1    SYSREG(2, 0, 0, 9, 7)
#define SYSREG_DBGBVR10_EL1   SYSREG(2, 0, 0, 10, 4)
#define SYSREG_DBGBCR10_EL1   SYSREG(2, 0, 0, 10, 5)
#define SYSREG_DBGWVR10_EL1   SYSREG(2, 0, 0, 10, 6)
#define SYSREG_DBGWCR10_EL1   SYSREG(2, 0, 0, 10, 7)
#define SYSREG_DBGBVR11_EL1   SYSREG(2, 0, 0, 11, 4)
#define SYSREG_DBGBCR11_EL1   SYSREG(2, 0, 0, 11, 5)
#define SYSREG_DBGWVR11_EL1   SYSREG(2, 0, 0, 11, 6)
#define SYSREG_DBGWCR11_EL1   SYSREG(2, 0, 0, 11, 7)
#define SYSREG_DBGBVR12_EL1   SYSREG(2, 0, 0, 12, 4)
#define SYSREG_DBGBCR12_EL1   SYSREG(2, 0, 0, 12, 5)
#define SYSREG_DBGWVR12_EL1   SYSREG(2, 0, 0, 12, 6)
#define SYSREG_DBGWCR12_EL1   SYSREG(2, 0, 0, 12, 7)
#define SYSREG_DBGBVR13_EL1   SYSREG(2, 0, 0, 13, 4)
#define SYSREG_DBGBCR13_EL1   SYSREG(2, 0, 0, 13, 5)
#define SYSREG_DBGWVR13_EL1   SYSREG(2, 0, 0, 13, 6)
#define SYSREG_DBGWCR13_EL1   SYSREG(2, 0, 0, 13, 7)
#define SYSREG_DBGBVR14_EL1   SYSREG(2, 0, 0, 14, 4)
#define SYSREG_DBGBCR14_EL1   SYSREG(2, 0, 0, 14, 5)
#define SYSREG_DBGWVR14_EL1   SYSREG(2, 0, 0, 14, 6)
#define SYSREG_DBGWCR14_EL1   SYSREG(2, 0, 0, 14, 7)
#define SYSREG_DBGBVR15_EL1   SYSREG(2, 0, 0, 15, 4)
#define SYSREG_DBGBCR15_EL1   SYSREG(2, 0, 0, 15, 5)
#define SYSREG_DBGWVR15_EL1   SYSREG(2, 0, 0, 15, 6)
#define SYSREG_DBGWCR15_EL1   SYSREG(2, 0, 0, 15, 7)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)
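/* These match the ENABLE, IMASK and ISTATUS bits of CNTV_CTL_EL0. */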

static void hvf_wfi(CPUState *cpu);

static uint32_t chosen_ipa_bit_size;
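/*
 * IPA width (in bits) selected in hvf_arch_vm_create(); while it is still 0,
 * clamp_id_aa64mmfr0_parange_to_ipa_size() falls back to the host maximum.
 */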

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0,  offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1,  offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2,  offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3,  offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4,  offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5,  offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6,  offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7,  offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8,  offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9,  offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10, offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11, offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12, offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13, offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14, offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15, offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16, offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17, offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18, offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19, offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20, offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21, offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22, offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23, offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24, offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25, offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26, offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27, offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28, offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC,  offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 7) },
#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
    /* Add ID_AA64MMFR3_EL1 here when HVF supports it */

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }
    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1: {
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are holding the gdbstub's view of the registers (set in
                 * hvf_arch_update_guest_debug()).
                 * Since the environment is used to store only the guest's view
                 * of the registers, don't update it with the values from the
                 * vCPU but simply keep the values from the previous
                 * environment.
                 */
                const ARMCPRegInfo *ri;
                ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
                val = read_raw_cp_reg(env, ri);

                arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
                continue;
            }
            }
        }

        ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1:
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are already holding the gdbstub's view of the registers (set
                 * in hvf_arch_update_guest_debug()).
                 */
                continue;
            }
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->accel->dirty) {
        hvf_put_registers(cpu);
        cpu->accel->dirty = false;
    }
}
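
/*
 * While cpu->accel->dirty is set, the copy of the register state in env is
 * the authoritative one; it must be flushed back into the hvf vCPU before
 * individual registers are accessed directly, as the helpers below do.
 */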
static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
{
    uint32_t ipa_size = chosen_ipa_bit_size ?
            chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();

    /* Clamp down the PARange to the IPA size the kernel supports. */
    uint8_t index = round_down_to_parange_index(ipa_size);
    *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
}
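
/*
 * For example, a 39-bit IPA space would round down to the 36-bit PARange
 * encoding, the largest architected value that does not exceed it, so the
 * guest never maps memory beyond what the VM can actually back.
 */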
static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
        /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
        /* Add ID_AA64MMFR3_EL1 here when HVF supports it */
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,arm-v8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a small vcpu to extract host registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
    /*
     * SPAN is disabled by default when SCTLR.SPAN=1. To improve compatibility,
     * let's disable it on boot and then allow guest software to turn it on by
     * setting it to 0.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
    if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

uint32_t hvf_arm_get_default_ipa_bit_size(void)
{
    uint32_t default_ipa_size;
    hv_return_t ret = hv_vm_config_get_default_ipa_size(&default_ipa_size);
    assert_hvf_ok(ret);

    return default_ipa_size;
}

uint32_t hvf_arm_get_max_ipa_bit_size(void)
{
    uint32_t max_ipa_size;
    hv_return_t ret = hv_vm_config_get_max_ipa_size(&max_ipa_size);
    assert_hvf_ok(ret);

    /*
     * We clamp any IPA size we want to back the VM with to a valid PARange
     * value so the guest doesn't try and map memory outside of the valid
     * range. This logic just clamps the passed in IPA bit size to the first
     * valid PARange value <= to it.
     */
    return round_down_to_parange_bit_size(max_ipa_size);
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
{
    hv_return_t ret;
    hv_vm_config_t config = hv_vm_config_create();

    ret = hv_vm_config_set_ipa_size(config, pa_range);
    if (ret != HV_SUCCESS) {
        goto cleanup;
    }
    chosen_ipa_bit_size = pa_range;

    ret = hv_vm_create(config);

cleanup:
    os_release(config);

    return ret;
}
*cpu
)
992 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
993 CPUARMState
*env
= &arm_cpu
->env
;
994 uint32_t sregs_match_len
= ARRAY_SIZE(hvf_sreg_match
);
995 uint32_t sregs_cnt
= 0;
1000 env
->aarch64
= true;
1001 asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu
->gt_cntfrq_hz
));
1003 /* Allocate enough space for our sysreg sync */
1004 arm_cpu
->cpreg_indexes
= g_renew(uint64_t, arm_cpu
->cpreg_indexes
,
1006 arm_cpu
->cpreg_values
= g_renew(uint64_t, arm_cpu
->cpreg_values
,
1008 arm_cpu
->cpreg_vmstate_indexes
= g_renew(uint64_t,
1009 arm_cpu
->cpreg_vmstate_indexes
,
1011 arm_cpu
->cpreg_vmstate_values
= g_renew(uint64_t,
1012 arm_cpu
->cpreg_vmstate_values
,
1015 memset(arm_cpu
->cpreg_values
, 0, sregs_match_len
* sizeof(uint64_t));
1017 /* Populate cp list for all known sysregs */
1018 for (i
= 0; i
< sregs_match_len
; i
++) {
1019 const ARMCPRegInfo
*ri
;
1020 uint32_t key
= hvf_sreg_match
[i
].key
;
1022 ri
= get_arm_cp_reginfo(arm_cpu
->cp_regs
, key
);
1024 assert(!(ri
->type
& ARM_CP_NO_RAW
));
1025 hvf_sreg_match
[i
].cp_idx
= sregs_cnt
;
1026 arm_cpu
->cpreg_indexes
[sregs_cnt
++] = cpreg_to_kvm_id(key
);
1028 hvf_sreg_match
[i
].cp_idx
= -1;
1031 arm_cpu
->cpreg_array_len
= sregs_cnt
;
1032 arm_cpu
->cpreg_vmstate_array_len
= sregs_cnt
;
1034 assert(write_cpustate_to_list(arm_cpu
, false));
1036 /* Set CP_NO_RAW system registers on init */
1037 ret
= hv_vcpu_set_sys_reg(cpu
->accel
->fd
, HV_SYS_REG_MIDR_EL1
,
1041 ret
= hv_vcpu_set_sys_reg(cpu
->accel
->fd
, HV_SYS_REG_MPIDR_EL1
,
1042 arm_cpu
->mp_affinity
);
1045 ret
= hv_vcpu_get_sys_reg(cpu
->accel
->fd
, HV_SYS_REG_ID_AA64PFR0_EL1
, &pfr
);
1047 pfr
|= env
->gicv3state
? (1 << 24) : 0;
1048 ret
= hv_vcpu_set_sys_reg(cpu
->accel
->fd
, HV_SYS_REG_ID_AA64PFR0_EL1
, pfr
);
1051 /* We're limited to underlying hardware caps, override internal versions */
1052 ret
= hv_vcpu_get_sys_reg(cpu
->accel
->fd
, HV_SYS_REG_ID_AA64MMFR0_EL1
,
1053 &arm_cpu
->isar
.id_aa64mmfr0
);
1056 clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu
->isar
.id_aa64mmfr0
);
1057 ret
= hv_vcpu_set_sys_reg(cpu
->accel
->fd
, HV_SYS_REG_ID_AA64MMFR0_EL1
,
1058 arm_cpu
->isar
.id_aa64mmfr0
);

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->accel->fd, 1);
}

static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = 1;
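    /* hvf only runs the guest at EL0/EL1, so injected exceptions target EL1 */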
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu_mp_affinity(arm_cpu));
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

/*
 * Handle a PSCI call.
 *
 * Returns true on success,
 *         false when the PSCI call is unknown.
 */
static bool hvf_handle_psci_call(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t param[4] = {
        env->xregs[0],
        env->xregs[1],
        env->xregs[2],
        env->xregs[3]
    };
    uint64_t context_id, mpidr;
    bool target_aarch64 = true;
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    target_ulong entry;
    int target_el = 1;
    int32_t ret = 0;

    trace_hvf_psci_call(param[0], param[1], param[2], param[3],
                        arm_cpu_mp_affinity(arm_cpu));

    switch (param[0]) {
    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        ret = QEMU_PSCI_VERSION_1_1;
        break;
    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
        break;
    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        mpidr = param[1];

        switch (param[2]) {
        case 0:
            target_cpu_state = arm_get_cpu_by_id(mpidr);
            if (!target_cpu_state) {
                ret = QEMU_PSCI_RET_INVALID_PARAMS;
                break;
            }
            target_cpu = ARM_CPU(target_cpu_state);

            ret = target_cpu->power_state;
            break;
        default:
            /* Everything above affinity level 0 is always on. */
            ret = 0;
        }
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /*
         * QEMU reset and shutdown are async requests, but PSCI
         * mandates that we never return from the reset/shutdown
         * call, so power the CPU off now so it doesn't execute
         * anything further.
         */
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN64_CPU_ON:
        mpidr = param[1];
        entry = param[2];
        context_id = param[3];
        ret = arm_set_cpu_on(mpidr, entry, context_id,
                             target_el, target_aarch64);
        break;
    case QEMU_PSCI_0_1_FN_CPU_OFF:
    case QEMU_PSCI_0_2_FN_CPU_OFF:
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        /* Affinity levels are not supported in QEMU */
        if (param[1] & 0xfffe0000) {
            ret = QEMU_PSCI_RET_INVALID_PARAMS;
            break;
        }
        /* Powerdown is not supported, we always go into WFI */
        env->xregs[0] = 0;
        hvf_wfi(cpu);
        break;
    case QEMU_PSCI_0_1_FN_MIGRATE:
    case QEMU_PSCI_0_2_FN_MIGRATE:
        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        break;
    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
        switch (param[1]) {
        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        case QEMU_PSCI_0_1_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN64_CPU_ON:
        case QEMU_PSCI_0_1_FN_CPU_OFF:
        case QEMU_PSCI_0_2_FN_CPU_OFF:
        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
            ret = 0;
            break;
        case QEMU_PSCI_0_1_FN_MIGRATE:
        case QEMU_PSCI_0_2_FN_MIGRATE:
        default:
            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        }
        break;
    default:
        return false;
    }
    env->xregs[0] = ret;
    return true;
}

static bool is_id_sysreg(uint32_t reg)
{
    return SYSREG_OP0(reg) == 3 &&
           SYSREG_OP1(reg) == 0 &&
           SYSREG_CRN(reg) == 0 &&
           SYSREG_CRM(reg) >= 1 &&
           SYSREG_CRM(reg) < 8;
}
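
/*
 * Convert an ESR-style SYSREG() encoding into the key format used by QEMU's
 * cp_regs hashtable, so the TCG-side ARMCPRegInfo descriptions can be reused
 * for the lookups below.
 */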
static uint32_t hvf_reg2cp_reg(uint32_t reg)
{
    return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                              (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK,
                              (reg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK,
                              (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK,
                              (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK,
                              (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
}

static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->type & ARM_CP_CONST) {
            *val = ri->resetvalue;
        } else if (ri->readfn) {
            *val = ri->readfn(env, ri);
        } else {
            *val = CPREG_FIELD64(env, ri);
        }
        trace_hvf_vgic_read(ri->name, *val);
        return true;
    }

    return false;
}

static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    if (arm_feature(env, ARM_FEATURE_PMU)) {
        switch (reg) {
        case SYSREG_PMCR_EL0:
            *val = env->cp15.c9_pmcr;
            return 0;
        case SYSREG_PMCCNTR_EL0:
            pmu_op_start(env);
            *val = env->cp15.c15_ccnt;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMCNTENCLR_EL0:
            *val = env->cp15.c9_pmcnten;
            return 0;
        case SYSREG_PMOVSCLR_EL0:
            *val = env->cp15.c9_pmovsr;
            return 0;
        case SYSREG_PMSELR_EL0:
            *val = env->cp15.c9_pmselr;
            return 0;
        case SYSREG_PMINTENCLR_EL1:
            *val = env->cp15.c9_pminten;
            return 0;
        case SYSREG_PMCCFILTR_EL0:
            *val = env->cp15.pmccfiltr_el0;
            return 0;
        case SYSREG_PMCNTENSET_EL0:
            *val = env->cp15.c9_pmcnten;
            return 0;
        case SYSREG_PMUSERENR_EL0:
            *val = env->cp15.c9_pmuserenr;
            return 0;
        case SYSREG_PMCEID0_EL0:
        case SYSREG_PMCEID1_EL0:
            /* We can't really count anything yet, declare all events invalid */
            *val = 0;
            return 0;
        }
    }

    switch (reg) {
    case SYSREG_CNTPCT_EL0:
        *val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
              gt_cntfrq_period_ns(arm_cpu);
        return 0;
    case SYSREG_OSLSR_EL1:
        *val = env->cp15.oslsr_el1;
        return 0;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
    case SYSREG_ICC_CTLR_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (hvf_sysreg_read_cp(cpu, reg, val)) {
            return 0;
        }
        break;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        *val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        *val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        *val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        *val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
        return 0;
    default:
        if (is_id_sysreg(reg)) {
            /* ID system registers read as RES0 */
            *val = 0;
            return 0;
        }
    }

    cpu_synchronize_state(cpu);
    trace_hvf_unhandled_sysreg_read(env->pc, reg,
                                    SYSREG_OP0(reg),
                                    SYSREG_OP1(reg),
                                    SYSREG_CRN(reg),
                                    SYSREG_CRM(reg),
                                    SYSREG_OP2(reg));
    hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());

    return 1;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmu_event_supported(uint16_t number)
{
    return false;
}
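
/*
 * Since pmu_event_supported() reports no events at all, only the cycle
 * counter (counter 31 / PMCCNTR) can ever pass pmu_counter_enabled() below.
 */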
/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool enabled, filtered = true;
    int el = arm_current_el(env);

    enabled = (env->cp15.c9_pmcr & PMCRE) &&
              (env->cp15.c9_pmcnten & (1 << counter));

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    if (el == 0) {
        filtered = filter & PMXEVTYPER_U;
    } else if (el == 1) {
        filtered = filter & PMXEVTYPER_P;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!pmu_event_supported(event)) {
            return false;
        }
    }

    return enabled && !filtered;
}

static void pmswinc_write(CPUARMState *env, uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;
        }
    }
}

static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));

    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->writefn) {
            ri->writefn(env, ri, val);
        } else {
            CPREG_FIELD64(env, ri) = val;
        }

        trace_hvf_vgic_write(ri->name, val);
        return true;
    }

    return false;
}

static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    trace_hvf_sysreg_write(reg,
                           SYSREG_OP0(reg),
                           SYSREG_OP1(reg),
                           SYSREG_CRN(reg),
                           SYSREG_CRM(reg),
                           SYSREG_OP2(reg),
                           val);
    if (arm_feature(env, ARM_FEATURE_PMU)) {
        switch (reg) {
        case SYSREG_PMCCNTR_EL0:
            pmu_op_start(env);
            env->cp15.c15_ccnt = val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMCR_EL0:
            pmu_op_start(env);

            if (val & PMCRC) {
                /* The counter has been reset */
                env->cp15.c15_ccnt = 0;
            }

            if (val & PMCRP) {
                unsigned int i;
                for (i = 0; i < pmu_num_counters(env); i++) {
                    env->cp15.c14_pmevcntr[i] = 0;
                }
            }

            env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
            env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);

            pmu_op_finish(env);
            return 0;
        case SYSREG_PMUSERENR_EL0:
            env->cp15.c9_pmuserenr = val & 0xf;
            return 0;
        case SYSREG_PMCNTENSET_EL0:
            env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
            return 0;
        case SYSREG_PMCNTENCLR_EL0:
            env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
            return 0;
        case SYSREG_PMINTENCLR_EL1:
            pmu_op_start(env);
            env->cp15.c9_pminten |= val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMOVSCLR_EL0:
            pmu_op_start(env);
            env->cp15.c9_pmovsr &= ~val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMSWINC_EL0:
            pmu_op_start(env);
            pmswinc_write(env, val);
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMSELR_EL0:
            env->cp15.c9_pmselr = val & 0x1f;
            return 0;
        case SYSREG_PMCCFILTR_EL0:
            pmu_op_start(env);
            env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
            pmu_op_finish(env);
            return 0;
        }
    }

    switch (reg) {
    case SYSREG_OSLAR_EL1:
        env->cp15.oslsr_el1 = val & 1;
        return 0;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_CTLR_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (hvf_sysreg_write_cp(cpu, reg, val)) {
            return 0;
        }
        break;
    case SYSREG_MDSCR_EL1:
        env->cp15.mdscr_el1 = val;
        return 0;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
        return 0;
    }
    cpu_synchronize_state(cpu);
    trace_hvf_unhandled_sysreg_write(env->pc, reg,
                                     SYSREG_OP0(reg),
                                     SYSREG_OP1(reg),
                                     SYSREG_CRN(reg),
                                     SYSREG_CRM(reg),
                                     SYSREG_OP2(reg));
    hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());

    return 1;
}

static int hvf_inject_interrupts(CPUState *cpu)
{
    if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
        trace_hvf_inject_fiq();
        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
                                      true);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        trace_hvf_inject_irq();
        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
                                      true);
    }

    return 0;
}
static uint64_t hvf_vtimer_val_raw(void)
{
    /*
     * mach_absolute_time() returns the vtimer value without the VM
     * offset that we define. Apply our own offset on top.
     */
    return mach_absolute_time() - hvf_state->vtimer_offset;
}

static uint64_t hvf_vtimer_val(void)
{
    if (!runstate_is_running()) {
        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
        return vtimer.vtimer_val;
    }

    return hvf_vtimer_val_raw();
}

static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
{
    /*
     * Use pselect to sleep so that other threads can IPI us while we're
     * sleeping.
     */
    qatomic_set_mb(&cpu->thread_kicked, false);
    bql_unlock();
    pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
    bql_lock();
}

static void hvf_wfi(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    struct timespec ts;
    hv_return_t r;
    uint64_t ctl;
    uint64_t cval;
    int64_t ticks_to_sleep;
    uint64_t seconds;
    uint64_t nanos;
    uint32_t cntfrq;

    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
        /* Interrupt pending, no need to wait */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    if (!(ctl & 1) || (ctl & 2)) {
        /* Timer disabled or masked, just wait for an IPI. */
        hvf_wait_for_ipi(cpu, NULL);
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
    assert_hvf_ok(r);

    ticks_to_sleep = cval - hvf_vtimer_val();
    if (ticks_to_sleep < 0) {
        return;
    }

    cntfrq = gt_cntfrq_period_ns(arm_cpu);
    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
    nanos = ticks_to_sleep * cntfrq;
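
    /*
     * Note that "cntfrq" here actually holds the counter period in
     * nanoseconds, and the sleep time is split into whole seconds plus a
     * nanosecond remainder because that is the shape struct timespec wants.
     */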
    /*
     * Don't sleep for less than the time a context switch would take,
     * so that we can satisfy fast timer requests on the same CPU.
     * Measurements on M1 show the sweet spot to be ~2ms.
     */
    if (!seconds && nanos < (2 * SCALE_MS)) {
        return;
    }

    ts = (struct timespec) { seconds, nanos };
    hvf_wait_for_ipi(cpu, &ts);
}
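
/*
 * hvf masks the vtimer when it signals HV_EXIT_REASON_VTIMER_ACTIVATED;
 * hvf_sync_vtimer() mirrors the timer state onto the GIC line and unmasks
 * the vtimer again once the guest has quiesced it.
 */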
static void hvf_sync_vtimer(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    hv_return_t r;
    uint64_t ctl;
    bool irq_state;

    if (!cpu->accel->vtimer_masked) {
        /* We will get notified on vtimer changes by hvf, nothing to do */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
                (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
    qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);

    if (!irq_state) {
        /* Timer no longer asserting, we can unmask it */
        hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
        cpu->accel->vtimer_masked = false;
    }
}

int hvf_vcpu_exec(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    int ret;
    hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
    hv_return_t r;
    bool advance_pc = false;

    if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
        hvf_inject_interrupts(cpu)) {
        return EXCP_INTERRUPT;
    }

    if (cpu->halted) {
        return EXCP_HLT;
    }
1888 flush_cpu_state(cpu
);
1891 assert_hvf_ok(hv_vcpu_run(cpu
->accel
->fd
));
    /* handle VMEXIT */
    uint64_t exit_reason = hvf_exit->reason;
    uint64_t syndrome = hvf_exit->exception.syndrome;
    uint32_t ec = syn_get_ec(syndrome);

    ret = 0;
    bql_lock();
    switch (exit_reason) {
    case HV_EXIT_REASON_EXCEPTION:
        /* This is the main one, handle below. */
        break;
    case HV_EXIT_REASON_VTIMER_ACTIVATED:
        qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
        cpu->accel->vtimer_masked = true;
        return 0;
    case HV_EXIT_REASON_CANCELED:
        /* we got kicked, no exit to process */
        return 0;
    default:
        g_assert_not_reached();
    }

    hvf_sync_vtimer(cpu);

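    /*
     * An HV_EXIT_REASON_EXCEPTION exit carries the guest's exception
     * syndrome (ESR): dispatch on its exception class (EC) field and
     * emulate accordingly.
     */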
    switch (ec) {
    case EC_SOFTWARESTEP: {
        ret = EXCP_DEBUG;

        if (!cpu->singlestep_enabled) {
            error_report("EC_SOFTWARESTEP but single-stepping not enabled");
        }
        break;
    }
    case EC_AA64_BKPT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
            /* Re-inject into the guest */
            ret = 0;
            hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
        }
        break;
    }
    case EC_BREAKPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!find_hw_breakpoint(cpu, env->pc)) {
            error_report("EC_BREAKPOINT but unknown hw breakpoint");
        }
        break;
    }
    case EC_WATCHPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        CPUWatchpoint *wp =
            find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
        if (!wp) {
            error_report("EXCP_DEBUG but unknown hw watchpoint");
        }
        cpu->watchpoint_hit = wp;
        break;
    }
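    /*
     * A trapped data abort with a valid ISS (ISV set) carries enough
     * syndrome information (access size, register, direction) for QEMU to
     * complete the MMIO access on the guest's behalf.
     */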
    case EC_DATAABORT: {
        bool isv = syndrome & ARM_EL_ISV;
        bool iswrite = (syndrome >> 6) & 1;
        bool s1ptw = (syndrome >> 7) & 1;
        uint32_t sas = (syndrome >> 22) & 3;
        uint32_t len = 1 << sas;
        uint32_t srt = (syndrome >> 16) & 0x1f;
        uint32_t cm = (syndrome >> 8) & 0x1;
        uint64_t val = 0;

        trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
                             hvf_exit->exception.physical_address, isv,
                             iswrite, s1ptw, len, srt);

        if (cm) {
            /* We don't cache MMIO regions */
            advance_pc = true;
            break;
        }

        assert(isv);

        if (iswrite) {
            val = hvf_get_reg(cpu, srt);
            address_space_write(&address_space_memory,
                                hvf_exit->exception.physical_address,
                                MEMTXATTRS_UNSPECIFIED, &val, len);
        } else {
            address_space_read(&address_space_memory,
                               hvf_exit->exception.physical_address,
                               MEMTXATTRS_UNSPECIFIED, &val, len);
            hvf_set_reg(cpu, srt, val);
        }

        advance_pc = true;
        break;
    }
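    /*
     * MSR/MRS accesses to system registers that trap out of the guest land
     * here; hvf_sysreg_read()/hvf_sysreg_write() emulate the access, and
     * the PC is only advanced when emulation succeeds.
     */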
    case EC_SYSTEMREGISTERTRAP: {
        bool isread = (syndrome >> 0) & 1;
        uint32_t rt = (syndrome >> 5) & 0x1f;
        uint32_t reg = syndrome & SYSREG_MASK;
        uint64_t val;
        int sysreg_ret = 0;

        if (isread) {
            sysreg_ret = hvf_sysreg_read(cpu, reg, &val);
            if (!sysreg_ret) {
                trace_hvf_sysreg_read(reg,
                                      SYSREG_OP0(reg),
                                      SYSREG_OP1(reg),
                                      SYSREG_CRN(reg),
                                      SYSREG_CRM(reg),
                                      SYSREG_OP2(reg),
                                      val);
                hvf_set_reg(cpu, rt, val);
            }
        } else {
            val = hvf_get_reg(cpu, rt);
            sysreg_ret = hvf_sysreg_write(cpu, reg, val);
        }

        advance_pc = !sysreg_ret;
        break;
    }
    case EC_WFX_TRAP:
        advance_pc = true;
        if (!(syndrome & WFX_IS_WFE)) {
            hvf_wfi(cpu);
        }
        break;
    case EC_AA64_HVC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_hvc(env->xregs[0]);
                /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_hvc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    case EC_AA64_SMC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
            advance_pc = true;

            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_smc(env->xregs[0]);
                /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_smc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_exit(syndrome, ec, env->pc);
        error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
    }

    if (advance_pc) {
        uint64_t pc;

        flush_cpu_state(cpu);

        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
        assert_hvf_ok(r);
        pc += 4;
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
        assert_hvf_ok(r);

        /* Handle single-stepping over instructions which trigger a VM exit */
        if (cpu->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
    }

    return ret;
}

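/*
 * Only the frozen vtimer value is migrated; the host-specific vtimer
 * offset is recomputed from mach_absolute_time() when the VM resumes
 * (see hvf_vm_state_change()).
 */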
static const VMStateDescription vmstate_hvf_vtimer = {
    .name = "hvf-vtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(vtimer_val, HVFVTimer),
        VMSTATE_END_OF_LIST()
    },
};

static void hvf_vm_state_change(void *opaque, bool running, RunState state)
{
    HVFVTimer *s = opaque;

    if (running) {
        /* Update vtimer offset on all CPUs */
        hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
        cpu_synchronize_all_states();
    } else {
        /* Remember vtimer value on every pause */
        s->vtimer_val = hvf_vtimer_val_raw();
    }
}

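/*
 * Arch-specific one-time setup: anchor the vtimer offset, register vtimer
 * migration state and the VM state change hook, and initialise debugging
 * support.
 */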
int hvf_arch_init(void)
{
    hvf_state->vtimer_offset = mach_absolute_time();
    vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
    qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);

    hvf_arm_init_debug();

    return 0;
}

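/* A BRK #0 instruction, written over guest code to plant sw breakpoints */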
static const uint32_t brk_insn = 0xd4200000;

int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) ||
        brk != brk_insn ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

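/*
 * GDB hardware breakpoints and watchpoints are accumulated in the
 * hw_breakpoints/hw_watchpoints arrays and written to the DBGB and DBGW
 * register pairs when the vCPU debug state is next updated.
 */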
int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void hvf_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

/*
 * Update the vCPU with the gdbstub's view of debug registers. This view
 * consists of all hardware breakpoints and watchpoints inserted so far while
 * debugging the guest.
 */
static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
{
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr);
        assert_hvf_ok(r);
    }
    for (i = cur_hw_bps; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0);
        assert_hvf_ok(r);
    }

    for (i = 0; i < cur_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr);
        assert_hvf_ok(r);
    }
    for (i = cur_hw_wps; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0);
        assert_hvf_ok(r);
    }
}

/*
 * Update the vCPU with the guest's view of debug registers. This view is kept
 * in the environment at all times.
 */
static void hvf_put_guest_debug_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i],
                                env->cp15.dbgbcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i],
                                env->cp15.dbgbvr[i]);
        assert_hvf_ok(r);
    }

    for (i = 0; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i],
                                env->cp15.dbgwcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i],
                                env->cp15.dbgwvr[i]);
        assert_hvf_ok(r);
    }
}

static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

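/*
 * Debug trapping is all-or-nothing: if any vCPU is being debugged, debug
 * exceptions and debug register accesses must exit to QEMU on every vCPU,
 * since breakpoints may be hit from any of them.
 */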
static void hvf_arch_set_traps(void)
{
    CPUState *cpu;
    bool should_enable_traps = false;
    hv_return_t r = HV_SUCCESS;

    /*
     * Check whether guest debugging is enabled for at least one vCPU; if it
     * is, enable exiting the guest on all vCPUs.
     */
    CPU_FOREACH(cpu) {
        should_enable_traps |= cpu->accel->guest_debug_enabled;
    }
    CPU_FOREACH(cpu) {
        /* Set whether debug exceptions exit the guest */
        r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
                                              should_enable_traps);
        assert_hvf_ok(r);

        /* Set whether accesses to debug registers exit the guest */
        r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
                                                should_enable_traps);
        assert_hvf_ok(r);
    }
}

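/*
 * Reconcile the vCPU's debug state with QEMU's: decide whether guest
 * debugging is active, load the matching view of the debug registers, and
 * toggle the single-step and breakpoint-exception bits in MDSCR_EL1.
 */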
void hvf_arch_update_guest_debug(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    /* Check whether guest debugging is enabled */
    cpu->accel->guest_debug_enabled = cpu->singlestep_enabled ||
                                      hvf_sw_breakpoints_active(cpu) ||
                                      hvf_arm_hw_debug_active(cpu);

    /* Update debug registers */
    if (cpu->accel->guest_debug_enabled) {
        hvf_put_gdbstub_debug_registers(cpu);
    } else {
        hvf_put_guest_debug_registers(cpu);
    }

    cpu_synchronize_state(cpu);

    /* Enable/disable single-stepping */
    if (cpu->singlestep_enabled) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1);
        pstate_write(env, pstate_read(env) | PSTATE_SS);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0);
    }

    /* Enable/disable Breakpoint exceptions */
    if (hvf_arm_hw_debug_active(cpu)) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
    }

    hvf_arch_set_traps();
}

bool hvf_arch_supports_guest_debug(void)
{
    return true;
}