/*
 * QEMU PowerPC sPAPR nested virtualization (KVM-HV and PAPR APIs)
 * include/hw/ppc/spapr_nested.h
 */
1 #ifndef HW_SPAPR_NESTED_H
2 #define HW_SPAPR_NESTED_H
4 #include "target/ppc/cpu.h"
/* Guest State Buffer Element IDs */
#define GSB_HV_VCPU_IGNORED_ID  0x0000 /* An element whose value is ignored */
#define GSB_HV_VCPU_STATE_SIZE  0x0001 /* HV internal format VCPU state size */
#define GSB_VCPU_OUT_BUF_MIN_SZ 0x0002 /* Min size of the Run VCPU o/p buffer */
#define GSB_VCPU_LPVR           0x0003 /* Logical PVR */
#define GSB_TB_OFFSET           0x0004 /* Timebase Offset */
#define GSB_PART_SCOPED_PAGETBL 0x0005 /* Partition Scoped Page Table */
#define GSB_PROCESS_TBL         0x0006 /* Process Table */
                    /* RESERVED 0x0007 - 0x0BFF */
#define GSB_VCPU_IN_BUFFER      0x0C00 /* Run VCPU Input Buffer */
#define GSB_VCPU_OUT_BUFFER     0x0C01 /* Run VCPU Out Buffer */
#define GSB_VCPU_VPA            0x0C02 /* HRA to Guest VCPU VPA */
                    /* RESERVED 0x0C03 - 0x0FFF */
#define GSB_VCPU_GPR0           0x1000
#define GSB_VCPU_GPR1           0x1001
#define GSB_VCPU_GPR2           0x1002
#define GSB_VCPU_GPR3           0x1003
#define GSB_VCPU_GPR4           0x1004
#define GSB_VCPU_GPR5           0x1005
#define GSB_VCPU_GPR6           0x1006
#define GSB_VCPU_GPR7           0x1007
#define GSB_VCPU_GPR8           0x1008
#define GSB_VCPU_GPR9           0x1009
#define GSB_VCPU_GPR10          0x100A
#define GSB_VCPU_GPR11          0x100B
#define GSB_VCPU_GPR12          0x100C
#define GSB_VCPU_GPR13          0x100D
#define GSB_VCPU_GPR14          0x100E
#define GSB_VCPU_GPR15          0x100F
#define GSB_VCPU_GPR16          0x1010
#define GSB_VCPU_GPR17          0x1011
#define GSB_VCPU_GPR18          0x1012
#define GSB_VCPU_GPR19          0x1013
#define GSB_VCPU_GPR20          0x1014
#define GSB_VCPU_GPR21          0x1015
#define GSB_VCPU_GPR22          0x1016
#define GSB_VCPU_GPR23          0x1017
#define GSB_VCPU_GPR24          0x1018
#define GSB_VCPU_GPR25          0x1019
#define GSB_VCPU_GPR26          0x101A
#define GSB_VCPU_GPR27          0x101B
#define GSB_VCPU_GPR28          0x101C
#define GSB_VCPU_GPR29          0x101D
#define GSB_VCPU_GPR30          0x101E
#define GSB_VCPU_GPR31          0x101F
#define GSB_VCPU_HDEC_EXPIRY_TB 0x1020
#define GSB_VCPU_SPR_NIA        0x1021
#define GSB_VCPU_SPR_MSR        0x1022
#define GSB_VCPU_SPR_LR         0x1023
#define GSB_VCPU_SPR_XER        0x1024
#define GSB_VCPU_SPR_CTR        0x1025
#define GSB_VCPU_SPR_CFAR       0x1026
#define GSB_VCPU_SPR_SRR0       0x1027
#define GSB_VCPU_SPR_SRR1       0x1028
#define GSB_VCPU_SPR_DAR        0x1029
#define GSB_VCPU_DEC_EXPIRE_TB  0x102A
#define GSB_VCPU_SPR_VTB        0x102B
#define GSB_VCPU_SPR_LPCR       0x102C
#define GSB_VCPU_SPR_HFSCR      0x102D
#define GSB_VCPU_SPR_FSCR       0x102E
#define GSB_VCPU_SPR_FPSCR      0x102F
#define GSB_VCPU_SPR_DAWR0      0x1030
#define GSB_VCPU_SPR_DAWR1      0x1031
#define GSB_VCPU_SPR_CIABR      0x1032
#define GSB_VCPU_SPR_PURR       0x1033
#define GSB_VCPU_SPR_SPURR      0x1034
#define GSB_VCPU_SPR_IC         0x1035
#define GSB_VCPU_SPR_SPRG0      0x1036
#define GSB_VCPU_SPR_SPRG1      0x1037
#define GSB_VCPU_SPR_SPRG2      0x1038
#define GSB_VCPU_SPR_SPRG3      0x1039
#define GSB_VCPU_SPR_PPR        0x103A
#define GSB_VCPU_SPR_MMCR0      0x103B
#define GSB_VCPU_SPR_MMCR1      0x103C
#define GSB_VCPU_SPR_MMCR2      0x103D
#define GSB_VCPU_SPR_MMCR3      0x103E
#define GSB_VCPU_SPR_MMCRA      0x103F
#define GSB_VCPU_SPR_SIER       0x1040
#define GSB_VCPU_SPR_SIER2      0x1041
#define GSB_VCPU_SPR_SIER3      0x1042
#define GSB_VCPU_SPR_BESCR      0x1043
#define GSB_VCPU_SPR_EBBHR      0x1044
#define GSB_VCPU_SPR_EBBRR      0x1045
#define GSB_VCPU_SPR_AMR        0x1046
#define GSB_VCPU_SPR_IAMR       0x1047
#define GSB_VCPU_SPR_AMOR       0x1048
#define GSB_VCPU_SPR_UAMOR      0x1049
#define GSB_VCPU_SPR_SDAR       0x104A
#define GSB_VCPU_SPR_SIAR       0x104B
#define GSB_VCPU_SPR_DSCR       0x104C
#define GSB_VCPU_SPR_TAR        0x104D
#define GSB_VCPU_SPR_DEXCR      0x104E
#define GSB_VCPU_SPR_HDEXCR     0x104F
#define GSB_VCPU_SPR_HASHKEYR   0x1050
#define GSB_VCPU_SPR_HASHPKEYR  0x1051
#define GSB_VCPU_SPR_CTRL       0x1052
                    /* RESERVED 0x1053 - 0x1FFF */
#define GSB_VCPU_SPR_CR         0x2000
#define GSB_VCPU_SPR_PIDR       0x2001
#define GSB_VCPU_SPR_DSISR      0x2002
#define GSB_VCPU_SPR_VSCR       0x2003
#define GSB_VCPU_SPR_VRSAVE     0x2004
#define GSB_VCPU_SPR_DAWRX0     0x2005
#define GSB_VCPU_SPR_DAWRX1     0x2006
#define GSB_VCPU_SPR_PMC1       0x2007
#define GSB_VCPU_SPR_PMC2       0x2008
#define GSB_VCPU_SPR_PMC3       0x2009
#define GSB_VCPU_SPR_PMC4       0x200A
#define GSB_VCPU_SPR_PMC5       0x200B
#define GSB_VCPU_SPR_PMC6       0x200C
#define GSB_VCPU_SPR_WORT       0x200D
#define GSB_VCPU_SPR_PSPB       0x200E
                    /* RESERVED 0x200F - 0x2FFF */
#define GSB_VCPU_SPR_VSR0       0x3000
#define GSB_VCPU_SPR_VSR1       0x3001
#define GSB_VCPU_SPR_VSR2       0x3002
#define GSB_VCPU_SPR_VSR3       0x3003
#define GSB_VCPU_SPR_VSR4       0x3004
#define GSB_VCPU_SPR_VSR5       0x3005
#define GSB_VCPU_SPR_VSR6       0x3006
#define GSB_VCPU_SPR_VSR7       0x3007
#define GSB_VCPU_SPR_VSR8       0x3008
#define GSB_VCPU_SPR_VSR9       0x3009
#define GSB_VCPU_SPR_VSR10      0x300A
#define GSB_VCPU_SPR_VSR11      0x300B
#define GSB_VCPU_SPR_VSR12      0x300C
#define GSB_VCPU_SPR_VSR13      0x300D
#define GSB_VCPU_SPR_VSR14      0x300E
#define GSB_VCPU_SPR_VSR15      0x300F
#define GSB_VCPU_SPR_VSR16      0x3010
#define GSB_VCPU_SPR_VSR17      0x3011
#define GSB_VCPU_SPR_VSR18      0x3012
#define GSB_VCPU_SPR_VSR19      0x3013
#define GSB_VCPU_SPR_VSR20      0x3014
#define GSB_VCPU_SPR_VSR21      0x3015
#define GSB_VCPU_SPR_VSR22      0x3016
#define GSB_VCPU_SPR_VSR23      0x3017
#define GSB_VCPU_SPR_VSR24      0x3018
#define GSB_VCPU_SPR_VSR25      0x3019
#define GSB_VCPU_SPR_VSR26      0x301A
#define GSB_VCPU_SPR_VSR27      0x301B
#define GSB_VCPU_SPR_VSR28      0x301C
#define GSB_VCPU_SPR_VSR29      0x301D
#define GSB_VCPU_SPR_VSR30      0x301E
#define GSB_VCPU_SPR_VSR31      0x301F
#define GSB_VCPU_SPR_VSR32      0x3020
#define GSB_VCPU_SPR_VSR33      0x3021
#define GSB_VCPU_SPR_VSR34      0x3022
#define GSB_VCPU_SPR_VSR35      0x3023
#define GSB_VCPU_SPR_VSR36      0x3024
#define GSB_VCPU_SPR_VSR37      0x3025
#define GSB_VCPU_SPR_VSR38      0x3026
#define GSB_VCPU_SPR_VSR39      0x3027
#define GSB_VCPU_SPR_VSR40      0x3028
#define GSB_VCPU_SPR_VSR41      0x3029
#define GSB_VCPU_SPR_VSR42      0x302A
#define GSB_VCPU_SPR_VSR43      0x302B
#define GSB_VCPU_SPR_VSR44      0x302C
#define GSB_VCPU_SPR_VSR45      0x302D
#define GSB_VCPU_SPR_VSR46      0x302E
#define GSB_VCPU_SPR_VSR47      0x302F
#define GSB_VCPU_SPR_VSR48      0x3030
#define GSB_VCPU_SPR_VSR49      0x3031
#define GSB_VCPU_SPR_VSR50      0x3032
#define GSB_VCPU_SPR_VSR51      0x3033
#define GSB_VCPU_SPR_VSR52      0x3034
#define GSB_VCPU_SPR_VSR53      0x3035
#define GSB_VCPU_SPR_VSR54      0x3036
#define GSB_VCPU_SPR_VSR55      0x3037
#define GSB_VCPU_SPR_VSR56      0x3038
#define GSB_VCPU_SPR_VSR57      0x3039
#define GSB_VCPU_SPR_VSR58      0x303A
#define GSB_VCPU_SPR_VSR59      0x303B
#define GSB_VCPU_SPR_VSR60      0x303C
#define GSB_VCPU_SPR_VSR61      0x303D
#define GSB_VCPU_SPR_VSR62      0x303E
#define GSB_VCPU_SPR_VSR63      0x303F
                    /* RESERVED 0x3040 - 0xEFFF */
#define GSB_VCPU_SPR_HDAR       0xF000
#define GSB_VCPU_SPR_HDSISR     0xF001
#define GSB_VCPU_SPR_HEIR       0xF002
#define GSB_VCPU_SPR_ASDR       0xF003
/* End of list of Guest State Buffer Element IDs */
#define GSB_LAST                GSB_VCPU_SPR_ASDR
191 typedef struct SpaprMachineStateNested {
192 uint64_t ptcr;
193 uint8_t api;
194 #define NESTED_API_KVM_HV 1
195 #define NESTED_API_PAPR 2
196 bool capabilities_set;
197 uint32_t pvr_base;
198 GHashTable *guests;
199 } SpaprMachineStateNested;
/* State of one nested guest created via the PAPR nested API. */
typedef struct SpaprMachineStateNestedGuest {
    uint32_t pvr_logical;       /* Logical PVR presented to the nested guest */
    unsigned long nr_vcpus;
    uint64_t parttbl[2];
    uint64_t tb_offset;         /* Timebase offset relative to L1 */
    struct SpaprMachineStateNestedGuestVcpu *vcpus;
} SpaprMachineStateNestedGuest;
/* Nested PAPR API related macros */
#define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000
#define H_GUEST_CAPABILITIES_P9_MODE  0x4000000000000000
#define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000
#define H_GUEST_CAP_VALID_MASK        (H_GUEST_CAPABILITIES_P10_MODE | \
                                       H_GUEST_CAPABILITIES_P9_MODE)
#define H_GUEST_CAP_COPY_MEM_BMAP     0
#define H_GUEST_CAP_P9_MODE_BMAP      1
#define H_GUEST_CAP_P10_MODE_BMAP     2
#define PAPR_NESTED_GUEST_MAX         4096
#define H_GUEST_DELETE_ALL_FLAG       0x8000000000000000ULL
#define PAPR_NESTED_GUEST_VCPU_MAX    2048
#define VCPU_OUT_BUF_MIN_SZ           0x80ULL
#define HVMASK_DEFAULT                0xffffffffffffffff
#define HVMASK_LPCR                   0x0070000003820800
#define HVMASK_MSR                    0xEBFFFFFFFFBFEFFF
#define HVMASK_HDEXCR                 0x00000000FFFFFFFF
#define HVMASK_TB_OFFSET              0x000000FFFFFFFFFF
#define GSB_MAX_BUF_SIZE              (1024 * 1024)
#define H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE 0x8000000000000000
#define GUEST_STATE_REQUEST_GUEST_WIDE       0x1
#define GUEST_STATE_REQUEST_SET              0x2

/*
 * As per ISA v3.1B, following bits are reserved:
 *      0:2
 *      4:57 (ISA mentions bit 58 as well but it should be used for P10)
 *      61:63 (hence, haven't included PCR bits for v2.06 and v2.05
 *      in LOW BITS)
 */
#define PCR_LOW_BITS   (PCR_COMPAT_3_10 | PCR_COMPAT_3_00)
#define HVMASK_PCR     (~PCR_LOW_BITS)
/*
 * Initializer macros for struct guest_state_element_type table entries.
 * Each expands to a brace-enclosed designated initializer; the variants
 * differ in which accessor resolves the element's location and in whether
 * a hypervisor write mask other than HVMASK_DEFAULT applies.
 */
#define GUEST_STATE_ELEMENT(i, sz, s, f, ptr, c) { \
    .id = (i),                                     \
    .size = (sz),                                  \
    .location = ptr,                               \
    .offset = offsetof(struct s, f),               \
    .copy = (c)                                    \
}

#define GSBE_NESTED(i, sz, f, c) {                             \
    .id = (i),                                                 \
    .size = (sz),                                              \
    .location = get_guest_ptr,                                 \
    .offset = offsetof(struct SpaprMachineStateNestedGuest, f),\
    .copy = (c),                                               \
    .mask = HVMASK_DEFAULT                                     \
}

#define GSBE_NESTED_MSK(i, sz, f, c, m) {                      \
    .id = (i),                                                 \
    .size = (sz),                                              \
    .location = get_guest_ptr,                                 \
    .offset = offsetof(struct SpaprMachineStateNestedGuest, f),\
    .copy = (c),                                               \
    .mask = (m)                                                \
}

#define GSBE_NESTED_VCPU(i, sz, f, c) {                            \
    .id = (i),                                                     \
    .size = (sz),                                                  \
    .location = get_vcpu_ptr,                                      \
    .offset = offsetof(struct SpaprMachineStateNestedGuestVcpu, f),\
    .copy = (c),                                                   \
    .mask = HVMASK_DEFAULT                                         \
}

/* Element that is accepted but ignored (no backing storage). */
#define GUEST_STATE_ELEMENT_NOP(i, sz) { \
    .id = (i),                           \
    .size = (sz),                        \
    .location = NULL,                    \
    .offset = 0,                         \
    .copy = NULL,                        \
    .mask = HVMASK_DEFAULT               \
}

#define GUEST_STATE_ELEMENT_NOP_DW(i)   \
        GUEST_STATE_ELEMENT_NOP(i, 8)
#define GUEST_STATE_ELEMENT_NOP_W(i) \
        GUEST_STATE_ELEMENT_NOP(i, 4)

#define GUEST_STATE_ELEMENT_BASE(i, s, c) {  \
        .id = (i),                           \
        .size = (s),                         \
        .location = get_vcpu_state_ptr,      \
        .offset = 0,                         \
        .copy = (c),                         \
        .mask = HVMASK_DEFAULT               \
    }

#define GUEST_STATE_ELEMENT_OFF(i, s, f, c) {            \
        .id = (i),                                       \
        .size = (s),                                     \
        .location = get_vcpu_state_ptr,                  \
        .offset = offsetof(struct nested_ppc_state, f),  \
        .copy = (c),                                     \
        .mask = HVMASK_DEFAULT                           \
    }

#define GUEST_STATE_ELEMENT_MSK(i, s, f, c, m) {         \
        .id = (i),                                       \
        .size = (s),                                     \
        .location = get_vcpu_state_ptr,                  \
        .offset = offsetof(struct nested_ppc_state, f),  \
        .copy = (c),                                     \
        .mask = (m)                                      \
    }

/* Width-specific shorthands over GUEST_STATE_ELEMENT_OFF/_MSK. */
#define GUEST_STATE_ELEMENT_ENV_QW(i, f) \
    GUEST_STATE_ELEMENT_OFF(i, 16, f, copy_state_16to16)
#define GUEST_STATE_ELEMENT_ENV_DW(i, f) \
    GUEST_STATE_ELEMENT_OFF(i, 8, f, copy_state_8to8)
#define GUEST_STATE_ELEMENT_ENV_W(i, f) \
    GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to8)
#define GUEST_STATE_ELEMENT_ENV_WW(i, f) \
    GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to4)
#define GSE_ENV_DWM(i, f, m) \
    GUEST_STATE_ELEMENT_MSK(i, 8, f, copy_state_8to8, m)
329 struct guest_state_element {
330 uint16_t id;
331 uint16_t size;
332 uint8_t value[];
333 } QEMU_PACKED;
335 struct guest_state_buffer {
336 uint32_t num_elements;
337 struct guest_state_element elements[];
338 } QEMU_PACKED;
/* Actual buffer plus some metadata about the request */
struct guest_state_request {
    struct guest_state_buffer *gsb;
    int64_t buf;
    int64_t len;
    uint16_t flags;
};
/*
 * Register state for entering a nested guest with H_ENTER_NESTED.
 * New member must be added at the end.
 */
struct kvmppc_hv_guest_state {
    uint64_t version;      /* version of this structure layout, must be first */
    uint32_t lpid;
    uint32_t vcpu_token;
    /* These registers are hypervisor privileged (at least for writing) */
    uint64_t lpcr;
    uint64_t pcr;
    uint64_t amor;
    uint64_t dpdes;
    uint64_t hfscr;
    int64_t tb_offset;
    uint64_t dawr0;
    uint64_t dawrx0;
    uint64_t ciabr;
    uint64_t hdec_expiry;
    uint64_t purr;
    uint64_t spurr;
    uint64_t ic;
    uint64_t vtb;
    uint64_t hdar;
    uint64_t hdsisr;
    uint64_t heir;
    uint64_t asdr;
    /* These are OS privileged but need to be set late in guest entry */
    uint64_t srr0;
    uint64_t srr1;
    uint64_t sprg[4];
    uint64_t pidr;
    uint64_t cfar;
    uint64_t ppr;
    /* Version 1 ends here */
    uint64_t dawr1;
    uint64_t dawrx1;
    /* Version 2 ends here */
};

/* Latest version of hv_guest_state structure */
#define HV_GUEST_STATE_VERSION  2
/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
struct kvmppc_pt_regs {
    uint64_t gpr[32];
    uint64_t nip;
    uint64_t msr;
    uint64_t orig_gpr3;    /* Used for restarting system calls */
    uint64_t ctr;
    uint64_t link;
    uint64_t xer;
    uint64_t ccr;
    uint64_t softe;        /* Soft enabled/disabled */
    uint64_t trap;         /* Reason for being here */
    uint64_t dar;          /* Fault registers */
    uint64_t dsisr;        /* on 4xx/Book-E used for ESR */
    uint64_t result;       /* Result of a system call */
};
409 * nested_ppc_state is used to save the host CPU state before switching it to
410 * the guest CPU state, to be restored on H_ENTER_NESTED exit.
412 struct nested_ppc_state {
413 uint64_t gpr[32];
414 uint64_t lr;
415 uint64_t ctr;
416 uint64_t cfar;
417 uint64_t msr;
418 uint64_t nip;
419 uint32_t cr;
421 uint64_t xer;
423 uint64_t lpcr;
424 uint64_t lpidr;
425 uint64_t pidr;
426 uint64_t pcr;
427 uint64_t dpdes;
428 uint64_t hfscr;
429 uint64_t srr0;
430 uint64_t srr1;
431 uint64_t sprg0;
432 uint64_t sprg1;
433 uint64_t sprg2;
434 uint64_t sprg3;
435 uint64_t ppr;
437 int64_t tb_offset;
438 /* Nested PAPR API */
439 uint64_t amor;
440 uint64_t dawr0;
441 uint64_t dawrx0;
442 uint64_t ciabr;
443 uint64_t purr;
444 uint64_t spurr;
445 uint64_t ic;
446 uint64_t vtb;
447 uint64_t hdar;
448 uint64_t hdsisr;
449 uint64_t heir;
450 uint64_t asdr;
451 uint64_t dawr1;
452 uint64_t dawrx1;
453 uint64_t dexcr;
454 uint64_t hdexcr;
455 uint64_t hashkeyr;
456 uint64_t hashpkeyr;
457 ppc_vsr_t vsr[64] QEMU_ALIGNED(16);
458 uint64_t ebbhr;
459 uint64_t tar;
460 uint64_t ebbrr;
461 uint64_t bescr;
462 uint64_t iamr;
463 uint64_t amr;
464 uint64_t uamor;
465 uint64_t dscr;
466 uint64_t fscr;
467 uint64_t pspb;
468 uint64_t ctrl;
469 uint64_t vrsave;
470 uint64_t dar;
471 uint64_t dsisr;
472 uint64_t pmc1;
473 uint64_t pmc2;
474 uint64_t pmc3;
475 uint64_t pmc4;
476 uint64_t pmc5;
477 uint64_t pmc6;
478 uint64_t mmcr0;
479 uint64_t mmcr1;
480 uint64_t mmcr2;
481 uint64_t mmcra;
482 uint64_t sdar;
483 uint64_t siar;
484 uint64_t sier;
485 uint32_t vscr;
486 uint64_t fpscr;
487 int64_t dec_expiry_tb;
/* Guest-physical location and size of a Run VCPU input/output buffer. */
struct SpaprMachineStateNestedGuestVcpuRunBuf {
    uint64_t addr;
    uint64_t size;
};
495 typedef struct SpaprMachineStateNestedGuestVcpu {
496 bool enabled;
497 struct nested_ppc_state state;
498 struct SpaprMachineStateNestedGuestVcpuRunBuf runbufin;
499 struct SpaprMachineStateNestedGuestVcpuRunBuf runbufout;
500 int64_t tb_offset;
501 uint64_t hdecr_expiry_tb;
502 } SpaprMachineStateNestedGuestVcpu;
504 struct guest_state_element_type {
505 uint16_t id;
506 int size;
507 #define GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE 0x1
508 #define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x2
509 uint16_t flags;
510 void *(*location)(SpaprMachineStateNestedGuest *, target_ulong);
511 size_t offset;
512 void (*copy)(void *, void *, bool);
513 uint64_t mask;
516 void spapr_exit_nested(PowerPCCPU *cpu, int excp);
517 typedef struct SpaprMachineState SpaprMachineState;
518 bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
519 target_ulong lpid, ppc_v3_pate_t *entry);
520 uint8_t spapr_nested_api(SpaprMachineState *spapr);
521 void spapr_nested_gsb_init(void);
522 bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
523 target_ulong lpid, ppc_v3_pate_t *entry);
524 #endif /* HW_SPAPR_NESTED_H */