/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * inclusion, from the Book3S code. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
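
/*
 * Illustrative note (an assumption, not from the original header): user
 * space plants this opcode at a breakpoint address via
 * KVM_SET_GUEST_DEBUG; when instruction emulation later encounters the
 * opcode it can exit to user space with KVM_EXIT_DEBUG instead of
 * queueing a program interrupt to the guest.
 */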
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong. go again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);

extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			       swab32(vcpu->arch.last_inst) :
			       vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
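
/*
 * Illustrative caller pattern (an assumption, not from the original
 * header): emulation paths typically bail out and retry when the fetch
 * could not be completed, e.g.
 *
 *	u32 inst;
 *	int ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *	if (ret != EMULATE_DONE)
 *		return ret;
 *	... decode and emulate inst ...
 */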
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
#define one_reg_size(id) \
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
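
/*
 * For illustration (not from the original header): one_reg_size()
 * decodes the size field of a ONE_REG id, so a KVM_REG_SIZE_U64 id
 * yields 8 and get_reg_val()/set_reg_val() round-trip the value through
 * the dval member, while a KVM_REG_SIZE_U32 id yields 4 and uses wval.
 */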
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
#else
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
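
/*
 * For illustration (not from the original header): on a non-BOOKE-HV
 * build, SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0) expands to
 * kvmppc_get_sprg0()/kvmppc_set_sprg0() accessors that byteswap
 * vcpu->arch.shared->sprg0 as needed; on a BOOKE-HV build the same
 * accessors read and write the guest SPR SPRN_GSPRG0 directly.
 */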
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}
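
/*
 * Illustrative ordering (an assumption, not from the original header):
 * a run loop first calls kvmppc_prepare_to_enter(), which returns with
 * interrupts hard-disabled, bails out if that fails, and only then
 * calls kvmppc_fix_ee_before_entry() so the lazy-EE state matches the
 * still-disabled interrupts before entering the guest.
 */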
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
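
/*
 * Illustrative note (not from the original header): this is the indexed
 * effective-address computation of X-form loads and stores,
 * EA = (ra ? GPR[ra] : 0) + GPR[rb], truncated to 32 bits when the
 * guest MSR's 64-bit-mode bit (SF on Book3S, CM on Book3E) is clear.
 */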
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */