/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};
#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)
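/*
 * Illustrative only: a minimal sketch of how a 64-bit guest VSID could be
 * folded down to a SID_MAP_BITS-wide index into sid_map[]. The kernel's
 * real hash lives in the book3s_{32,64}_mmu_host.c code; the XOR-fold below
 * is an assumption chosen for clarity, not the actual function. Kept under
 * "#if 0" so it has no effect on the header.
 */
#if 0
static inline u16 example_sid_map_index(u64 gvsid)
{
	/* XOR-fold the 64-bit guest VSID, then mask to the table size. */
	u64 h = gvsid ^ (gvsid >> 16) ^ (gvsid >> 32) ^ (gvsid >> 48);

	return (u16)(h & SID_MAP_MASK);
}
#endif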
#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif
struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};
/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock (see the sketch after
 * this struct).
 */
struct kvmppc_vcore {
	int entry_exit_map;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	struct swait_queue_head wq;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};
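/*
 * Illustrative only: a minimal sketch (not the kernel's actual entry path)
 * of the lock-free claim described in the entry_exit_map comment above. A
 * thread's entry bit may only be set while the exit half of the map is
 * still 0; cmpxchg() retries if another thread raced in between. Kept
 * under "#if 0" so it has no effect on the header.
 */
#if 0
static inline bool example_claim_entry(struct kvmppc_vcore *vc, int thread)
{
	int old, new;

	do {
		old = vc->entry_exit_map;
		if (old >> 8)			/* some thread already exited */
			return false;
		new = old | (1 << thread);	/* set our entry bit */
	} while (cmpxchg(&vc->entry_exit_map, old, new) != old);

	return true;
}
#endif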
struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
#else
	u64 proto_vsid_first;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
};
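/*
 * The values below are OR'd into shadow VSIDs/virtual pages so that
 * translations made in different modes (real mode, BAT, 64k pages, 1T
 * segments, problem state) for the same guest segment never alias each
 * other in the host MMU hash.
 */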
#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL
extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
					  struct kvm_vcpu *vcpu,
					  unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					gva_t eaddr, void *to, void *from,
					unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
				      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, u64 root,
				      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift, unsigned int lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			     unsigned int shift,
			     const struct kvm_memory_slot *memslot,
			     unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
				    bool writing, unsigned long gpa,
				    unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
			unsigned long gpa,
			struct kvm_memory_slot *memslot,
			bool writing, bool kvm_ro,
			pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
				      unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			   unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			 unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}
/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);

static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}
extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B
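/*
 * Illustrative only: a hedged sketch of how a guest might issue an OSI
 * hypercall using the magic values above. This is not code from this file;
 * the clobber list is an assumption, and a real caller must also follow the
 * OSI calling convention for the remaining argument registers. Kept under
 * "#if 0" so it has no effect on the header.
 */
#if 0
static inline long example_osi_hypercall(void)
{
	register unsigned long r3 asm("r3") = OSI_SC_MAGIC_R3;
	register unsigned long r4 asm("r4") = OSI_SC_MAGIC_R4;

	/* 'sc' traps to the hypervisor; r3/r4 carry the OSI magic. */
	asm volatile("sc" : "+r" (r3), "+r" (r4) : : "memory");

	return r3;
}
#endif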
#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000
/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 5..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;

	return packed_id;
}
#endif /* __ASM_KVM_BOOK3S_H__ */