/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/facility.h>

typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);

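/*
 * Illustrative sketch: handlers of this type are collected in per-opcode
 * dispatch tables by the intercept code, roughly like the following
 * (table and handler names here are hypothetical):
 *
 *	static const intercept_handler_t handlers[256] = {
 *		[0x10] = handle_set_prefix,
 *	};
 *
 *	intercept_handler_t handler = handlers[vcpu->arch.sie_block->ipa & 0x00ff];
 *	if (handler)
 *		return handler(vcpu);
 */
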
/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & 0x10))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)

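/*
 * Illustrative usage (message text made up): both macros log into the
 * per-VM s390 debug feature buffer (kvm->arch.dbf); VCPU_EVENT additionally
 * prefixes the vcpu id and the current guest PSW.
 *
 *	VCPU_EVENT(vcpu, 3, "set prefix to 0x%x", prefix);
 */
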
static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}

#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}

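/*
 * Note: the SIE control block keeps the guest prefix shifted right by
 * GUEST_PREFIX_SHIFT, so the accessors above convert between that
 * representation and the full prefix address. Changing the prefix changes
 * which absolute pages back guest low core, hence the TLB flush and MMU
 * reload requests in kvm_s390_set_prefix().
 */
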
typedef u8 __bitwise ar_t;

static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

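/*
 * Note: for these base/displacement helpers the instruction text comes from
 * the SIE block's ipb field; the top nibble selects the base register and
 * the following 12 bits hold the unsigned displacement, which is what the
 * shifts and masks above extract.
 */
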
static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      ar_t *ar_b1, ar_t *ar_b2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);

	/* The displacement is a 20bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

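/*
 * Illustrative example: a raw 20-bit displacement of 0xfffff has bit
 * 0x80000 set, is widened to 0xffffffff by the addition above, and the
 * (long)(int) cast then makes it contribute -1 to the effective address.
 */
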
static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

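/*
 * Note: the condition code occupies PSW bits 18-19; counted from the least
 * significant end of the 64-bit mask word those are bit positions 45 and 44,
 * hence the two-bit field manipulated with "<< 44" above.
 */
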
/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac->mask) &&
		__test_facility(nr, kvm->arch.model.fac->list);
}

static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}

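/*
 * Illustrative example: facility bits are numbered from the most significant
 * bit of the list, so nr = 10 lands in byte 10 >> 3 == 1 and sets
 * 0x80 >> (10 & 7) == 0x20 in that byte.
 */
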
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
/* is cmma enabled */
bool kvm_s390_cmma_enabled(struct kvm *kvm);
unsigned long kvm_s390_fac_list_mask_size(void);
extern unsigned long kvm_s390_fac_list_mask[];

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
/* implemented in interrupt.c */
int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
			struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

#endif