/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>

#include "kvm-s390.h"
#include "gaccess.h"

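/*
 * Emulate LCTLG: decode base, displacement and register range from the
 * intercepted instruction words (ipa/ipb) and load the full 64-bit
 * guest control registers reg1..reg3 from guest memory.
 */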
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;
	if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
		return -EOPNOTSUPP;

	useraddr = disp2;
	if (base2)
		useraddr += vcpu->run->s.regs.gprs[base2];

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
		   disp2);

	do {
		rc = get_guest_u64(vcpu, useraddr,
				   &vcpu->arch.sie_block->gcr[reg]);
		if (rc == -EFAULT) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

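/*
 * Emulate LCTL: same operand decoding as handle_lctlg, but only the
 * lower 32 bits of each guest control register are replaced.
 */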
static int handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	useraddr = disp2;
	if (base2)
		useraddr += vcpu->run->s.regs.gprs[base2];

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
		   disp2);

	reg = reg1;
	do {
		rc = get_guest_u32(vcpu, useraddr, &val);
		if (rc == -EFAULT) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

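/* handlers for intercepted instructions, indexed by the first opcode byte */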
static intercept_handler_t instruction_handlers[256] = {
	[0x01] = kvm_s390_handle_01,
	[0x83] = kvm_s390_handle_diag,
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_b2,
	[0xb7] = handle_lctl,
	[0xe5] = kvm_s390_handle_e5,
	[0xeb] = handle_lctlg,
};

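/*
 * Intercepts that need no in-kernel handling: only account the exit in
 * the per-vcpu statistics and return to the SIE loop.
 */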
static int handle_noop(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case 0x0:
		vcpu->stat.exit_null++;
		break;
	case 0x10:
		vcpu->stat.exit_external_request++;
		break;
	case 0x14:
		vcpu->stat.exit_external_interrupt++;
		break;
	default:
		break; /* nothing */
	}
	return 0;
}

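/*
 * Stop intercept: process the pending action bits (reload, stop, store
 * status) for this vcpu under the local interrupt lock.
 */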
static int handle_stop(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	vcpu->stat.exit_stop_request++;
	spin_lock_bh(&vcpu->arch.local_int.lock);

	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
		rc = SIE_INTERCEPT_RERUNVCPU;
		vcpu->run->exit_reason = KVM_EXIT_INTR;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
		atomic_set_mask(CPUSTAT_STOPPED,
				&vcpu->arch.sie_block->cpuflags);
		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
		rc = -EOPNOTSUPP;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
		/* store status must be called unlocked. Since local_int.lock
		 * only protects local_int.* and not guest memory we can give
		 * up the lock here */
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc >= 0)
			rc = -EOPNOTSUPP;
	} else
		spin_unlock_bh(&vcpu->arch.local_int.lock);
	return rc;
}

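/*
 * Validity intercept: for the "prefix pages not mapped" case fault in
 * both pages backing the guest prefix area so the vcpu can re-enter
 * SIE; all other validity codes are reported as unhandled.
 */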
static int handle_validity(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr;
	int viwhy = vcpu->arch.sie_block->ipb >> 16;
	int rc;

	vcpu->stat.exit_validity++;
	if (viwhy == 0x37) {
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
					      PAGE_SIZE);
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			goto out;
		}
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
					      PAGE_SIZE);
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			goto out;
		}
	} else
		rc = -EOPNOTSUPP;

out:
	if (rc)
		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
			   viwhy);
	return rc;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	vcpu->stat.exit_instruction++;
	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

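/* Program interrupt intercept: reinject the program check into the guest. */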
static int handle_prog(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_program_interruption++;
	return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}

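/*
 * Combined instruction and program interrupt intercept: handle both
 * parts; if the instruction could not be handled in the kernel, keep
 * the instruction intercept code pending for the caller.
 */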
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
	int rc, rc2;

	vcpu->stat.exit_instr_and_program++;
	rc = handle_instruction(vcpu);
	rc2 = handle_prog(vcpu);

	if (rc == -EOPNOTSUPP)
		vcpu->arch.sie_block->icptcode = 0x04;
	if (rc)
		return rc;
	return rc2;
}

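/* top-level intercept handlers, indexed by the intercept code >> 2 */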
static const intercept_handler_t intercept_funcs[] = {
	[0x00 >> 2] = handle_noop,
	[0x04 >> 2] = handle_instruction,
	[0x08 >> 2] = handle_prog,
	[0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_noop,
	[0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
};

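/*
 * Entry point for SIE intercept handling: validate the intercept code
 * and dispatch to the matching handler in intercept_funcs[].
 */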
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	intercept_handler_t func;
	u8 code = vcpu->arch.sie_block->icptcode;

	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
		return -EOPNOTSUPP;
	func = intercept_funcs[code >> 2];
	if (func)
		return func(vcpu);
	return -EOPNOTSUPP;
}