/*
 * intercept.c - in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>

#include "kvm-s390.h"
#include "gaccess.h"

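/*
 * The guest's control registers are shadowed in the SIE block, so LCTL
 * and LCTLG are intercepted.  The handlers below re-decode the
 * instruction text saved by the hardware: register operands from
 * sie_block->ipa, base/displacement of the storage operand from
 * sie_block->ipb.
 */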
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;
	if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
		return -EOPNOTSUPP;

	useraddr = disp2;
	if (base2)
		useraddr += vcpu->arch.guest_gprs[base2];

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
		   disp2);

	do {
		rc = get_guest_u64(vcpu, useraddr,
				   &vcpu->arch.sie_block->gcr[reg]);
		if (rc == -EFAULT) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

static int handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	useraddr = disp2;
	if (base2)
		useraddr += vcpu->arch.guest_gprs[base2];

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
		   disp2);

	reg = reg1;
	do {
		rc = get_guest_u32(vcpu, useraddr, &val);
		if (rc == -EFAULT) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

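/*
 * Handlers for intercepted instructions, indexed by the first opcode
 * byte (sie_block->ipa >> 8).  Entries left NULL make
 * handle_instruction() return -EOPNOTSUPP.
 */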
static intercept_handler_t instruction_handlers[256] = {
	[0x83] = kvm_s390_handle_diag,
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_b2,
	[0xb7] = handle_lctl,
	[0xe5] = kvm_s390_handle_e5,
	[0xeb] = handle_lctlg,
};

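/*
 * Intercept codes that need no in-kernel action beyond updating the
 * exit statistics.
 */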
static int handle_noop(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case 0x0:
		vcpu->stat.exit_null++;
		break;
	case 0x10:
		vcpu->stat.exit_external_request++;
		break;
	case 0x14:
		vcpu->stat.exit_external_interrupt++;
		break;
	default:
		break; /* nothing */
	}
	return 0;
}

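/*
 * A stop request is described by the ACTION_* bits in
 * local_int.action_bits (set e.g. by the SIGP handling code): reload
 * the vcpu, stop it, and/or store its status.  The bits are protected
 * by local_int.lock.
 */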
static int handle_stop(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	vcpu->stat.exit_stop_request++;
	spin_lock_bh(&vcpu->arch.local_int.lock);

	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
		rc = SIE_INTERCEPT_RERUNVCPU;
		vcpu->run->exit_reason = KVM_EXIT_INTR;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
		atomic_set_mask(CPUSTAT_STOPPED,
				&vcpu->arch.sie_block->cpuflags);
		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
		rc = -EOPNOTSUPP;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
		/* store status must be called unlocked. Since local_int.lock
		 * only protects local_int.* and not guest memory we can give
		 * up the lock here */
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc >= 0)
			rc = -EOPNOTSUPP;
	} else
		spin_unlock_bh(&vcpu->arch.local_int.lock);
	return rc;
}

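/*
 * A validity intercept reports that the SIE block references guest
 * memory that is currently not accessible.  The handler below tries to
 * make the two prefix pages usable again via gmap_fault() and
 * fault_in_pages_writeable(); anything else stays unhandled and is
 * reported via VCPU_EVENT.
 */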
static int handle_validity(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr;
	int viwhy = vcpu->arch.sie_block->ipb >> 16;
	int rc;

	vcpu->stat.exit_validity++;
	if (viwhy == 0x37) {
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
					      PAGE_SIZE);
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			goto out;
		}
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
					      PAGE_SIZE);
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			goto out;
		}
	} else
		rc = -EOPNOTSUPP;

out:
	if (rc)
		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
			   viwhy);
	return rc;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	vcpu->stat.exit_instruction++;
	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_prog(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_program_interruption++;
	return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}

static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
	int rc, rc2;

	vcpu->stat.exit_instr_and_program++;
	rc = handle_instruction(vcpu);
	rc2 = handle_prog(vcpu);

	if (rc == -EOPNOTSUPP)
		vcpu->arch.sie_block->icptcode = 0x04;
	if (rc)
		return rc;
	return rc2;
}

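/*
 * Top-level dispatch table, indexed by icptcode >> 2; the intercept
 * codes delivered by the hardware are multiples of 4, which the
 * "code & 3" check in kvm_handle_sie_intercept() relies on.
 */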
static const intercept_handler_t intercept_funcs[] = {
	[0x00 >> 2] = handle_noop,
	[0x04 >> 2] = handle_instruction,
	[0x08 >> 2] = handle_prog,
	[0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_noop,
	[0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
};

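/*
 * Called from the vcpu run loop after each exit from SIE; returns
 * -EOPNOTSUPP for intercept codes that have no in-kernel handler.
 */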
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	intercept_handler_t func;
	u8 code = vcpu->arch.sie_block->icptcode;

	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
		return -EOPNOTSUPP;
	func = intercept_funcs[code >> 2];
	if (func)
		return func(vcpu);
	return -EOPNOTSUPP;
}