/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/string.h>
#include "kvm-s390.h"
#include "gaccess.h"
/*
 * Extends the address range given by *start and *stop to include the address
 * range starting with estart and the length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
	u64 estop;

	if (len > 0)
		len--;
	else
		len = 0;

	estop = estart + len;

	/* 0-0 range represents "not set" */
	if ((*start == 0) && (*stop == 0)) {
		*start = estart;
		*stop = estop;
	} else if (*start <= *stop) {
		/* increase the existing range */
		if (estart < *start)
			*start = estart;
		if (estop > *stop)
			*stop = estop;
	} else {
		/* "overflowing" interval, whereby *stop > *start */
		if (estart <= *stop) {
			if (estop > *stop)
				*stop = estop;
		} else if (estop > *start) {
			if (estart < *start)
				*start = estart;
		}
		/* minimize the range */
		else if ((estop - *stop) < (*start - estart))
			*stop = estop;
		else
			*start = estart;
	}
}
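/* the longest s390 instruction is 6 bytes */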
#define MAX_INST_SIZE 6
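/*
 * Enable instruction-fetch and branch PER events and extend the PER address
 * range (CR10/CR11) so that it covers all hardware breakpoints.
 */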
static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
		return;
	/*
	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
		*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
	*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;
	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;

		/*
		 * The instruction in front of the desired bp has to
		 * report instruction-fetching events
		 */
		if (start < MAX_INST_SIZE) {
			len += start;
			start = 0;
		} else {
			start -= MAX_INST_SIZE;
			len += MAX_INST_SIZE;
		}

		extend_address_range(cr10, cr11, start, len);
	}
}
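/*
 * Enable storage-alteration PER events and extend the PER address range so
 * that all hardware watchpoints are covered; if the guest already uses
 * storage alteration for special address spaces, watch the whole address
 * space instead.
 */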
static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
	    vcpu->arch.guestdbg.hw_wp_info == NULL)
		return;
	/* if host uses storage alteration for special address
	 * spaces, enable all events and give all to the guest */
	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr10 = 0;
		*cr11 = PSW_ADDR_INSN;
	} else {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr9 |= PER_EVENT_STORE;
	}
	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_wp_info[i].len;

		extend_address_range(cr10, cr11, start, len);
	}
}
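/* save the guest's original CR0 and CR9-CR11 before they get patched */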
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}
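/* restore the control registers saved by kvm_s390_backup_guest_per_regs() */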
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}
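/*
 * Merge the PER settings required for host-side debugging (single-stepping,
 * hardware breakpoints/watchpoints) into the shadow control registers.
 */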
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: Only take the guest's CR9-CR11 into account if the guest PSW
	 * has PER enabled; program zeroes otherwise. This reduces the amount
	 * of reported events. Need to intercept all PSW changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		/* disable timer (clock-comparator) interrupts */
		vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}
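/* maximum number of bytes saved per watchpoint for change detection */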
#define MAX_WP_SIZE 100
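/*
 * Import one watchpoint from userspace and save a copy of the guest memory
 * it covers, so that changes can be detected later.
 */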
static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret;

	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
	if (!wp_info->old_data)
		return -ENOMEM;
	/* try to backup the original value */
	ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
			 wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}
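/* maximum number of breakpoints/watchpoints accepted from userspace */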
#define MAX_BP_COUNT 50
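/*
 * Import the hardware breakpoint/watchpoint list from a struct
 * kvm_guest_debug provided by userspace and store it in the per-vcpu
 * debug state.
 */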
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;
	size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
	bp_data = kmalloc(size, GFP_KERNEL);
	if (!bp_data) {
		ret = -ENOMEM;
		goto error;
	}

	if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
		ret = -EFAULT;
		goto error;
	}

	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		default:
			break;
		}
	}
	size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
	if (size > 0) {
		wp_info = kmalloc(size, GFP_KERNEL);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
	if (size > 0) {
		bp_info = kmalloc(size, GFP_KERNEL);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}
	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	kfree(bp_data);
	return 0;
error:
	kfree(bp_data);
	kfree(wp_info);
	kfree(bp_info);
	return ret;
}
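/* free all imported breakpoint/watchpoint data */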
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}
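/*
 * Return whether addr lies within [a, b]; a > b denotes an "overflowing"
 * interval that wraps around the end of the address space.
 */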
static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval */
		return (addr >= a) || (addr <= b);
}
#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)
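/* find the hardware breakpoint (if any) that covers addr */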
static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			goto found;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			goto found;

		bp_info++;
	}

	return NULL;
found:
	return bp_info;
}
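/*
 * Re-read the guest memory behind each watchpoint and compare it with the
 * saved copy; return the first watchpoint whose data changed, or NULL.
 */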
static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest(vcpu, wp_info->phys_addr, temp,
				wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}
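/* turn the pending debug event into a KVM_EXIT_DEBUG exit to userspace */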
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}
#define per_bp_event(code) \
		(code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
#define per_write_wp_event(code) \
		(code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))
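/*
 * Check whether the intercepted PER event must be reported to userspace
 * (hardware breakpoint, watchpoint hit or single-step); if so, fill in
 * vcpu->run->debug.arch and return 1.
 */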
static int debug_exit_required(struct kvm_vcpu *vcpu)
{
	u32 perc = (vcpu->arch.sie_block->perc << 24);
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
	unsigned long peraddr = vcpu->arch.sie_block->peraddr;
	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* breakpoint missed */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;
exit_required:
	return 1;
}
#define guest_per_enabled(vcpu) \
		(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
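/*
 * Drop PER events that the guest's own PER configuration did not ask for,
 * so that only guest-demanded events are injected; clear the PER program
 * interruption code if nothing is left.
 */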
static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	u32 perc = vcpu->arch.sie_block->perc << 24;
	u64 peraddr = vcpu->arch.sie_block->peraddr;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* filter all events demanded by the guest */
	u32 guest_perc = perc & cr9 & PER_EVENT_MASK;
	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_EVENT_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_EVENT_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_EVENT_IFETCH &&
	    !in_addr_range(peraddr, cr10, cr11))
		guest_perc &= ~PER_EVENT_IFETCH;

	/* All other PER events will be given to the guest */
	/* TODO: Check altered address/address space */

	vcpu->arch.sie_block->perc = guest_perc >> 24;

	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
}
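/*
 * Called on an intercepted PER program interrupt: request a userspace debug
 * exit when host debugging demands it and filter the event before it is
 * given to the guest.
 */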
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
	if (debug_exit_required(vcpu))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	filter_guest_per_event(vcpu);
}