/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Extends the address range given by *start and *stop to include the address
 * range starting with estart and the length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
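/*
 * For example (assuming a non-wrapping range): with *start = 0x1000 and
 * *stop = 0x2000, extending by estart = 0x3000 and len = 0x10 grows the
 * range to 0x1000..0x300f, since estop = estart + len - 1 lies above *stop.
 */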
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
	u64 estop;

	if (len > 0)
		len = len - 1;

	estop = estart + len;

	/* 0-0 range represents "not set" */
	if ((*start == 0) && (*stop == 0)) {
		*start = estart;
		*stop = estop;
	} else if (*start <= *stop) {
		/* increase the existing range */
		if (estart < *start)
			*start = estart;
		if (estop > *stop)
			*stop = estop;
	} else {
		/* "overflowing" interval, whereby *stop > *start */
		if (estart <= *stop) {
			if (estop > *stop)
				*stop = estop;
		} else if (estop > *start) {
			if (estart < *start)
				*start = estart;
		}
		/* minimize the range */
		else if ((estop - *stop) < (*start - estart))
			*stop = estop;
		else
			*start = estart;
	}
}

#define MAX_INST_SIZE 6

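/*
 * Program all hardware breakpoints requested by userspace into the PER
 * (program-event-recording) controls of the guest: CR9 selects the event
 * types, CR10/CR11 hold the address range that triggers them.
 */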
static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
		return;

	/*
	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
		*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
	*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;

		/*
		 * The instruction in front of the desired bp has to
		 * report instruction-fetching events
		 */
		if (start < MAX_INST_SIZE) {
			len += start;
			start = 0;
		} else {
			start -= MAX_INST_SIZE;
			len += MAX_INST_SIZE;
		}

		extend_address_range(cr10, cr11, start, len);
	}
}

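/*
 * Program all hardware watchpoints requested by userspace: enable PER
 * storage-alteration events in CR9 and extend the CR10/CR11 range to
 * cover every watched address.
 */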
static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
	    vcpu->arch.guestdbg.hw_wp_info == NULL)
		return;

	/*
	 * If the host uses storage alteration for special address spaces,
	 * enable all events and give all to the guest.
	 */
	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr10 = 0;
		*cr11 = -1UL;
	} else {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr9 |= PER_EVENT_STORE;

		for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
			start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
			len = vcpu->arch.guestdbg.hw_wp_info[i].len;

			extend_address_range(cr10, cr11, start, len);
		}
	}
}

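/* Save the guest's debug-related control registers before they get patched. */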
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

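/* Restore the saved control registers once guest debugging is done. */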
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}

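/*
 * Merge the host's debugging needs (single-stepping, hardware break- and
 * watchpoints) into the guest's PER control registers before entering SIE.
 */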
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: if guest psw has per enabled, otherwise 0s!
	 * This reduces the amount of reported events.
	 * Need to intercept all psw changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		/* disable timer (clock-comparator) interrupts */
		vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = -1UL;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}

#define MAX_WP_SIZE 100

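/*
 * Copy one userspace watchpoint description into its arch-specific
 * representation and back up the current guest memory at the watched
 * address, so that later changes can be detected.
 */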
static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret = 0;

	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
	if (!wp_info->old_data)
		return -ENOMEM;
	/* try to backup the original value */
	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
			     wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}

#define MAX_BP_COUNT 50

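/*
 * Import the hardware break- and watchpoints described by userspace in
 * struct kvm_guest_debug into the vcpu's guestdbg state. Returns 0 on
 * success or a negative error code if the data cannot be copied or
 * validated.
 */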
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;

	bp_data = memdup_user(dbg->arch.hw_bp,
			      sizeof(*bp_data) * dbg->arch.nr_hw_bp);
	if (IS_ERR(bp_data))
		return PTR_ERR(bp_data);

	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		default:
			break;
		}
	}

	if (nr_wp > 0) {
		wp_info = kmalloc_array(nr_wp, sizeof(*wp_info), GFP_KERNEL);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	if (nr_bp > 0) {
		bp_info = kmalloc_array(nr_bp, sizeof(*bp_info), GFP_KERNEL);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}

	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}

	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	ret = 0;
	goto out;

error:
	kfree(bp_info);
	kfree(wp_info);
out:
	kfree(bp_data);
	return ret;
}

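/* Free all imported break- and watchpoint data and reset the counters. */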
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}

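/*
 * Check whether addr lies within the (possibly wrapping) range a..b, using
 * the same interval semantics as the PER address range in CR10/CR11.
 */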
static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval */
		return (addr <= a) && (addr >= b);
}

#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)

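/*
 * Return the hardware breakpoint covering addr, or NULL if no imported
 * breakpoint matches.
 */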
static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			goto found;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			goto found;

		bp_info++;
	}

	return NULL;
found:
	return bp_info;
}

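/*
 * Compare the current guest memory at each watchpoint against the backup
 * taken at import time; return the first watchpoint whose data changed,
 * or NULL if none did.
 */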
static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
				    wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}

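/* Turn a pending debug event into a KVM_EXIT_DEBUG exit to userspace. */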
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}

#define PER_CODE_MASK		(PER_EVENT_MASK >> 24)
#define PER_CODE_BRANCH		(PER_EVENT_BRANCH >> 24)
#define PER_CODE_IFETCH		(PER_EVENT_IFETCH >> 24)
#define PER_CODE_STORE		(PER_EVENT_STORE >> 24)
#define PER_CODE_STORE_REAL	(PER_EVENT_STORE_REAL >> 24)

#define per_bp_event(code) \
			(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
			(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))

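/*
 * Decide whether the current PER event must be reported to userspace as a
 * debug exit (hardware breakpoint, watchpoint or single-step). If so, fill
 * in the exit information in vcpu->run and return 1, otherwise return 0.
 */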
static int debug_exit_required(struct kvm_vcpu *vcpu)
{
	u8 perc = vcpu->arch.sie_block->perc;
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
	unsigned long peraddr = vcpu->arch.sie_block->peraddr;

	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* breakpoint missed */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;
exit_required:
	return 1;
}

#define guest_per_enabled(vcpu) \
			     (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)

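/*
 * Inject a PER instruction-fetching program interrupt for an intercepted
 * instruction, rewinding the PSW so that the PER address points at the
 * intercepted instruction itself.
 */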
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
{
	const u8 ilen = kvm_s390_get_ilen(vcpu);
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_PER,
		.per_code = PER_CODE_IFETCH,
		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
	};

	/*
	 * The PSW points to the next instruction, therefore the intercepted
	 * instruction generated a PER i-fetch event. PER address therefore
	 * points at the previous PSW address (could be an EXECUTE function).
	 */
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

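/*
 * Drop all PER event bits the guest did not ask for itself (or that fall
 * outside the guest's own PER address range), so that only genuinely
 * guest-requested events are forwarded.
 */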
static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	const u8 perc = vcpu->arch.sie_block->perc;
	u64 peraddr = vcpu->arch.sie_block->peraddr;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* filter all events, demanded by the guest */
	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;

	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_CODE_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_CODE_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_CODE_IFETCH &&
	    !in_addr_range(peraddr, cr10, cr11))
		guest_perc &= ~PER_CODE_IFETCH;

	/* All other PER events will be given to the guest */
	/* TODO: Check altered address/address space */

	vcpu->arch.sie_block->perc = guest_perc;

	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
}

#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)

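/*
 * Handle a PER program interrupt intercept: request a debug exit where
 * needed, filter the event bits for the guest, and suppress spurious
 * space-switch events that only exist because of host PER settings.
 */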
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
	int new_as;

	if (debug_exit_required(vcpu))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	filter_guest_per_event(vcpu);

	/*
	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
	 * a space-switch event. PER events enforce space-switch events
	 * for these instructions. So if no PER event for the guest is left,
	 * we might have to filter the space-switch element out, too.
	 */
	if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
		vcpu->arch.sie_block->iprcc = 0;
		new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;

		/*
		 * If the AS changed from / to home, we had RP, SAC or SACF
		 * instruction. Check primary and home space-switch-event
		 * controls. (theoretically home -> home produced no event)
		 */
		if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) &&
		    (pssec(vcpu) || hssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;

		/*
		 * PT, PTI, PR, PC instructions operate on primary AS only.
		 * Check if the primary-space-switch-event control was or
		 * got set.
		 */
		if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) &&
		    (pssec(vcpu) || old_ssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
	}
}