/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
/*
 * Compute the return address and emulate the branch if required.
 * This function should only be called when the faulting instruction is
 * in a branch delay slot (i.e. CP0_Cause.BD is set).
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/* Read the instruction */
	insn.word = kvm_get_inst((u32 *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		break;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		break;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		break;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		break;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Compact branches not supported before R6 */
		break;
#endif
	}

	return nextpc;

unaligned:
	kvm_err("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
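/*
 * Worked example (illustrative, not from the original source): for a taken
 * conditional branch the target is epc + 4 + (simmediate << 2). A beq at
 * epc = 0x80001000 with simmediate = -2 therefore resumes at
 * 0x80001004 - 8 = 0x80000ffc, while a not-taken branch skips the delay slot
 * and resumes at epc + 8.
 */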
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}
/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}
/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
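/*
 * Worked example (illustrative only, not part of the original source): with
 * the default count_hz of 100 MHz set up by kvm_mips_init_count() below,
 * count_period = NSEC_PER_SEC * 2^32 / count_hz ~= 42949672960 ns, i.e.
 * roughly 42.9 seconds. Any delta below that bound satisfies
 * delta * count_hz <= NSEC_PER_SEC * 2^32 < 2^62, so the 64-bit
 * multiplication above cannot overflow before the div_u64().
 */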
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}
/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}
/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}
/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already have been handled when the hrtimer was frozen.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
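/*
 * Illustrative note (not in the original source): the
 * (u64)(u32)(compare - count - 1) + 1 expression computes how many CP0_Count
 * ticks remain until the next Compare match, wrapping 0 up to 2^32. For
 * example, if compare == count the guest expects a full timer period, and the
 * expression yields 0xffffffff + 1 = 2^32 ticks rather than 0.
 */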
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at count 0 */
	kvm_mips_write_count(vcpu, 0);
}
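/*
 * Illustrative note (not in the original source): with count_hz = 100 MHz,
 * count_period = ((u64)NSEC_PER_SEC << 32) / 100000000 ~= 42949672960 ns,
 * i.e. the 32-bit CP0_Count register wraps roughly every 43 seconds of guest
 * time at this default frequency.
 */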
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	ktime_t now;
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);

	kvm_write_c0_guest_compare(cop0, compare);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}
/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}
/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}
/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}
/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}
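/*
 * Usage note (an assumption based on the callback shape above, not on code in
 * this file): this helper is intended to be called from the vcpu's
 * comparecount_timer hrtimer callback once the timer interrupt has been
 * queued, so returning HRTIMER_RESTART re-arms the timer exactly one
 * count_period after its previous expiry.
 */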
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}
/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long pc = vcpu->arch.pc;

	kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
}
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;
	int index;

	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	unsigned long pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}
/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}
/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 and ULRI are optional */
	unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}
/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* KScrExist */
	mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;

	return mask;
}
/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}
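/*
 * Usage sketch (mirrors the Config5 MTC0 handling further down in this file):
 * the wrmask folds a guest write into the existing value so that only the
 * permitted bits can change:
 *
 *	wrmask = kvm_mips_config5_wrmask(vcpu);
 *	change = (new_val ^ old_val) & wrmask;
 *	val    = old_val ^ change;	(read-only bits retain old_val)
 */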
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc, u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] =
					(s32)kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

			trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			break;
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);

			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
					kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				u32 nasid =
					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      KVM_ENTRYHI_ASID) != nasid)) {
					trace_kvm_asid_change(vcpu,
						kvm_read_c0_guest_entryhi(cop0)
							& KVM_ENTRYHI_ASID,
						nasid);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;

				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;

				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,
							  val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				u32 old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
				u32 mask = MIPS_HWRENA_CPUNUM |
					   MIPS_HWRENA_SYNCISTEP |
					   MIPS_HWRENA_CC |
					   MIPS_HWRENA_CCRES;

				if (kvm_read_c0_guest_config3(cop0) &
				    MIPS_CONF3_ULRI)
					mask |= MIPS_HWRENA_ULR;
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}
			break;
1297 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1298 vcpu
->arch
.pc
, rt
, rd
, sel
);
1299 trace_kvm_hwr(vcpu
, KVM_TRACE_DMTC0
,
1300 KVM_TRACE_COP0(rd
, sel
),
1301 vcpu
->arch
.gprs
[rt
]);
		case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0)
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
			/* EI */
			if (inst.mfmc0_format.sc) {
				kvm_debug("[%#lx] mfmc0_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmc0_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;
		case wrpgpr_op:
			{
				u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				u32 pss =
					(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
1344 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1345 vcpu
->arch
.pc
, inst
.c0r_format
.rs
);
1352 /* Rollback PC only if emulation was unsuccessful */
1353 if (er
== EMULATE_FAIL
)
1354 vcpu
->arch
.pc
= curr_pc
;
1358 * This is for special instructions whose emulation
1359 * updates the PC, so do not overwrite the PC under
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	u32 rt;
	u32 bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = inst.i_format.rt;

	switch (inst.i_format.opcode) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);

		break;
	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u32 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;
	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u16 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;
1464 kvm_err("Store not yet supported (inst=0x%08x)\n",
1470 /* Rollback PC if emulation was unsuccessful */
1471 if (er
== EMULATE_FAIL
)
1472 vcpu
->arch
.pc
= curr_pc
;
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	u32 op, rt;
	u32 bytes;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;
	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;
	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc, u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);
	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == Index_Writeback_Inv) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == Cache_D)
			r4k_blast_dcache();
		else if (cache == Cache_I)
			r4k_blast_icache();
		else {
			kvm_err("%s: unsupported CACHE INDEX operation\n",
				__func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}
	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
		    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
			kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
				__func__, va, vcpu, read_c0_entryhi());
			er = EMULATE_FAIL;
			preempt_enable();
			goto done;
		}
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
			goto skip_fault;

		/*
		 * If address not in the guest TLB, then give the guest a fault,
		 * the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & KVM_ENTRYHI_ASID));

		if (index < 0) {
			vcpu->arch.host_cp0_badvaddr = va;
			vcpu->arch.pc = curr_pc;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				vcpu->arch.host_cp0_badvaddr = va;
				vcpu->arch.pc = curr_pc;
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			}
			/*
			 * We fault an entry from the guest tlb to the
			 * shadow host TLB
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,
					read_c0_entryhi());
				er = EMULATE_FAIL;
				preempt_enable();
				goto done;
			}
		}
	} else {
		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto done;
	}
skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op_inst == Hit_Invalidate_I) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
	}

	preempt_enable();
done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for exceptions whose emulation updates the PC, so do not
	 * overwrite the PC under any circumstances
	 */
	return er;
}
enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
#else
	case spec3_op:
		switch (inst.spec3_format.func) {
		case cache6_op:
			++vcpu->stat.cache_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_mips_emulate_cache(inst, opc, cause, run,
						    vcpu);
			break;
		default:
			goto unknown;
		}
		break;
unknown:
#endif

	default:
		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
			inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_syscall(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_SYS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
						 u32 *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
						 u32 *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
	int index;

	/* If address not in the guest TLB, then we are in trouble */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}
#endif

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}
enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_CPU << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_RI << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_BP << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_TR << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver TRAP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSAFPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_FPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver FPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	union mips_instruction inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to roll back the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	if (inst.word == KVM_INVALID_INST) {
		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if (inst.r_format.opcode == spec3_op &&
	    inst.r_format.func == rdhwr_op &&
	    inst.r_format.rs == 0 &&
	    (inst.r_format.re >> 3) == 0) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = inst.r_format.rd;
		int rt = inst.r_format.rt;
		int sel = inst.r_format.re & 0x7;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case MIPS_HWR_CPUNUM:		/* CPU number */
			arch->gprs[rt] = vcpu->vcpu_id;
			break;
		case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case MIPS_HWR_CC:		/* Read count register */
			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
			break;
		case MIPS_HWR_CCRES:		/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case MIPS_HWR_ULR:		/* Read UserLocal register */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}

		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
			      vcpu->arch.gprs[rt]);
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n",
			  opc, inst.word);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Roll back the PC (if in a branch delay slot the PC already points to
	 * the branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
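/*
 * Illustrative example (an assumption about typical guests, not taken from
 * this file): glibc's TLS code reads the UserLocal hardware register with
 *
 *	rdhwr	$3, $29
 *
 * On cores where that traps as a Reserved Instruction, the guest exits to
 * kvm_mips_handle_ri() above, which decodes the RDHWR, checks the guest's
 * HWREna mask when the guest is in user mode, and supplies the value (here
 * from kvm_read_c0_guest_userlocal()) before resuming. Anything it cannot
 * satisfy is handed back to the guest as a real RI exception via
 * kvm_mips_emulate_ri_exc().
 */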
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(s32 *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *) run->mmio.data;
		else
			*gpr = *(u16 *) run->mmio.data;
		break;

	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
			  vcpu->mmio_needed);

done:
	return er;
}
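/*
 * Note on the width switch above: vcpu->mmio_needed == 2 marks a signed
 * load (e.g. LB/LH), so the MMIO payload is narrowed through s8/s16 and
 * sign-extended into the GPR; otherwise (e.g. LBU/LHU) the u8/u16 cast
 * gives zero extension. The 4-byte case goes through an s32 cast so it is
 * likewise sign-extended whenever the GPR is wider than 32 bits.
 */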
static enum emulation_result kvm_mips_emulate_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
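/*
 * Unlike the fixed-ExcCode helpers earlier in this file, this generic
 * deliverer takes the exception code from the 'cause' argument and also
 * latches host_cp0_badvaddr into the guest's BadVAddr. That is what lets
 * kvm_mips_check_privilege() below rewrite the ExcCode field of 'cause'
 * (for example turning a TLB refill into an address error) before
 * re-delivering the fault here.
 */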
enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case EXCCODE_INT:
		case EXCCODE_SYS:
		case EXCCODE_BP:
		case EXCCODE_RI:
		case EXCCODE_TR:
		case EXCCODE_MSAFPE:
		case EXCCODE_FPE:
		case EXCCODE_MSADIS:
			break;

		case EXCCODE_CPU:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case EXCCODE_MOD:
			break;

		case EXCCODE_TLBL:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_TLBS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_ADES:
			kvm_debug("%s: address error ST @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case EXCCODE_ADEL:
			kvm_debug("%s: address error LD @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL)
		kvm_mips_emulate_exc(cause, opc, run, vcpu);

	return er;
}
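/*
 * Summary of the policy above, for guest user mode: a TLB refill on an
 * address in the guest kernel range (>= KVM_GUEST_KSEG0) is rewritten into
 * an address error (AdEL/AdES) before delivery; address errors are always
 * re-delivered, but one whose page is the commpage is first rewritten into
 * the matching TLB exception so the guest faults on the address it used;
 * a coprocessor-unusable fault only counts as a privilege failure when the
 * CE field selects CP0; anything not on the explicit list is treated as a
 * privilege failure and delivered to the guest via kvm_mips_emulate_exc().
 */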
/*
 * User Address (UA) fault: this can happen if
 * (1) the TLB entry is not present/valid in both the Guest and shadow host
 *     TLBs, in which case we pass the fault on to the guest kernel and let
 *     it handle it;
 * (2) the TLB entry is present in the Guest TLB but not in the shadow, in
 *     which case we inject the entry from the Guest TLB into the shadow
 *     host TLB.
 */
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, and if the entry is not there
	 * then send the guest an exception. The guest exception handler should
	 * then inject an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
		      (va & VPN2_MASK) |
		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
		       KVM_ENTRYHI_ASID));
	if (index < 0) {
		if (exccode == EXCCODE_TLBL) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == EXCCODE_TLBS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid; if not, then set up a TLB
		 * invalid exception to the guest.
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == EXCCODE_TLBL) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == EXCCODE_TLBS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
			/*
			 * OK, we have a Guest TLB entry; now inject it into the
			 * shadow host TLB.
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,