/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>

#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"
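
/*
 * Trap-and-emulate support: the guest runs unprivileged, so privileged
 * instructions (CP0 accesses, CACHE, WAIT, ERET, TLB operations) trap to
 * KVM and are decoded and emulated here.  Each emulation routine returns
 * an enum emulation_result: EMULATE_DONE on success, EMULATE_FAIL on
 * error, or EMULATE_DO_MMIO when the access must be completed in user
 * space.
 */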

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should be called only when the vcpu is executing in a
 * branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
	unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/*
	 * Read the instruction
	 */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/*
		 * jr and jalr are in r_format format.
		 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/*
		 * These are unconditional and in j_format.
		 */
	case jal_op:
		arch->gprs[31] = instpc + 8;
		/* Fall through */
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/*
		 * These are conditional and in i_format.
		 */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:		/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/*
		 * And now the FPA/cp1 branch instructions.
		 */
	case cop1_op:
		printk("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	printk("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	printk("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
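
/*
 * update_pc() advances the guest PC past the trapping instruction.  If the
 * exception was taken in a branch delay slot (CAUSEF_BD set in Cause), the
 * branch itself must be re-emulated to find the correct next PC.
 */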
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/* Every time the compare register is written to, we need to decide when to
 * fire the timer that represents timer ticks to the GUEST.
 */
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	/* If COUNT is enabled */
	if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
		hrtimer_start(&vcpu->arch.comparecount_timer,
			      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
	}

	return er;
}
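
/*
 * Emulate a guest ERET: clear EXL (or ERL) in the guest Status register and
 * resume at the guest EPC (or ErrorEPC when ERL was set), mirroring the
 * architectural exception-return behaviour.
 */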
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
		       vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;

	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vcpu_block(vcpu);

		/* If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return er;
}

/* XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
 * that we can catch it if things ever change.
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_FAIL;
	uint32_t pc = vcpu->arch.pc;

	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return er;
}
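
/*
 * The guest TLB is a software array (vcpu->arch.guest_tlb); entries written
 * by the guest are only faulted into the host (shadow) TLB on use, so any
 * entry being overwritten must first be invalidated there.
 */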

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		printk("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		       pc, index, kvm_read_c0_guest_entryhi(cop0),
		       kvm_read_c0_guest_entrylo0(cop0),
		       kvm_read_c0_guest_entrylo1(cop0),
		       kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/* Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return er;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

#if 1
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		return EMULATE_FAIL;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/* Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return er;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	enum emulation_result er = EMULATE_DONE;
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return er;
}
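
/*
 * Emulate a trapped coprocessor-0 instruction.  If the CO bit (bit 25) is
 * set, the funct field selects a TLB/ERET/WAIT operation; otherwise the
 * rs field (copz) selects an MFC0/MTC0-style register move.
 */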
enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL) {
		return er;
	}

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;

	/* Verify that the register is valid */
	if (rd > MIPS_CP0_DESAVE) {
		printk("Invalid rd: %d\n", rd);
		er = EMULATE_FAIL;
		goto done;
	}

	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			printk("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/* XXXKYMA: Run the Guest count register @ 1/4
				 * the rate of the host */
				vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
				  pc, rd, sel, rt, vcpu->arch.gprs[rt]);
			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				printk("Invalid TLB Index: %ld",
				       vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
				    vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
				    &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {

					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
						  kvm_read_c0_guest_entryhi(cop0) &
						  ASID_MASK,
						  vcpu->arch.gprs[rt] & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/* Linux doesn't seem to write into COUNT; we
				 * throw an error if we notice a write to COUNT
				 */
				/*er = EMULATE_FAIL; */
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_write_c0_guest_compare(cop0,
							   vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				kvm_write_c0_guest_status(cop0,
							  vcpu->arch.gprs[rt]);
				/* Make sure that CU1 and NMI bits are never set */
				kvm_clear_c0_guest_status(cop0,
							  (ST0_CU1 | ST0_NMI));

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			printk("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
			       vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmcz_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
				    kvm_read_c0_guest_status(cop0);
			}
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}
			break;

		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/* We don't support any shadow register sets,
				 * so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;

		default:
			printk("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
			       vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/*
	 * Rollback PC only if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}
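
/*
 * MMIO stores cannot be completed in the kernel: the faulting access is
 * decoded into run->mmio (physical address, length, data) and
 * EMULATE_DO_MMIO is returned so that user space can perform the write.
 */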
enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(uint8_t *) data);
		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint16_t *) data);
		break;

	default:
		printk("Store not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	/*
	 * Rollback PC if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

	return er;
}
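
/*
 * MMIO loads are split in two: this routine decodes the access and fills in
 * run->mmio, recording the destination register in io_gpr and the original
 * Cause value in pending_load_cause; kvm_mips_complete_mmio_load() writes
 * the data back once user space has satisfied the read.  mmio_needed
 * doubles as the sign flag: 2 requests a sign-extended (lb/lh) completion,
 * 1 a zero-extended (lbu/lhu) one.
 */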
enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		printk("Load not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

	mips32_SyncICache(CKSEG0ADDR(pa), 32);

	return 0;
}

#define MIPS_CACHE_OP_INDEX_INV		0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG	0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG	0x2
#define MIPS_CACHE_OP_IMP		0x3
#define MIPS_CACHE_OP_HIT_INV		0x4
#define MIPS_CACHE_OP_FILL_WB_INV	0x5
#define MIPS_CACHE_OP_HIT_HB		0x6
#define MIPS_CACHE_OP_FETCH_LOCK	0x7

#define MIPS_CACHE_ICACHE		0x0
#define MIPS_CACHE_DCACHE		0x1
#define MIPS_CACHE_SEC			0x3
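
/*
 * Fields of a CACHE instruction: bits 20:18 of the instruction hold the
 * cache operation (decoded with the MIPS_CACHE_OP_* values above) and bits
 * 17:16 select the cache (MIPS_CACHE_ICACHE/DCACHE/SEC).
 */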
enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	extern void (*r4k_blast_dcache) (void);
	extern void (*r4k_blast_icache) (void);
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = inst & 0xffff;
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE)
			r4k_blast_dcache();
		else if (cache == MIPS_CACHE_ICACHE)
			r4k_blast_icache();
		else {
			printk("%s: unsupported CACHE INDEX operation\n",
			       __func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {

		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
		}
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
			goto skip_fault;
		}

		/* If address not in the guest TLB, then give the guest a
		 * fault, the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/* Check if the entry is valid, if not then setup a
			 * TLB invalid exception to the guest */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/* We fault an entry from the guest tlb to the
				 * shadow host TLB */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		printk("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		       cache, op, base, arch->gprs[base], offset);
		preempt_enable();
		er = EMULATE_FAIL;
		goto dont_update_pc;
	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		printk("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		       cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

	preempt_enable();

	return er;

dont_update_pc:
	/*
	 * Rollback PC
	 */
	vcpu->arch.pc = curr_pc;
done:
	return er;
}
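
/*
 * Top-level dispatcher for trapped instructions: fetch the instruction
 * (adjusting for a branch delay slot) and hand it to the CP0, load/store or
 * CACHE emulator based on its major opcode.
 */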
enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD) {
		opc += 1;
	}

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
	default:
		printk("Instruction emulation not supported (%p/%#x)\n", opc,
		       inst);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
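
/*
 * The exception-delivery helpers below all follow the same pattern as a
 * real MIPS core: save the PC into the guest EPC, set Status.EXL, record
 * the branch-delay state in Cause.BD, set the ExcCode field, and redirect
 * the guest to its exception vector at KVM_GUEST_KSEG0 + 0x180 (or + 0x0
 * for a TLB refill taken with EXL clear).
 */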
enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_SYSCALL << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		printk("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
	    (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	enum emulation_result er = EMULATE_DONE;
	int index;

	/*
	 * If the address is not in the guest TLB, then we are in trouble
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return er;
}

enum emulation_result
kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_RES_INST << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_BREAK << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		printk("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
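
/*
 * A guest RDHWR traps as a reserved instruction; emulate the hardware
 * registers Linux userland relies on (CPUNum, SYNCI step, Count, CCRes and
 * the $29 UserLocal/TLS register) using the field masks defined above.
 */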
enum emulation_result
kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
		   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	uint32_t inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	if (inst == KVM_INVALID_INST) {
		printk("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int rd = (inst & RD) >> 11;
		int rt = (inst & RT) >> 16;
		switch (rd) {
		case 0:	/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case 1:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case 2:	/* Read count register */
			printk("RDHWR: Count register\n");
			arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
			break;
		case 3:	/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case 29:	/* UserLocal */
#if 1
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
#else
			/* UserLocal not implemented */
			er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
#endif
			break;

		default:
			printk("RDHWR not supported\n");
			er = EMULATE_FAIL;
			break;
		}
	} else {
		printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
		er = EMULATE_FAIL;
	}

	/*
	 * Rollback PC only if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}
	return er;
}

enum emulation_result
kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;

	if (run->mmio.len > sizeof(*gpr)) {
		printk("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(int32_t *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int16_t *) run->mmio.data;
		else
			*gpr = *(uint16_t *) run->mmio.data;
		break;

	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int8_t *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
			  vcpu->mmio_needed);

done:
	return er;
}
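
/*
 * Generic exception delivery: injects an arbitrary ExcCode into the guest
 * using the usual EPC/EXL/vector sequence.  Used to reflect the privilege
 * failures detected in kvm_mips_check_privilege() below.
 */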
static enum emulation_result
kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		printk("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
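
/*
 * Privilege check: when the guest is in user mode, a TLB miss or address
 * error on a guest kernel-segment address must not be handled as a normal
 * user fault; it is converted to an address error (or a commpage TLB miss)
 * and reflected back to the guest via kvm_mips_emulate_exc().
 */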
enum emulation_result
kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case T_INT:
		case T_SYSCALL:
		case T_BREAK:
		case T_RES_INST:
			break;

		case T_COP_UNUSABLE:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case T_TLB_MOD:
			break;

		case T_TLB_LD_MISS:
			/* If we are accessing Guest kernel space, then send an
			 * address error exception to the guest */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: LD MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_TLB_ST_MISS:
			/* If we are accessing Guest kernel space, then send an
			 * address error exception to the guest */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: ST MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_ADDR_ERR_ST:
			printk("%s: address error ST @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
			} else {
				er = EMULATE_PRIV_FAIL;
			}
			break;
		case T_ADDR_ERR_LD:
			printk("%s: address error LD @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
			} else {
				er = EMULATE_PRIV_FAIL;
			}
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL) {
		kvm_mips_emulate_exc(cause, opc, run, vcpu);
	}
	return er;
}

/* User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result
kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

	/* KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB.  Check the Guest TLB: if the entry is not there,
	 * then send the guest an exception; the guest exception handler
	 * should then inject an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
					  (va & VPN2_MASK) |
					  (kvm_read_c0_guest_entryhi
					   (vcpu->arch.cop0) & ASID_MASK));
	if (index < 0) {
		if (exccode == T_TLB_LD_MISS) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == T_TLB_ST_MISS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			printk("%s: invalid exc code: %d\n", __func__, exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/* Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == T_TLB_LD_MISS) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == T_TLB_ST_MISS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				printk("%s: invalid exc code: %d\n", __func__,
				       exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
			/* OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
							     NULL);
		}
	}

	return er;
}