/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#include "trace.h"
/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should only be called when the vcpu is in a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
    unsigned long instpc)
{
    unsigned int dspcontrol;
    union mips_instruction insn;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    long epc = instpc;
    long nextpc = KVM_INVALID_INST;

    if (epc & 3)
        goto unaligned;

    /*
     * Read the instruction
     */
    insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

    if (insn.word == KVM_INVALID_INST)
        return KVM_INVALID_INST;

    switch (insn.i_format.opcode) {
        /*
         * jr and jalr are in r_format format.
         */
    case spec_op:
        switch (insn.r_format.func) {
        case jalr_op:
            arch->gprs[insn.r_format.rd] = epc + 8;
            /* Fall through */
        case jr_op:
            nextpc = arch->gprs[insn.r_format.rs];
            break;
        }
        break;

        /*
         * This group contains:
         * bltz_op, bgez_op, bltzl_op, bgezl_op,
         * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
         */
    case bcond_op:
        switch (insn.i_format.rt) {
        case bltz_op:
        case bltzl_op:
            if ((long)arch->gprs[insn.i_format.rs] < 0)
                epc = epc + 4 + (insn.i_format.simmediate << 2);
            else
                epc += 8;
            nextpc = epc;
            break;

        case bgez_op:
        case bgezl_op:
            if ((long)arch->gprs[insn.i_format.rs] >= 0)
                epc = epc + 4 + (insn.i_format.simmediate << 2);
            else
                epc += 8;
            nextpc = epc;
            break;

        case bltzal_op:
        case bltzall_op:
            arch->gprs[31] = epc + 8;
            if ((long)arch->gprs[insn.i_format.rs] < 0)
                epc = epc + 4 + (insn.i_format.simmediate << 2);
            else
                epc += 8;
            nextpc = epc;
            break;

        case bgezal_op:
        case bgezall_op:
            arch->gprs[31] = epc + 8;
            if ((long)arch->gprs[insn.i_format.rs] >= 0)
                epc = epc + 4 + (insn.i_format.simmediate << 2);
            else
                epc += 8;
            nextpc = epc;
            break;

        case bposge32_op:
            if (!cpu_has_dsp)
                goto sigill;

            dspcontrol = rddsp(0x01);

            if (dspcontrol >= 32) {
                epc = epc + 4 + (insn.i_format.simmediate << 2);
            } else
                epc += 8;
            nextpc = epc;
            break;
        }
        break;

        /*
         * These are unconditional and in j_format.
         */
    case jal_op:
        arch->gprs[31] = instpc + 8;
        /* Fall through */
    case j_op:
        epc += 4;
        epc >>= 28;
        epc <<= 28;
        epc |= (insn.j_format.target << 2);
        nextpc = epc;
        break;

        /*
         * These are conditional and in i_format.
         */
    case beq_op:
    case beql_op:
        if (arch->gprs[insn.i_format.rs] ==
            arch->gprs[insn.i_format.rt])
            epc = epc + 4 + (insn.i_format.simmediate << 2);
        else
            epc += 8;
        nextpc = epc;
        break;

    case bne_op:
    case bnel_op:
        if (arch->gprs[insn.i_format.rs] !=
            arch->gprs[insn.i_format.rt])
            epc = epc + 4 + (insn.i_format.simmediate << 2);
        else
            epc += 8;
        nextpc = epc;
        break;

    case blez_op:       /* not really i_format */
    case blezl_op:
        /* rt field assumed to be zero */
        if ((long)arch->gprs[insn.i_format.rs] <= 0)
            epc = epc + 4 + (insn.i_format.simmediate << 2);
        else
            epc += 8;
        nextpc = epc;
        break;

    case bgtz_op:
    case bgtzl_op:
        /* rt field assumed to be zero */
        if ((long)arch->gprs[insn.i_format.rs] > 0)
            epc = epc + 4 + (insn.i_format.simmediate << 2);
        else
            epc += 8;
        nextpc = epc;
        break;

        /*
         * And now the FPA/cp1 branch instructions.
         */
    case cop1_op:
        printk("%s: unsupported cop1_op\n", __func__);
        break;
    }

    return nextpc;

unaligned:
    printk("%s: unaligned epc\n", __func__);
    return nextpc;

sigill:
    printk("%s: DSP branch but not DSP ASE\n", __func__);
    return nextpc;
}
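
/*
 * Example of the branch arithmetic used above: for a conditional branch at
 * guest PC 0x80001000 with simmediate == 0x10, the taken target is
 * 0x80001000 + 4 + (0x10 << 2) == 0x80001044, while the not-taken path is
 * PC + 8, i.e. the instruction after the delay slot.
 */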
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
    unsigned long branch_pc;
    enum emulation_result er = EMULATE_DONE;

    if (cause & CAUSEF_BD) {
        branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
        if (branch_pc == KVM_INVALID_INST) {
            er = EMULATE_FAIL;
        } else {
            vcpu->arch.pc = branch_pc;
            kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
        }
    } else
        vcpu->arch.pc += 4;

    kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

    return er;
}
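
/*
 * Guest timer model used in this file: the guest CP0_Count value is derived
 * from the host counter (the mfc_op handler below returns
 * read_c0_count() >> 2), and guest timer ticks are approximated with a
 * periodic hrtimer rather than by matching Count against Compare exactly.
 */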
/*
 * Every time the compare register is written to, we need to decide when to
 * fire the timer that represents timer ticks to the GUEST.
 */
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    enum emulation_result er = EMULATE_DONE;

    /* If COUNT is enabled */
    if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
        hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
        hrtimer_start(&vcpu->arch.comparecount_timer,
                      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
    } else {
        hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
    }

    return er;
}
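
/*
 * Note on ERET ordering: architecturally ERET gives Status.ERL priority
 * over Status.EXL; the emulation below tests EXL first, which is fine for
 * guest Linux, where ERL is normally clear after early boot.
 */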
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    enum emulation_result er = EMULATE_DONE;

    if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
        kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                  kvm_read_c0_guest_epc(cop0));
        kvm_clear_c0_guest_status(cop0, ST0_EXL);
        vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

    } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
        kvm_clear_c0_guest_status(cop0, ST0_ERL);
        vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
    } else {
        printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
               vcpu->arch.pc);
        er = EMULATE_FAIL;
    }

    return er;
}
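
/*
 * WAIT idles the virtual CPU until an interrupt is pending: with no pending
 * exceptions the vcpu blocks in the host, and a KVM_REQ_UNHALT request sends
 * us back to userspace (KVM_EXIT_IRQ_WINDOW_OPEN) so pending I/O can be
 * checked.
 */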
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DONE;

    kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
              vcpu->arch.pending_exceptions);

    ++vcpu->stat.wait_exits;
    trace_kvm_exit(vcpu, WAIT_EXITS);
    if (!vcpu->arch.pending_exceptions) {
        vcpu->arch.wait = 1;
        kvm_vcpu_block(vcpu);

        /* If we are runnable, then definitely go off to user space to check
         * if any I/O interrupts are pending.
         */
        if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
            clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
            vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
        }
    }

    return er;
}
/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so
 * that we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    enum emulation_result er = EMULATE_FAIL;
    uint32_t pc = vcpu->arch.pc;

    printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));

    return er;
}
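
/*
 * The guest TLB (vcpu->arch.guest_tlb[]) is a pure software structure;
 * entries only take effect once they are faulted into the shadow host TLB.
 * Any write to a guest entry must therefore first invalidate a matching
 * shadow entry, which is what the kvm_mips_host_tlb_inv() calls below do.
 */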
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    int index = kvm_read_c0_guest_index(cop0);
    enum emulation_result er = EMULATE_DONE;
    struct kvm_mips_tlb *tlb = NULL;
    uint32_t pc = vcpu->arch.pc;

    if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
        printk("%s: illegal index: %d\n", __func__, index);
        printk
            ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
             pc, index, kvm_read_c0_guest_entryhi(cop0),
             kvm_read_c0_guest_entrylo0(cop0),
             kvm_read_c0_guest_entrylo1(cop0),
             kvm_read_c0_guest_pagemask(cop0));
        index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
    }

    tlb = &vcpu->arch.guest_tlb[index];

    /* Probe the shadow host TLB for the entry being overwritten, if one
     * matches, invalidate it */
    kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

    tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
    tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
    tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
    tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

    kvm_debug
        ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
         pc, index, kvm_read_c0_guest_entryhi(cop0),
         kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
         kvm_read_c0_guest_pagemask(cop0));

    return er;
}
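
/*
 * TLBWR picks the replacement index in software. Masking a random value
 * with (KVM_MIPS_GUEST_TLB_SIZE - 1) assumes a power-of-two guest TLB
 * size; the jiffies-based variant in the #else branch below is a cheaper
 * approximation.
 */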
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    enum emulation_result er = EMULATE_DONE;
    struct kvm_mips_tlb *tlb = NULL;
    uint32_t pc = vcpu->arch.pc;
    int index;

#if 1
    get_random_bytes(&index, sizeof(index));
    index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
    index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

    if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
        printk("%s: illegal index: %d\n", __func__, index);
        return EMULATE_FAIL;
    }

    tlb = &vcpu->arch.guest_tlb[index];

    /* Probe the shadow host TLB for the entry being overwritten, if one
     * matches, invalidate it */
    kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

    tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
    tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
    tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
    tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

    kvm_debug
        ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
         pc, index, kvm_read_c0_guest_entryhi(cop0),
         kvm_read_c0_guest_entrylo0(cop0),
         kvm_read_c0_guest_entrylo1(cop0));

    return er;
}
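
/*
 * TLBP probes the guest TLB for an entry matching EntryHi (VPN2 | ASID) and
 * writes the matching index back to the guest Index register; a negative
 * result leaves bit 31 (the Index.P "probe failure" bit) set.
 */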
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    long entryhi = kvm_read_c0_guest_entryhi(cop0);
    enum emulation_result er = EMULATE_DONE;
    uint32_t pc = vcpu->arch.pc;
    int index = -1;

    index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

    kvm_write_c0_guest_index(cop0, index);

    kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
              index);

    return er;
}
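
/*
 * CP0 decode used below (standard MIPS32 encoding): bits 31:26 are the
 * opcode (cop0_op), bit 25 is the CO bit, bits 25:21 are copz
 * (mfc0/mtc0/etc. when CO is clear), bits 20:16 rt, bits 15:11 rd, and
 * bits 2:0 sel. When CO is set, the low bits of the instruction select the
 * CP0 function (tlbr, tlbwi, tlbwr, tlbp, eret, wait).
 */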
enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
                     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    enum emulation_result er = EMULATE_DONE;
    int32_t rt, rd, copz, sel, co_bit, op;
    uint32_t pc = vcpu->arch.pc;
    unsigned long curr_pc;

    /*
     * Update PC and hold onto current PC in case there is
     * an error and we want to rollback the PC
     */
    curr_pc = vcpu->arch.pc;
    er = update_pc(vcpu, cause);
    if (er == EMULATE_FAIL) {
        return er;
    }

    copz = (inst >> 21) & 0x1f;
    rt = (inst >> 16) & 0x1f;
    rd = (inst >> 11) & 0x1f;
    sel = inst & 0x7;
    co_bit = (inst >> 25) & 1;

    /* Verify that the register is valid */
    if (rd > MIPS_CP0_DESAVE) {
        printk("Invalid rd: %d\n", rd);
        er = EMULATE_FAIL;
        goto done;
    }

    if (co_bit) {
        op = (inst) & 0xff;

        switch (op) {
        case tlbr_op:   /* Read indexed TLB entry */
            er = kvm_mips_emul_tlbr(vcpu);
            break;
        case tlbwi_op:  /* Write indexed */
            er = kvm_mips_emul_tlbwi(vcpu);
            break;
        case tlbwr_op:  /* Write random */
            er = kvm_mips_emul_tlbwr(vcpu);
            break;
        case tlbp_op:   /* TLB Probe */
            er = kvm_mips_emul_tlbp(vcpu);
            break;
        case rfe_op:
            printk("!!!COP0_RFE!!!\n");
            break;
        case eret_op:
            er = kvm_mips_emul_eret(vcpu);
            goto dont_update_pc;
        case wait_op:
            er = kvm_mips_emul_wait(vcpu);
            break;
        }
    } else {
        switch (copz) {
        case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
            cop0->stat[rd][sel]++;
#endif
            /* Get reg */
            if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
                vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
            } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
                vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
            } else {
                vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
            }

            kvm_debug
                ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
                 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
            break;

        case dmfc_op:
            vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
            break;

        case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
            cop0->stat[rd][sel]++;
#endif
            if ((rd == MIPS_CP0_TLB_INDEX)
                && (vcpu->arch.gprs[rt] >=
                    KVM_MIPS_GUEST_TLB_SIZE)) {
                printk("Invalid TLB Index: %ld",
                       vcpu->arch.gprs[rt]);
                er = EMULATE_FAIL;
                break;
            }
#define C0_EBASE_CORE_MASK 0xff
            if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
                /* Preserve CORE number */
                kvm_change_c0_guest_ebase(cop0,
                                          ~(C0_EBASE_CORE_MASK),
                                          vcpu->arch.gprs[rt]);
                printk("MTCz, cop0->reg[EBASE]: %#lx\n",
                       kvm_read_c0_guest_ebase(cop0));
            } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                uint32_t nasid =
                    vcpu->arch.gprs[rt] & ASID_MASK;
                if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
                    &&
                    ((kvm_read_c0_guest_entryhi(cop0) &
                      ASID_MASK) != nasid)) {
                    kvm_debug
                        ("MTCz, change ASID from %#lx to %#lx\n",
                         kvm_read_c0_guest_entryhi(cop0) &
                         ASID_MASK,
                         vcpu->arch.gprs[rt] & ASID_MASK);

                    /* Blow away the shadow host TLBs */
                    kvm_mips_flush_host_tlb(1);
                }
                kvm_write_c0_guest_entryhi(cop0,
                                           vcpu->arch.gprs[rt]);
            }
            /* Are we writing to COUNT */
            else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                /* Linux doesn't seem to write into COUNT, we throw an error
                 * if we notice a write to COUNT
                 */
                /*er = EMULATE_FAIL; */
            } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
                kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
                          pc, kvm_read_c0_guest_compare(cop0),
                          vcpu->arch.gprs[rt]);

                /* If we are writing to COMPARE */
                /* Clear pending timer interrupt, if any */
                kvm_mips_callbacks->dequeue_timer_int(vcpu);
                kvm_write_c0_guest_compare(cop0,
                                           vcpu->arch.gprs[rt]);
            } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                kvm_write_c0_guest_status(cop0,
                                          vcpu->arch.gprs[rt]);
                /* Make sure that CU1 and NMI bits are never set */
                kvm_clear_c0_guest_status(cop0,
                                          (ST0_CU1 | ST0_NMI));
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
            } else {
                cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
            }

            kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
                      rd, sel, cop0->reg[rd][sel]);
            break;

        case dmtc_op:
            printk
                ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
                 vcpu->arch.pc, rt, rd, sel);
            er = EMULATE_FAIL;
            break;

        case mfmcz_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
            cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
            if (rt != 0) {
                vcpu->arch.gprs[rt] =
                    kvm_read_c0_guest_status(cop0);
            }
            /* EI */
            if (inst & 0x20) {
                kvm_debug("[%#lx] mfmcz_op: EI\n",
                          vcpu->arch.pc);
                kvm_set_c0_guest_status(cop0, ST0_IE);
            } else {
                kvm_debug("[%#lx] mfmcz_op: DI\n",
                          vcpu->arch.pc);
                kvm_clear_c0_guest_status(cop0, ST0_IE);
            }
            break;

        case wrpgpr_op:
            {
                uint32_t css =
                    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
                uint32_t pss =
                    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
                /* We don't support any shadow register sets, so
                 * SRSCtl[PSS] == SRSCtl[CSS] = 0 */
                if (css || pss) {
                    er = EMULATE_FAIL;
                    break;
                }
                kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
                          vcpu->arch.gprs[rt]);
                vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
            }
            break;
        default:
            printk
                ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
                 vcpu->arch.pc, copz);
            er = EMULATE_FAIL;
            break;
        }
    }

done:
    /*
     * Rollback PC only if emulation was unsuccessful
     */
    if (er == EMULATE_FAIL) {
        vcpu->arch.pc = curr_pc;
    }

dont_update_pc:
    /*
     * This is for special instructions whose emulation
     * updates the PC, so do not overwrite the PC under
     * any circumstances
     */
    return er;
}
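
/*
 * MMIO flow for loads and stores: the faulting GVA (host_cp0_badvaddr) is
 * translated to a guest physical address, the access is described in
 * run->mmio, and EMULATE_DO_MMIO sends the vcpu out to userspace, which
 * performs the access and re-enters the guest.
 */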
enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
                       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DO_MMIO;
    int32_t op, base, rt, offset;
    uint32_t bytes;
    void *data = run->mmio.data;
    unsigned long curr_pc;

    /*
     * Update PC and hold onto current PC in case there is
     * an error and we want to rollback the PC
     */
    curr_pc = vcpu->arch.pc;
    er = update_pc(vcpu, cause);
    if (er == EMULATE_FAIL)
        return er;

    rt = (inst >> 16) & 0x1f;
    base = (inst >> 21) & 0x1f;
    offset = inst & 0xffff;
    op = (inst >> 26) & 0x3f;

    switch (op) {
    case sb_op:
        bytes = 1;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                    run->mmio.len);
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;
        *(u8 *) data = vcpu->arch.gprs[rt];
        kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
                  *(u8 *) data);
        break;

    case sw_op:
        bytes = 4;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                    run->mmio.len);
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;
        *(uint32_t *) data = vcpu->arch.gprs[rt];

        kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                  vcpu->arch.gprs[rt], *(uint32_t *) data);
        break;

    case sh_op:
        bytes = 2;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                    run->mmio.len);
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;
        *(uint16_t *) data = vcpu->arch.gprs[rt];

        kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                  vcpu->arch.gprs[rt], *(uint16_t *) data);
        break;

    default:
        printk("Store not yet supported");
        er = EMULATE_FAIL;
        break;
    }

    /*
     * Rollback PC if emulation was unsuccessful
     */
    if (er == EMULATE_FAIL) {
        vcpu->arch.pc = curr_pc;
    }

    return er;
}
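
/*
 * Loads cannot be completed here: the data only exists after userspace has
 * serviced the MMIO exit. The target register (io_gpr) and the original
 * exception cause (pending_load_cause) are saved so that
 * kvm_mips_complete_mmio_load() can finish the load, including the
 * delay-slot PC update, on re-entry.
 */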
enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
                      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DO_MMIO;
    int32_t op, base, rt, offset;
    uint32_t bytes;

    rt = (inst >> 16) & 0x1f;
    base = (inst >> 21) & 0x1f;
    offset = inst & 0xffff;
    op = (inst >> 26) & 0x3f;

    vcpu->arch.pending_load_cause = cause;
    vcpu->arch.io_gpr = rt;

    switch (op) {
    case lw_op:
        bytes = 4;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                    run->mmio.len);
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }

        run->mmio.len = bytes;
        run->mmio.is_write = 0;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        break;

    case lh_op:
    case lhu_op:
        bytes = 2;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                    run->mmio.len);
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }

        run->mmio.len = bytes;
        run->mmio.is_write = 0;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;

        if (op == lh_op)
            vcpu->mmio_needed = 2;
        else
            vcpu->mmio_needed = 1;
        break;

    case lbu_op:
    case lb_op:
        bytes = 1;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                    run->mmio.len);
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }

        run->mmio.len = bytes;
        run->mmio.is_write = 0;
        vcpu->mmio_is_write = 0;

        if (op == lb_op)
            vcpu->mmio_needed = 2;
        else
            vcpu->mmio_needed = 1;
        break;

    default:
        printk("Load not yet supported");
        er = EMULATE_FAIL;
        break;
    }

    return er;
}
int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
    unsigned long offset = (va & ~PAGE_MASK);
    struct kvm *kvm = vcpu->kvm;
    unsigned long pa;
    gfn_t gfn;
    pfn_t pfn;

    gfn = va >> PAGE_SHIFT;

    if (gfn >= kvm->arch.guest_pmap_npages) {
        printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
        kvm_mips_dump_host_tlbs();
        kvm_arch_vcpu_dump_regs(vcpu);
        return -1;
    }
    pfn = kvm->arch.guest_pmap[gfn];
    pa = (pfn << PAGE_SHIFT) | offset;

    printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

    mips32_SyncICache(CKSEG0ADDR(pa), 32);

    return 0;
}
#define MIPS_CACHE_OP_INDEX_INV         0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
#define MIPS_CACHE_OP_IMP               0x3
#define MIPS_CACHE_OP_HIT_INV           0x4
#define MIPS_CACHE_OP_FILL_WB_INV       0x5
#define MIPS_CACHE_OP_HIT_HB            0x6
#define MIPS_CACHE_OP_FETCH_LOCK        0x7

#define MIPS_CACHE_ICACHE               0x0
#define MIPS_CACHE_DCACHE               0x1
#define MIPS_CACHE_SEC                  0x3
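
/*
 * CACHE instruction encoding: bits 20:16 hold the 5-bit op field, which the
 * code below splits as bits 17:16 = target cache (0 = I, 1 = D, 3 =
 * secondary) and bits 20:18 = operation (index invalidate, hit invalidate,
 * fill/writeback-invalidate, ...), matching the MIPS_CACHE_* values above.
 */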
enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
                       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    extern void (*r4k_blast_dcache) (void);
    extern void (*r4k_blast_icache) (void);
    enum emulation_result er = EMULATE_DONE;
    int32_t offset, cache, op_inst, op, base;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    unsigned long va;
    unsigned long curr_pc;

    /*
     * Update PC and hold onto current PC in case there is
     * an error and we want to rollback the PC
     */
    curr_pc = vcpu->arch.pc;
    er = update_pc(vcpu, cause);
    if (er == EMULATE_FAIL)
        return er;

    base = (inst >> 21) & 0x1f;
    op_inst = (inst >> 16) & 0x1f;
    offset = (int16_t)inst;
    cache = (inst >> 16) & 0x3;
    op = (inst >> 18) & 0x7;

    va = arch->gprs[base] + offset;

    kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
              cache, op, base, arch->gprs[base], offset);

    /* Treat INDEX_INV as a nop, basically issued by Linux on startup to
     * invalidate the caches entirely by stepping through all the ways/indexes
     */
    if (op == MIPS_CACHE_OP_INDEX_INV) {
        kvm_debug
            ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
             vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
             arch->gprs[base], offset);

        if (cache == MIPS_CACHE_DCACHE)
            r4k_blast_dcache();
        else if (cache == MIPS_CACHE_ICACHE)
            r4k_blast_icache();
        else {
            printk("%s: unsupported CACHE INDEX operation\n",
                   __func__);
            return EMULATE_FAIL;
        }

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
        kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
        goto done;
    }

    preempt_disable();
    if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {

        if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
            kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
            kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
                    __func__, va, vcpu, read_c0_entryhi());
            er = EMULATE_FAIL;
            preempt_enable();
            goto done;
        }
    } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
               KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
        int index;

        /* If an entry already exists then skip */
        if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
            goto skip_fault;
        }

        /* If address not in the guest TLB, then give the guest a fault, the
         * resulting handler will do the right thing
         */
        index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
                                          (kvm_read_c0_guest_entryhi
                                           (cop0) & ASID_MASK));

        if (index < 0) {
            vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
            vcpu->arch.host_cp0_badvaddr = va;
            er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
                                             vcpu);
            preempt_enable();
            goto dont_update_pc;
        } else {
            struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
            /* Check if the entry is valid, if not then setup a TLB invalid
             * exception to the guest */
            if (!TLB_IS_VALID(*tlb, va)) {
                er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
                                                run, vcpu);
                preempt_enable();
                goto dont_update_pc;
            } else {
                /* We fault an entry from the guest tlb to the shadow host TLB */
                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
                                                         NULL, NULL)) {
                    kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
                            __func__, va, index, vcpu,
                            read_c0_entryhi());
                    er = EMULATE_FAIL;
                    preempt_enable();
                    goto done;
                }
            }
        }
    } else {
        kvm_debug
            ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
             cache, op, base, arch->gprs[base], offset);
        er = EMULATE_FAIL;
        preempt_enable();
        goto dont_update_pc;
    }

skip_fault:
    /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
    if (cache == MIPS_CACHE_DCACHE
        && (op == MIPS_CACHE_OP_FILL_WB_INV
            || op == MIPS_CACHE_OP_HIT_INV)) {
        flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
        /* Replace the CACHE instruction, with a SYNCI, not the same, but
         * avoids a trap */
        kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
    } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
        flush_dcache_line(va);
        flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
        /* Replace the CACHE instruction, with a SYNCI */
        kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
    } else {
        kvm_debug
            ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
             cache, op, base, arch->gprs[base], offset);
        er = EMULATE_FAIL;
        preempt_enable();
        goto dont_update_pc;
    }

    preempt_enable();

dont_update_pc:
    /*
     * Rollback PC
     */
    vcpu->arch.pc = curr_pc;
done:
    return er;
}
enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
                      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DONE;
    uint32_t inst;

    /*
     * Fetch the instruction.
     */
    if (cause & CAUSEF_BD) {
        opc += 1;
    }

    inst = kvm_get_inst(opc, vcpu);

    switch (((union mips_instruction)inst).r_format.opcode) {
    case cop0_op:
        er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
        break;
    case sb_op:
    case sh_op:
    case sw_op:
        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
        break;
    case lb_op:
    case lbu_op:
    case lhu_op:
    case lh_op:
    case lw_op:
        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
        break;
    case cache_op:
        ++vcpu->stat.cache_exits;
        trace_kvm_exit(vcpu, CACHE_EXITS);
        er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
        break;
    default:
        printk("Instruction emulation not supported (%p/%#x)\n", opc,
               inst);
        kvm_arch_vcpu_dump_regs(vcpu);
        er = EMULATE_FAIL;
        break;
    }

    return er;
}
enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
                         struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (T_SYSCALL << CAUSEB_EXCCODE));

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        printk("Trying to deliver SYSCALL when EXL is already set\n");
        er = EMULATE_FAIL;
    }

    return er;
}
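
/*
 * The exception-delivery helpers below all follow the same pattern: if the
 * guest is not already at exception level, save the PC to guest EPC and set
 * Status.EXL and Cause.BD as appropriate, then vector to the guest handler.
 * A TLB refill taken with EXL == 0 uses the base vector at
 * KVM_GUEST_KSEG0 + 0x0; everything else uses the general vector at +0x180,
 * mirroring the real MIPS exception vectors.
 */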
enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
                            struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;
    unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
                  arch->pc);

        /* set pc to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x0;

    } else {
        kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
                  arch->pc);

        arch->pc = KVM_GUEST_KSEG0 + 0x180;
    }

    kvm_change_c0_guest_cause(cop0, (0xff),
                              (T_TLB_LD_MISS << CAUSEB_EXCCODE));

    /* setup badvaddr, context and entryhi registers for the guest */
    kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
    /* XXXKYMA: is the context register used by linux??? */
    kvm_write_c0_guest_entryhi(cop0, entryhi);
    /* Blow away the shadow host TLBs */
    kvm_mips_flush_host_tlb(1);

    return er;
}
enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
                           struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;
    unsigned long entryhi =
        (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
        (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
                  arch->pc);

        /* set pc to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
                  arch->pc);
        arch->pc = KVM_GUEST_KSEG0 + 0x180;
    }

    kvm_change_c0_guest_cause(cop0, (0xff),
                              (T_TLB_LD_MISS << CAUSEB_EXCCODE));

    /* setup badvaddr, context and entryhi registers for the guest */
    kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
    /* XXXKYMA: is the context register used by linux??? */
    kvm_write_c0_guest_entryhi(cop0, entryhi);
    /* Blow away the shadow host TLBs */
    kvm_mips_flush_host_tlb(1);

    return er;
}
enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
                            struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;
    unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
                  arch->pc);

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x0;

    } else {
        kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
                  arch->pc);

        arch->pc = KVM_GUEST_KSEG0 + 0x180;
    }

    kvm_change_c0_guest_cause(cop0, (0xff),
                              (T_TLB_ST_MISS << CAUSEB_EXCCODE));

    /* setup badvaddr, context and entryhi registers for the guest */
    kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
    /* XXXKYMA: is the context register used by linux??? */
    kvm_write_c0_guest_entryhi(cop0, entryhi);
    /* Blow away the shadow host TLBs */
    kvm_mips_flush_host_tlb(1);

    return er;
}
enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
                           struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;
    unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
                  arch->pc);

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
                  arch->pc);

        arch->pc = KVM_GUEST_KSEG0 + 0x180;
    }

    kvm_change_c0_guest_cause(cop0, (0xff),
                              (T_TLB_ST_MISS << CAUSEB_EXCCODE));

    /* setup badvaddr, context and entryhi registers for the guest */
    kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
    /* XXXKYMA: is the context register used by linux??? */
    kvm_write_c0_guest_entryhi(cop0, entryhi);
    /* Blow away the shadow host TLBs */
    kvm_mips_flush_host_tlb(1);

    return er;
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
                       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DONE;

#ifdef DEBUG
    unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
        (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK);
    int index;

    /*
     * If address not in the guest TLB, then we are in trouble
     */
    index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
    if (index < 0) {
        /* XXXKYMA Invalidate and retry */
        kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
        kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
                __func__, entryhi);
        kvm_mips_dump_guest_tlbs(vcpu);
        kvm_mips_dump_host_tlbs();
        return EMULATE_FAIL;
    }
#endif

    er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);

    return er;
}
enum emulation_result
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
                        struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
                  arch->pc);

        arch->pc = KVM_GUEST_KSEG0 + 0x180;
    } else {
        kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
                  arch->pc);

        arch->pc = KVM_GUEST_KSEG0 + 0x180;
    }

    kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

    /* setup badvaddr, context and entryhi registers for the guest */
    kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
    /* XXXKYMA: is the context register used by linux??? */
    kvm_write_c0_guest_entryhi(cop0, entryhi);
    /* Blow away the shadow host TLBs */
    kvm_mips_flush_host_tlb(1);

    return er;
}
enum emulation_result
kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
                         struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
    }

    arch->pc = KVM_GUEST_KSEG0 + 0x180;

    kvm_change_c0_guest_cause(cop0, (0xff),
                              (T_COP_UNUSABLE << CAUSEB_EXCCODE));
    kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

    return er;
}
enum emulation_result
kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
                        struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (T_RES_INST << CAUSEB_EXCCODE));

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        kvm_err("Trying to deliver RI when EXL is already set\n");
        er = EMULATE_FAIL;
    }

    return er;
}
enum emulation_result
kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
                        struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (T_BREAK << CAUSEB_EXCCODE));

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        printk("Trying to deliver BP when EXL is already set\n");
        er = EMULATE_FAIL;
    }

    return er;
}
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
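
/*
 * RDHWR is emulated for the hardware registers guest userspace relies on:
 * 0 (CPUNum), 1 (SYNCI step), 2 (Count), 3 (Count resolution) and 29
 * (UserLocal, used for TLS). The OPCODE/FUNC/RD/RT masks above pick the
 * fields out of the trapped instruction.
 */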
enum emulation_result
kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
                   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;
    unsigned long curr_pc;
    uint32_t inst;

    /*
     * Update PC and hold onto current PC in case there is
     * an error and we want to rollback the PC
     */
    curr_pc = vcpu->arch.pc;
    er = update_pc(vcpu, cause);
    if (er == EMULATE_FAIL)
        return er;

    /*
     * Fetch the instruction.
     */
    if (cause & CAUSEF_BD)
        opc += 1;

    inst = kvm_get_inst(opc, vcpu);

    if (inst == KVM_INVALID_INST) {
        printk("%s: Cannot get inst @ %p\n", __func__, opc);
        return EMULATE_FAIL;
    }

    if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
        int rd = (inst & RD) >> 11;
        int rt = (inst & RT) >> 16;

        switch (rd) {
        case 0: /* CPU number */
            arch->gprs[rt] = 0;
            break;
        case 1: /* SYNCI length */
            arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
                                 current_cpu_data.icache.linesz);
            break;
        case 2: /* Read count register */
            printk("RDHWR: Count register\n");
            arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
            break;
        case 3: /* Count register resolution */
            switch (current_cpu_data.cputype) {
            case CPU_20KC:
            case CPU_25KF:
                arch->gprs[rt] = 1;
                break;
            default:
                arch->gprs[rt] = 2;
            }
            break;
        case 29:
#if 1
            arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
#else
            /* UserLocal not implemented */
            er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
#endif
            break;

        default:
            kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
            er = EMULATE_FAIL;
            break;
        }
    } else {
        kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
        er = EMULATE_FAIL;
    }

    /*
     * Rollback PC only if emulation was unsuccessful
     */
    if (er == EMULATE_FAIL) {
        vcpu->arch.pc = curr_pc;
        er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
    }

    return er;
}
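
/*
 * Counterpart to kvm_mips_emulate_load(): called on re-entry after
 * userspace has filled run->mmio.data. mmio_needed == 2 marks a signed
 * load (lb/lh), so the value is sign-extended into the register;
 * otherwise it is zero-extended.
 */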
enum emulation_result
kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
    unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
    enum emulation_result er = EMULATE_DONE;
    unsigned long curr_pc;

    if (run->mmio.len > sizeof(*gpr)) {
        printk("Bad MMIO length: %d", run->mmio.len);
        er = EMULATE_FAIL;
        goto done;
    }

    /*
     * Update PC and hold onto current PC in case there is
     * an error and we want to rollback the PC
     */
    curr_pc = vcpu->arch.pc;
    er = update_pc(vcpu, vcpu->arch.pending_load_cause);
    if (er == EMULATE_FAIL)
        return er;

    switch (run->mmio.len) {
    case 4:
        *gpr = *(int32_t *) run->mmio.data;
        break;

    case 2:
        if (vcpu->mmio_needed == 2)
            *gpr = *(int16_t *) run->mmio.data;
        else
            *gpr = *(uint16_t *)run->mmio.data;
        break;

    case 1:
        if (vcpu->mmio_needed == 2)
            *gpr = *(int8_t *) run->mmio.data;
        else
            *gpr = *(u8 *) run->mmio.data;
        break;
    }

    if (vcpu->arch.pending_load_cause & CAUSEF_BD)
        kvm_debug
            ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
             vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
             vcpu->mmio_needed);

done:
    return er;
}
static enum emulation_result
kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
                     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (exccode << CAUSEB_EXCCODE));

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

        kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
                  exccode, kvm_read_c0_guest_epc(cop0),
                  kvm_read_c0_guest_badvaddr(cop0));
    } else {
        printk("Trying to deliver EXC when EXL is already set\n");
        er = EMULATE_FAIL;
    }

    return er;
}
enum emulation_result
kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
                         struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DONE;
    uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
    unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

    int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

    if (usermode) {
        switch (exccode) {
        case T_INT:
        case T_SYSCALL:
        case T_BREAK:
        case T_RES_INST:
            break;

        case T_COP_UNUSABLE:
            if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
                er = EMULATE_PRIV_FAIL;
            break;

        case T_TLB_MOD:
            break;

        case T_TLB_LD_MISS:
            /* If we are accessing Guest kernel space, then send an address
             * error exception to the guest */
            if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
                printk("%s: LD MISS @ %#lx\n", __func__,
                       badvaddr);
                cause &= ~0xff;
                cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
                er = EMULATE_PRIV_FAIL;
            }
            break;

        case T_TLB_ST_MISS:
            /* If we are accessing Guest kernel space, then send an address
             * error exception to the guest */
            if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
                printk("%s: ST MISS @ %#lx\n", __func__,
                       badvaddr);
                cause &= ~0xff;
                cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
                er = EMULATE_PRIV_FAIL;
            }
            break;

        case T_ADDR_ERR_ST:
            printk("%s: address error ST @ %#lx\n", __func__,
                   badvaddr);
            if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
                cause &= ~0xff;
                cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
            } else {
                er = EMULATE_PRIV_FAIL;
            }
            break;
        case T_ADDR_ERR_LD:
            printk("%s: address error LD @ %#lx\n", __func__,
                   badvaddr);
            if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
                cause &= ~0xff;
                cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
            } else {
                er = EMULATE_PRIV_FAIL;
            }
            break;
        default:
            er = EMULATE_PRIV_FAIL;
            break;
        }
    }

    if (er == EMULATE_PRIV_FAIL) {
        kvm_mips_emulate_exc(cause, opc, run, vcpu);
    }

    return er;
}
/* User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result
kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
                        struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DONE;
    uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
    unsigned long va = vcpu->arch.host_cp0_badvaddr;
    int index;

    kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
              vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

    /* KVM would not have got the exception if this entry was valid in the
     * shadow host TLB. Check the Guest TLB, if the entry is not there then
     * send the guest an exception. The guest exc handler should then inject
     * an entry into the guest TLB.
     */
    index = kvm_mips_guest_tlb_lookup(vcpu,
                                      (va & VPN2_MASK) |
                                      (kvm_read_c0_guest_entryhi
                                       (vcpu->arch.cop0) & ASID_MASK));
    if (index < 0) {
        if (exccode == T_TLB_LD_MISS) {
            er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
        } else if (exccode == T_TLB_ST_MISS) {
            er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
        } else {
            printk("%s: invalid exc code: %d\n", __func__, exccode);
            er = EMULATE_FAIL;
        }
    } else {
        struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

        /* Check if the entry is valid, if not then setup a TLB invalid
         * exception to the guest */
        if (!TLB_IS_VALID(*tlb, va)) {
            if (exccode == T_TLB_LD_MISS) {
                er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
                                                vcpu);
            } else if (exccode == T_TLB_ST_MISS) {
                er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
                                                vcpu);
            } else {
                printk("%s: invalid exc code: %d\n", __func__,
                       exccode);
                er = EMULATE_FAIL;
            }
        } else {
            kvm_debug
                ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
                 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
            /* OK we have a Guest TLB entry, now inject it into the shadow
             * host TLB */
            if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
                                                     NULL, NULL)) {
                kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
                        __func__, va, index, vcpu,
                        read_c0_entryhi());
                er = EMULATE_FAIL;
            }
        }
    }

    return er;
}