/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>

#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

/* r4kcache.h must be compiled without the MT-specific cache ops */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should be called only when the faulting instruction sits in
 * a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;
	/*
	 * Read the instruction
	 */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
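	/*
	 * Taken branches below target epc + 4 + (simmediate << 2): the 16-bit
	 * signed offset is counted in words, relative to the delay-slot
	 * instruction. Not-taken branches fall through to epc + 8, stepping
	 * over the delay slot as well.
	 */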
		/*
		 * jr and jalr are in r_format format.
		 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;
		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			} else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;
		/*
		 * These are unconditional and in j_format.
		 */
	case jal_op:
		arch->gprs[31] = instpc + 8;
		/* Fall through */
	case j_op:
		/* the 26-bit target replaces the low 28 bits of the
		 * delay-slot PC */
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;
		/*
		 * These are conditional and in i_format.
		 */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;
	case blez_op:	/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;
		/*
		 * And now the FPA/cp1 branch instructions.
		 */
	case cop1_op:
		printk("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	printk("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	printk("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}

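/*
 * Advance the guest PC after a successfully emulated instruction: in a
 * branch delay slot the PC moves to the computed branch target, otherwise
 * it simply steps over the single emulated instruction.
 */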
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/* Every time the compare register is written to, we need to decide when to
 * fire the timer that represents timer ticks to the GUEST.
 */
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	/* If COUNT is enabled */
	if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
		hrtimer_start(&vcpu->arch.comparecount_timer,
			      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
	}

	return er;
}

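/*
 * ERET returns from exception level: with Status.EXL set the PC is restored
 * from guest EPC; otherwise, with Status.ERL set, it is restored from
 * ErrorEPC. Executing ERET with neither bit set is a guest error.
 */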
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
		       vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

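/*
 * WAIT idles the virtual CPU: if no exceptions are pending the vcpu blocks,
 * and a subsequent unhalt request exits to userspace so that any pending
 * I/O interrupts can be checked.
 */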
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;

	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vcpu_block(vcpu);

		/* If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return er;
}

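/*
 * The guest TLB ops below are emulated against vcpu->arch.guest_tlb. Note
 * the invariant: whenever a guest entry is rewritten, any matching entry in
 * the shadow host TLB must be invalidated first, so a stale host translation
 * can never outlive the guest entry it was derived from.
 */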
/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so
 * that we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_FAIL;
	uint32_t pc = vcpu->arch.pc;

	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return er;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		printk
		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		     pc, index, kvm_read_c0_guest_entryhi(cop0),
		     kvm_read_c0_guest_entrylo0(cop0),
		     kvm_read_c0_guest_entrylo1(cop0),
		     kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/* Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
	     kvm_read_c0_guest_pagemask(cop0));

	return er;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

#if 1
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		return EMULATE_FAIL;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/* Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0),
	     kvm_read_c0_guest_entrylo1(cop0));

	return er;
}

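/*
 * TLBP probes the guest TLB for a match against EntryHi; the matching index
 * (or -1 on a miss, which leaves bit 31, the architectural P bit, set) is
 * written back to the guest Index register.
 */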
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	enum emulation_result er = EMULATE_DONE;
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return er;
}

enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL) {
		return er;
	}
	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;
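
	/*
	 * COP0 encoding: bits 25:21 (copz) select MFC0/MTC0/etc., rt and rd
	 * name the GPR and CP0 register, sel (bits 2:0) picks the CP0
	 * sub-register, and the CO bit (25) marks the TLB/ERET/WAIT function
	 * group decoded from the low bits of the instruction.
	 */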
	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			printk("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
				vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug
			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);
			break;
		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				printk("Invalid TLB Index: %ld",
				       vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
				    vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
				    &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {
					kvm_debug
					    ("MTCz, change ASID from %#lx to %#lx\n",
					     kvm_read_c0_guest_entryhi(cop0) &
					     ASID_MASK,
					     vcpu->arch.gprs[rt] & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/* Linux doesn't seem to write into COUNT, we throw an error
				 * if we notice a write to COUNT
				 */
				/*er = EMULATE_FAIL; */
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_write_c0_guest_compare(cop0,
							   vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				kvm_write_c0_guest_status(cop0,
							  vcpu->arch.gprs[rt]);
				/* Make sure that CU1 and NMI bits are never set */
				kvm_clear_c0_guest_status(cop0,
							  (ST0_CU1 | ST0_NMI));
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			printk
			    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
			     vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;
		case mfmcz_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
				    kvm_read_c0_guest_status(cop0);
			}
			/* bit 5 (sc) of the mfmc0 encoding: EI sets it, DI clears it */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}
			break;
		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			printk
			    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
			     vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}
	/*
	 * Rollback PC only if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances.
	 */

	return er;
}

enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;
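
	/*
	 * Example: "sw $t0, 8($sp)" decodes as op = sw_op, base = 29 ($sp),
	 * rt = 8 ($t0), offset = 8. The faulting guest virtual address is
	 * already latched in host_cp0_badvaddr, so base/offset are not used
	 * to recompute it here.
	 */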
	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(uint32_t *) data);
		break;
	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;
	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	default:
		printk("Store not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	/*
	 * Rollback PC if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

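/*
 * Loads cannot complete until userspace supplies the data, so the cause and
 * destination register are parked in pending_load_cause/io_gpr here; the PC
 * is only advanced later, in kvm_mips_complete_mmio_load().
 */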
enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;
	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;
	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;	/* signed: sign-extend on completion */
		else
			vcpu->mmio_needed = 1;
		break;
	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;	/* signed: sign-extend on completion */
		else
			vcpu->mmio_needed = 1;
		break;

	default:
		printk("Load not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

	mips32_SyncICache(CKSEG0ADDR(pa), 32);

	return 0;
}

#define MIPS_CACHE_OP_INDEX_INV		0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG	0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG	0x2
#define MIPS_CACHE_OP_IMP		0x3
#define MIPS_CACHE_OP_HIT_INV		0x4
#define MIPS_CACHE_OP_FILL_WB_INV	0x5
#define MIPS_CACHE_OP_HIT_HB		0x6
#define MIPS_CACHE_OP_FETCH_LOCK	0x7

#define MIPS_CACHE_ICACHE		0x0
#define MIPS_CACHE_DCACHE		0x1
#define MIPS_CACHE_SEC			0x3

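/*
 * The CACHE instruction's 5-bit op field (bits 20:16) decomposes into the
 * target cache in bits 17:16 and the operation in bits 20:18, matching the
 * MIPS_CACHE_* values above and the shifts used in the decode below.
 */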
enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	extern void (*r4k_blast_dcache) (void);
	extern void (*r4k_blast_icache) (void);
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = inst & 0xffff;
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);
	/* Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug
		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
		     arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE)
			r4k_blast_dcache();
		else if (cache == MIPS_CACHE_ICACHE)
			r4k_blast_icache();
		else {
			printk("%s: unsupported CACHE INDEX operation\n",
			       __func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {

		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
		}
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
			goto skip_fault;
		}

		/* If address not in the guest TLB, then give the guest a fault, the
		 * resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/* Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				goto dont_update_pc;
			} else {
				/* We fault an entry from the guest tlb to the
				 * shadow host TLB */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		printk
		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		goto dont_update_pc;
	}
skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		printk
		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		goto dont_update_pc;
	}

	return er;

dont_update_pc:
	/*
	 * Rollback PC
	 */
	vcpu->arch.pc = curr_pc;
done:
	return er;
}

enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD) {
		opc += 1;
	}

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
	default:
		printk("Instruction emulation not supported (%p/%#x)\n", opc,
		       inst);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_SYSCALL << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		printk("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
	    (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	enum emulation_result er = EMULATE_DONE;
	int index;

	/*
	 * If address not in the guest TLB, then we are in trouble
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
	    (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return er;
}

enum emulation_result
kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_RES_INST << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_BREAK << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		printk("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

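/*
 * Example: the RDHWR encoding 0x7c03e83b ("rdhwr v1, $29", as used for TLS)
 * matches (inst & OPCODE) == SPEC3 and (inst & FUNC) == RDHWR, and decodes
 * as rd = (inst & RD) >> 11 = 29 and rt = (inst & RT) >> 16 = 3.
 */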
enum emulation_result
kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
		   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	uint32_t inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	if (inst == KVM_INVALID_INST) {
		printk("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = (inst & RD) >> 11;
		int rt = (inst & RT) >> 16;
		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case 0:	/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case 1:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case 2:	/* Read count register */
			printk("RDHWR: Count register\n");
			arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
			break;
		case 3:	/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case 29:	/* UserLocal */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}

enum emulation_result
kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;

	if (run->mmio.len > sizeof(*gpr)) {
		printk("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(int32_t *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int16_t *) run->mmio.data;
		else
			/* zero-extend for unsigned loads (lhu) */
			*gpr = *(uint16_t *) run->mmio.data;
		break;

	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int8_t *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug
		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
		     vcpu->mmio_needed);

done:
	return er;
}

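/*
 * Generic exception delivery: save the PC in guest EPC, set Status.EXL,
 * record the branch-delay state in Cause.BD, then vector the guest to the
 * general exception handler at KSEG0 + 0x180 with the given exception code.
 */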
static enum emulation_result
kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		printk("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result
kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case T_INT:
		case T_SYSCALL:
		case T_BREAK:
		case T_RES_INST:
			break;

		case T_COP_UNUSABLE:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case T_TLB_MOD:
			break;

		case T_TLB_LD_MISS:
			/* If we are accessing Guest kernel space, then send an
			 * address error exception to the guest */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: LD MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_TLB_ST_MISS:
			/* If we are accessing Guest kernel space, then send an
			 * address error exception to the guest */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: ST MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_ADDR_ERR_ST:
			printk("%s: address error ST @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
			} else {
				er = EMULATE_PRIV_FAIL;
			}
			break;
		case T_ADDR_ERR_LD:
			printk("%s: address error LD @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
			} else {
				er = EMULATE_PRIV_FAIL;
			}
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL) {
		kvm_mips_emulate_exc(cause, opc, run, vcpu);
	}

	return er;
}

/* User Address (UA) fault, this can happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result
kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

	/* KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then
	 * inject an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
					  (va & VPN2_MASK) |
					  (kvm_read_c0_guest_entryhi
					   (vcpu->arch.cop0) & ASID_MASK));
	if (index < 0) {
		if (exccode == T_TLB_LD_MISS) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == T_TLB_ST_MISS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			printk("%s: invalid exc code: %d\n", __func__, exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/* Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == T_TLB_LD_MISS) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == T_TLB_ST_MISS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				printk("%s: invalid exc code: %d\n", __func__,
				       exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug
			    ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
			     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
			/* OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
							     NULL);
		}
	}

	return er;
}