// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];

	/*
	 * By LoongArch Reference Manual 2.2.10.5
	 * Return value is 0 for undefined CPUCFG index
	 *
	 * Disable preemption since hw gcsr is accessed
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
	preempt_enable();

	return EMULATE_DONE;
}
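/*
 * Software-emulated guest CSR access helpers. Only CSRs flagged as
 * SW_GCSR are kept in the in-memory struct loongarch_csrs; accesses to
 * anything else are reported once with pr_warn_once() and otherwise
 * treated as reads of 0 / discarded writes.
 */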
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 4.2.1
	 * For undefined CSR id, return value is 0
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}
static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}
static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}
static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * CSR instruction dispatch on rj:
	 * rj = 0 means csrrd
	 * rj = 1 means csrwr
	 * rj != 0,1 means csrxchg
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
		if (kvm_guest_has_pmu(&vcpu->arch)) {
			vcpu->arch.pc -= 4;
			kvm_make_request(KVM_REQ_PMU, vcpu);
			return EMULATE_DONE;
		}
	}

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}
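/*
 * IOCSR accesses are first tried against the in-kernel KVM_IOCSR_BUS.
 * If no in-kernel device claims the address, the access is forwarded to
 * user space via a KVM_EXIT_LOONGARCH_IOCSR exit (EMULATE_DO_IOCSR).
 */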
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int idx, ret;
	unsigned long *val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR with different opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;
	val = &vcpu->arch.gprs[rd];

	/* LoongArch is Little endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		return EMULATE_FAIL;
	}

	if (run->iocsr_io.is_write) {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save data and let user space write it */
			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
		}

		trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
	} else {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save register id for iocsr read completion */
			vcpu->arch.io_gpr = rd;
		}

		trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
	}

	return ret;
}
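/*
 * Called after user space has completed an IOCSR read: copy the returned
 * data into the guest GPR that was recorded in vcpu->arch.io_gpr.
 */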
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}
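/*
 * GSPR exceptions trap privileged guest instructions (CPUCFG, CSR ops,
 * CACOP, IDLE, IOCSR). Decode the faulting word from CSR.BADI and
 * dispatch to the matching emulation helper above.
 */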
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}
/*
 * Trigger GSPR:
 * 1) Execute CPUCFG instruction;
 * 2) Execute CACOP/IDLE instructions;
 * 3) Access to unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}
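/*
 * MMIO load emulation: decode the faulting load instruction, record the
 * access width and signedness, and try the in-kernel KVM_MMIO_BUS first.
 * Only when no in-kernel device handles the address does the vCPU exit
 * to user space with KVM_EXIT_MMIO.
 */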
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);

		/*
		 * If mmio device such as PCH-PIC is emulated in KVM,
		 * it does not need to return to user space to handle the mmio
		 * exception.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
				      run->mmio.len, &vcpu->arch.gprs[rd]);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
			update_pc(&vcpu->arch);
			vcpu->mmio_needed = 0;
			return EMULATE_DONE;
		}

		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		return EMULATE_DO_MMIO;
	}

	kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->mmio_needed = 0;

	return EMULATE_FAIL;
}
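/*
 * Called after user space completes an MMIO read: sign- or zero-extend the
 * data according to vcpu->mmio_needed (2 = signed, 1 = unsigned) and store
 * it into the GPR recorded in vcpu->arch.io_gpr.
 */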
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with new PC */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
				run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
			run->mmio.phys_addr, run->mmio.data);

	return er;
}
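/*
 * MMIO store emulation: the store data is captured into run->mmio.data
 * right away and the PC is advanced up front, so the PC must be rolled
 * back if the instruction turns out not to be a supported store.
 */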
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);

		/*
		 * If mmio device such as PCH-PIC is emulated in KVM,
		 * it does not need to return to user space to handle the mmio
		 * exception.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret)
			return EMULATE_DONE;

		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	/* Rollback PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);

	return EMULATE_FAIL;
}
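/*
 * Common handler for guest read/write page faults: let the MMU try to
 * resolve the fault first, and only fall back to MMIO emulation when no
 * memslot backs the faulting address.
 */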
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	ret = kvm_handle_mm_fault(vcpu, badv, write);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}
static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}
int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	update_pc(&vcpu->arch);
	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);

	return 0;
}
/*
 * kvm_handle_fpu_disabled() - Guest used FPU although it is disabled at host
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use fpu which hasn't been allowed
 * by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If guest FPU not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If FPU already in use, we shouldn't get this at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}
static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
	unsigned long id, data;

	id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
	switch (id) {
	case BIT(KVM_FEATURE_STEAL_TIME):
		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
			return KVM_HCALL_INVALID_PARAMETER;

		vcpu->arch.st.guest_addr = data;
		if (!(data & KVM_STEAL_PHYS_VALID))
			return 0;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
		return 0;
	default:
		return KVM_HCALL_INVALID_CODE;
	}

	return KVM_HCALL_INVALID_CODE;
}
/*
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lsx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}
/*
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lasx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}
static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lbt(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}
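/*
 * PV IPI hypercall: A1/A2 carry the destination bitmap (BITS_PER_LONG bits
 * each) and A3 the base cpuid the bitmap is relative to. Each set bit
 * raises SWI0 on the target vCPU and kicks it.
 */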
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu, i;
	unsigned long ipi_bitmap;
	struct kvm_vcpu *dest;

	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
		ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
		if (!ipi_bitmap)
			continue;

		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
		while (cpu < BITS_PER_LONG) {
			dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
			if (!dest)
				continue;

			/* Send SWI0 to dest vcpu to emulate IPI interrupt */
			kvm_queue_irq(dest, INT_SWI0);
			kvm_vcpu_kick(dest);
		}
	}

	return 0;
}
/*
 * Hypercall emulation always returns to guest, caller should check retval.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
			kvm_send_pv_ipi(vcpu);
			ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
	}

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
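/*
 * Top-level HVCL handler: dispatch on the hypercall code encoded in the
 * instruction immediate. Only RESUME_GUEST paths advance the PC here;
 * the user-space hypercall path advances it later in
 * kvm_complete_user_service().
 */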
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;
	larch_inst inst;
	unsigned int code;

	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.immediate;
	ret = RESUME_GUEST;

	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_USER_SERVICE:
		if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
			kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
			break;
		}

		vcpu->stat.hypercall_exits++;
		vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
		vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
		vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
		vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
		vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
		vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
		vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
		vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
		vcpu->run->hypercall.flags = 0;
		/*
		 * Set invalid return value by default, let user-mode VMM modify it.
		 */
		vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
		ret = RESUME_HOST;
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
		}
		fallthrough;
	default:
		/* Treat it as a noop instruction, only set return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	if (ret == RESUME_GUEST)
		update_pc(&vcpu->arch);

	return ret;
}
/*
 * LoongArch KVM callback handling for unimplemented guest exiting
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned int ecode, inst;
	unsigned long estat, badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}
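/*
 * Exception dispatch table indexed by guest exception code (ESTAT.Ecode);
 * anything without a dedicated handler falls through to kvm_fault_ni().
 */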
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
	[EXCCODE_TLBI]			= kvm_handle_read_fault,
	[EXCCODE_TLBL]			= kvm_handle_read_fault,
	[EXCCODE_TLBS]			= kvm_handle_write_fault,
	[EXCCODE_TLBM]			= kvm_handle_write_fault,
	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
	[EXCCODE_BTDIS]			= kvm_handle_lbt_disabled,
	[EXCCODE_GSPR]			= kvm_handle_gspr,
	[EXCCODE_HVC]			= kvm_handle_hypercall,
};
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu);
}