// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];

	/*
	 * By LoongArch Reference Manual 2.2.10.5
	 * Return value is 0 for undefined CPUCFG index
	 *
	 * Disable preemption since hw gcsr is accessed
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
	preempt_enable();

	return EMULATE_DONE;
}
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 4.2.1
	 * For undefined CSR id, return value is 0
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}
static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}
static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				      unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}
static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * CSR value is in rd, the mask in rj, and the CSR id in the imm field:
	 * rj = 0 means csrrd
	 * rj = 1 means csrwr
	 * rj != 0,1 means csrxchg
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
		if (kvm_guest_has_pmu(&vcpu->arch)) {
			vcpu->arch.pc -= 4;
			kvm_make_request(KVM_REQ_PMU, vcpu);
			return EMULATE_DONE;
		}
	}

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}
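/*
 * Emulate a guest IOCSR access: decode the access width and direction from
 * the opcode, try the in-kernel KVM_IOCSR_BUS first, and fall back to user
 * space with EMULATE_DO_IOCSR if no in-kernel device claims the address.
 */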
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int idx, ret;
	unsigned long *val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access width and direction has its own opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;
	val = &vcpu->arch.gprs[rd];

	/* LoongArch is little endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		return EMULATE_FAIL;
	}

	if (run->iocsr_io.is_write) {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save data and let user space write it */
			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
		}

		trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
	} else {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save register id for iocsr read completion */
			vcpu->arch.io_gpr = rd;
		}

		trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
	}

	return ret;
}
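/*
 * Complete an IOCSR read that was handed off to user space: sign-extend the
 * data user space placed in run->iocsr_io.data into the saved destination GPR.
 */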
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
			run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
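/* Account the idle exit and halt the vCPU until it becomes runnable again */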
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}
/*
 * Trigger GSPR:
 * 1) Execute CPUCFG instruction;
 * 2) Execute CACOP/IDLE instructions;
 * 3) Access to unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}
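/*
 * Emulate a guest load that faulted on an MMIO address: decode the access
 * width and signedness from the instruction, try the in-kernel KVM_MMIO_BUS
 * first, and fall back to user space with EMULATE_DO_MMIO otherwise.
 */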
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);

		/*
		 * If an mmio device such as the PCH-PIC is emulated in KVM,
		 * it does not need to return to user space to handle the mmio
		 * exception.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
				      run->mmio.len, &vcpu->arch.gprs[rd]);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
			update_pc(&vcpu->arch);
			vcpu->mmio_needed = 0;
			return EMULATE_DONE;
		}

		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		return EMULATE_DO_MMIO;
	}

	kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
		inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->mmio_needed = 0;

	return ret;
}
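/*
 * Complete an MMIO load after user space has filled run->mmio.data: advance
 * the PC and load the data into the saved GPR, sign- or zero-extending
 * according to vcpu->mmio_needed.
 */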
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with new PC */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
			run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
		       run->mmio.phys_addr, run->mmio.data);

	return er;
}
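/*
 * Emulate a guest store that faulted on an MMIO address: decode the access
 * width from the instruction, copy the source GPR into run->mmio.data, try
 * the in-kernel KVM_MMIO_BUS first, and fall back to user space otherwise.
 */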
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);

		/*
		 * If an mmio device such as the PCH-PIC is emulated in KVM,
		 * it does not need to return to user space to handle the mmio
		 * exception.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret)
			return EMULATE_DONE;

		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	/* Rollback PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
		inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);

	return ret;
}
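/*
 * Handle a guest TLB read/write fault: let kvm_handle_mm_fault() try to map
 * the page first; if that fails, treat the access as MMIO and emulate the
 * faulting load or store.
 */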
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	ret = kvm_handle_mm_fault(vcpu, badv, write);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}
static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}
int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	update_pc(&vcpu->arch);
	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);

	return 0;
}
/*
 * kvm_handle_fpu_disabled() - Guest used FPU while it is disabled at the host
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use the FPU which hasn't been allowed
 * by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If guest FPU not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If FPU already in use, we shouldn't get this at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}
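/*
 * Handle the steal-time notification hypercall: record the guest physical
 * address of the steal-time area and request a steal-time update when the
 * address is marked valid.
 */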
static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
	unsigned long id, data;

	id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
	switch (id) {
	case BIT(KVM_FEATURE_STEAL_TIME):
		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
			return KVM_HCALL_INVALID_PARAMETER;

		vcpu->arch.st.guest_addr = data;
		if (!(data & KVM_STEAL_PHYS_VALID))
			return 0;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
		return 0;
	default:
		return KVM_HCALL_INVALID_CODE;
	}

	return KVM_HCALL_INVALID_CODE;
}
/*
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lsx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

/*
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lasx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}
static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lbt(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}
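/*
 * Handle the PV IPI hypercall: A1/A2 carry the destination vCPU bitmap and
 * A3 the base vCPU id; queue SWI0 to every destination vCPU and kick it.
 */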
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu, i;
	unsigned long ipi_bitmap;
	struct kvm_vcpu *dest;

	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
		ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
		if (!ipi_bitmap)
			continue;

		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
		while (cpu < BITS_PER_LONG) {
			dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
			if (!dest)
				continue;

			/* Send SWI0 to dest vcpu to emulate IPI interrupt */
			kvm_queue_irq(dest, INT_SWI0);
			kvm_vcpu_kick(dest);
		}
	}

	return 0;
}
/*
 * Hypercall emulation always returns to the guest. The caller should check retval.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
			kvm_send_pv_ipi(vcpu);
			ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
	}

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
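/*
 * Dispatch a guest hypercall based on the immediate field of the trapping
 * instruction: in-kernel services, user-space services, software debugging,
 * or an invalid code written back to A0.
 */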
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;
	larch_inst inst;
	unsigned int code;

	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.immediate;
	ret = RESUME_GUEST;

	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_USER_SERVICE:
		if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
			kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
			break;
		}

		vcpu->stat.hypercall_exits++;
		vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
		vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
		vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
		vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
		vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
		vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
		vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
		vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
		vcpu->run->hypercall.flags = 0;
		/*
		 * Set an invalid return value by default, and let the user-mode VMM modify it.
		 */
		vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
		ret = RESUME_HOST;
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
		}
		fallthrough;
	default:
		/* Treat it as a nop instruction, only set the return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	if (ret == RESUME_GUEST)
		update_pc(&vcpu->arch);

	return ret;
}
/*
 * LoongArch KVM callback handling for unimplemented guest exit reasons
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned int ecode, inst;
	unsigned long estat, badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
		ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}
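/*
 * Exit handler dispatch table, indexed by the guest exception code;
 * unhandled codes fall back to kvm_fault_ni().
 */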
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1] = kvm_fault_ni,
	[EXCCODE_TLBI] = kvm_handle_read_fault,
	[EXCCODE_TLBL] = kvm_handle_read_fault,
	[EXCCODE_TLBS] = kvm_handle_write_fault,
	[EXCCODE_TLBM] = kvm_handle_write_fault,
	[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
	[EXCCODE_BTDIS] = kvm_handle_lbt_disabled,
	[EXCCODE_GSPR] = kvm_handle_gspr,
	[EXCCODE_HVC] = kvm_handle_hypercall,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu);
}