// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MASK_WRS		0xffffffff
#define INSN_MATCH_WRS		0x00d00073

#define INSN_MATCH_CSRRW	0x1073
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRS	0x2073
#define INSN_MASK_CSRRS		0x707f
#define INSN_MATCH_CSRRC	0x3073
#define INSN_MASK_CSRRC		0x707f
#define INSN_MATCH_CSRRWI	0x5073
#define INSN_MASK_CSRRWI	0x707f
#define INSN_MATCH_CSRRSI	0x6073
#define INSN_MASK_CSRRSI	0x707f
#define INSN_MATCH_CSRRCI	0x7073
#define INSN_MASK_CSRRCI	0x707f
#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f
#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003
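
/*
 * A compressed (16-bit) instruction has its two lowest opcode bits not
 * both set; standard 32-bit instructions always end in 0b11.
 */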
#define INSN_16BIT_MASK		0x3

#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)
#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)
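
/*
 * RV_X(x, s, n) extracts the n-bit field starting at bit s of x. The
 * RVC_*_IMM() helpers below reassemble the scattered immediate fields of
 * compressed load/store encodings into byte offsets.
 */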
#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)
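
/*
 * SHIFT_RIGHT() tolerates a negative shift count by shifting left instead,
 * which lets REG_OFFSET() turn the 5-bit register field found at bit
 * position 'pos' of an instruction into a byte offset into struct
 * kvm_cpu_context, where the guest GPRs are laid out in order.
 */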
#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))
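
/*
 * IMM_I() and IMM_S() rely on arithmetic right shifts of an s32 value to
 * sign-extend the 12-bit I-type and S-type immediates.
 */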
struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};
static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}
static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}
/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}
static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}
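
/*
 * WRS.NTO (Zawrs) is treated as a polite busy-wait: kvm_vcpu_on_spin()
 * attempts a directed yield to another runnable vCPU, limiting the
 * candidates to kernel-mode vCPUs when the waiting guest context was
 * itself in S-mode (SSTATUS.SPP set).
 */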
static int wrs_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wrs_exit_stat++;
	kvm_vcpu_on_spin(vcpu, vcpu->arch.guest_context.sstatus & SR_SPP);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}
struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as for the "func" callback
	 * in "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};
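
/*
 * The seed CSR (Zkr) is never emulated in the kernel: if the guest ISA
 * includes Zkr the access is forwarded to user space, otherwise it is
 * treated as an illegal instruction.
 */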
static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
			unsigned long *val, unsigned long new_val,
			unsigned long wr_mask)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR))
		return KVM_INSN_ILLEGAL_TRAP;

	return KVM_INSN_EXIT_TO_USER_SPACE;
}
static const struct csr_func csr_funcs[] = {
	KVM_RISCV_VCPU_AIA_CSR_FUNCS
	KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
	{ .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
};
/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}
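
/*
 * The CSR instruction forms differ only in how the write mask and new value
 * are derived: CSRRW replaces the whole CSR with rs1, CSRRS sets the bits
 * given in rs1, and CSRRC clears them; the immediate (*I) variants use the
 * 5-bit rs1 field as a zero-extended immediate instead of a register.
 */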
static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/* Decode the CSR instruction */
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}
static const struct insn_func system_opcode_funcs[] = {
	{
		.mask  = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func  = wfi_insn,
	},
	{
		.mask  = INSN_MASK_WRS,
		.match = INSN_MATCH_WRS,
		.func  = wrs_insn,
	},
};
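
/*
 * system_opcode_insn() below walks the table above and dispatches the first
 * entry whose mask/match pair fits the trapped instruction; the KVM_INSN_*
 * result is then translated into the run-loop convention (< 0 error, 0 exit
 * to user space, > 0 continue).
 */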
static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}
/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}
/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}
/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}
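
/*
 * For loads narrower than XLEN the decode step above recorded a shift so
 * that the value read back from run->mmio.data can be widened with a
 * shift-left followed by a shift-right when it is written to the
 * destination register below.
 */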
/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *				 or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}