// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/loongarch.h>
#include <asm/setup.h>

#define CREATE_TRACE_POINTS
#include "trace.h"
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, hypercall_exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
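
/*
 * Host/guest PMU context helpers. The host's perf counters are stashed in
 * the per-CPU kvm_context (vcpu->kvm->arch.vmcs), while the guest's live in
 * the vCPU software CSR image. The assignments below rely on
 * write_csr_perfctrlN() returning the previous CSR value, so writing 0 both
 * stops the counter and captures the old control word.
 */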
static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
{
	struct kvm_context *context;

	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	context->perf_cntr[0] = read_csr_perfcntr0();
	context->perf_cntr[1] = read_csr_perfcntr1();
	context->perf_cntr[2] = read_csr_perfcntr2();
	context->perf_cntr[3] = read_csr_perfcntr3();
	context->perf_ctrl[0] = write_csr_perfctrl0(0);
	context->perf_ctrl[1] = write_csr_perfctrl1(0);
	context->perf_ctrl[2] = write_csr_perfctrl2(0);
	context->perf_ctrl[3] = write_csr_perfctrl3(0);
}

static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
{
	struct kvm_context *context;

	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	write_csr_perfcntr0(context->perf_cntr[0]);
	write_csr_perfcntr1(context->perf_cntr[1]);
	write_csr_perfcntr2(context->perf_cntr[2]);
	write_csr_perfcntr3(context->perf_cntr[3]);
	write_csr_perfctrl0(context->perf_ctrl[0]);
	write_csr_perfctrl1(context->perf_ctrl[1]);
	write_csr_perfctrl2(context->perf_ctrl[2]);
	write_csr_perfctrl3(context->perf_ctrl[3]);
}

static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}

static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}
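
/*
 * Hand the hardware PMU to the guest: save the host counters, raise the
 * GPERF field in CSR.GCFG so the guest may touch PM0..PM(num) directly,
 * and reload the guest's saved PMU CSRs.
 */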
static int kvm_own_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	if (!kvm_guest_has_pmu(&vcpu->arch))
		return -EINVAL;

	kvm_save_host_pmu(vcpu);

	/* Set PM0-PM(num) to guest */
	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
	write_csr_gcfg(val);

	kvm_restore_guest_pmu(vcpu);

	return 0;
}

static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long val;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
		return;

	kvm_save_guest_pmu(vcpu);

	/* Disable pmu access from guest */
	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);

	/*
	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
	 * exiting the guest, so that on the next trap into the guest we do
	 * not need to deal with the PMU CSR context.
	 */
	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
	if (!(val & KVM_PMU_EVENT_ENABLED))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;

	kvm_restore_host_pmu(vcpu);
}

static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
		kvm_make_request(KVM_REQ_PMU, vcpu);
}

static void kvm_check_pmu(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
		kvm_own_pmu(vcpu);
		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
	}
}
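
/*
 * Publish updated steal time to the guest's kvm_steal_time record. The
 * version field works like a seqcount: it is bumped to an odd value before
 * the steal field is rewritten and to an even value afterwards, so a guest
 * reader can detect a torn update and retry.
 */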
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	u32 version;
	u64 steal;
	gpa_t gpa;
	struct kvm_memslots *slots;
	struct kvm_steal_time __user *st;
	struct gfn_to_hva_cache *ghc;

	ghc = &vcpu->arch.st.cache;
	gpa = vcpu->arch.st.guest_addr;
	if (!(gpa & KVM_STEAL_PHYS_VALID))
		return;

	gpa &= KVM_STEAL_PHYS_MASK;
	slots = kvm_memslots(vcpu->kvm);
	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
			ghc->gpa = INVALID_GPA;
			return;
		}
	}

	st = (struct kvm_steal_time __user *)ghc->hva;
	unsafe_get_user(version, &st->version, out);
	if (version & 1)
		version += 1; /* first time write, random junk */

	version += 1;
	unsafe_put_user(version, &st->version, out);
	smp_wmb();

	unsafe_get_user(steal, &st->steal, out);
	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
	unsafe_put_user(steal, &st->steal, out);

	smp_wmb();
	version += 1;
	unsafe_put_user(version, &st->version, out);
out:
	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0;	/* Drop vpid for this vCPU */

	if (kvm_dirty_ring_check_request(vcpu))
		return RESUME_HOST;

	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
		kvm_update_stolen_time(vcpu);

	return RESUME_GUEST;
}
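
/*
 * Late request handling, run with interrupts disabled immediately before
 * entering the guest. Only the deferred GPA TLB flush is handled here,
 * since it depends on state set up by kvm_check_vpid().
 */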
static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
{
	lockdep_assert_irqs_disabled();
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
		if (vcpu->arch.flush_gpa != INVALID_GPA) {
			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
			vcpu->arch.flush_gpa = INVALID_GPA;
		}
}

/*
 * Check and handle pending signal and vCPU requests etc
 * Run with irq enabled and preempt enabled
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int idx, ret;

	/*
	 * Check conditions before entering the guest
	 */
	ret = xfer_to_guest_mode_handle_work(vcpu);
	if (ret < 0)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_check_requests(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}

/*
 * Called with irq enabled
 *
 * Return: RESUME_GUEST if we should enter the guest, and irq disabled
 *         Others if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
	int ret;

	do {
		ret = kvm_enter_guest_check(vcpu);
		if (ret != RESUME_GUEST)
			break;

		/*
		 * Handle the vcpu timer, deliver interrupts and exceptions,
		 * and check the vmid before the vcpu enters the guest.
		 */
		local_irq_disable();
		kvm_deliver_intr(vcpu);
		kvm_deliver_exception(vcpu);
		/* Make sure the vcpu mode has been written */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
		kvm_check_vpid(vcpu);
		kvm_check_pmu(vcpu);

		/*
		 * Called after function kvm_check_vpid(),
		 * since it updates CSR.GSTAT used by kvm_flush_tlb_gpa()
		 * and it may also clear the KVM_REQ_TLB_FLUSH_GPA pending bit.
		 */
		kvm_late_check_requests(vcpu);
		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
		/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
			/* Make sure the vcpu mode has been written */
			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
			local_irq_enable();
			ret = -EAGAIN;
		}
	} while (ret != RESUME_GUEST);

	return ret;
}

/*
 * Return 1 for resume guest and "<= 0" for resume host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	unsigned long estat = vcpu->arch.host_estat;
	u32 intr = estat & 0x1fff; /* Ignore NMI */
	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

	guest_timing_exit_irqoff();
	guest_state_exit_irqoff();
	local_irq_enable();

	trace_kvm_exit(vcpu, ecode);
	if (ecode) {
		ret = kvm_handle_fault(vcpu, ecode);
	} else {
		WARN(!intr, "vm exiting with suspicious irq\n");
		++vcpu->stat.int_exits;
	}

	if (ret == RESUME_GUEST)
		ret = kvm_pre_enter_guest(vcpu);

	if (ret != RESUME_GUEST) {
		local_irq_enable();
		return ret;
	}

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_reenter(vcpu);

	return RESUME_GUEST;
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.irq_pending) &&
		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Protect from TOD sync and vcpu_load/put() */
	preempt_disable();
	ret = kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
	preempt_enable();

	return ret;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;

	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}

	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

	return 0;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.mp_state = *mp_state;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE)
		vcpu->guest_debug = dbg->control;
	else
		vcpu->guest_debug = 0;

	return 0;
}
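
/*
 * Bind a guest physical CPUID to this vCPU via the VM-wide phyid_map.
 * Setting the same value again is a no-op; changing an already-set CPUID
 * at runtime, or sharing one CPUID between two vCPUs, is rejected.
 */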
static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (val >= KVM_MAX_PHYID)
		return -EINVAL;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
		/* Discard duplicated CPUID set operation */
		if (cpuid == val) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
		 * CPUID is already set before
		 * Forbid changing to a different CPUID at runtime
		 */
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

	if (map->phys_map[val].enabled) {
		/* Discard duplicated CPUID set operation */
		if (vcpu == map->phys_map[val].vcpu) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
		 * New CPUID is already set with other vcpu
		 * Forbid sharing the same CPUID between different vcpus
		 */
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
	map->phys_map[val].enabled = true;
	map->phys_map[val].vcpu = vcpu;
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);

	return 0;
}
static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	if (cpuid >= KVM_MAX_PHYID)
		return;

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if (map->phys_map[cpuid].enabled) {
		map->phys_map[cpuid].vcpu = NULL;
		map->phys_map[cpuid].enabled = false;
		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
	}
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
}

struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
{
	struct kvm_phyid_map *map;

	if (cpuid >= KVM_MAX_PHYID)
		return NULL;

	map = kvm->arch.phyid_map;
	if (!map->phys_map[cpuid].enabled)
		return NULL;

	return map->phys_map[cpuid].vcpu;
}
static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/*
		 * Sync pending interrupts into ESTAT so that interrupt
		 * remains during VM migration stage
		 */
		kvm_deliver_intr(vcpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		/* ESTAT IP0~IP7 get from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get software CSR state since software state is consistent
	 * with hardware for synchronous ioctl
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_CPUID)
		return kvm_set_cpuid(vcpu, val);

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 inject through GINTC */
		gintc = (val >> 2) & 0xff;
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

		gintc = val & ~(0xffUL << 2);
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return 0;
	}

	kvm_write_sw_gcsr(csr, id, val);

	/*
	 * After modifying the PMU CSR register value of the vcpu,
	 * set KVM_REQ_PMU if the PMU CSRs are now in use.
	 */
	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
		unsigned long val;

		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);

		if (val & KVM_PMU_EVENT_ENABLED)
			kvm_make_request(KVM_REQ_PMU, vcpu);
	}

	return 0;
}
static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
		return -EINVAL;

	switch (id) {
	case LOONGARCH_CPUCFG0:
		*v = GENMASK(31, 0);
		return 0;
	case LOONGARCH_CPUCFG1:
		/* CPUCFG1_MSGINT is not supported by KVM */
		*v = GENMASK(25, 0);
		return 0;
	case LOONGARCH_CPUCFG2:
		/* CPUCFG2 features unconditionally supported by KVM */
		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
		     CPUCFG2_LSPW | CPUCFG2_LAM;
		/*
		 * For the ISA extensions listed below, if one is supported
		 * by the host, then it is also supported by KVM.
		 */
		if (cpu_has_lsx)
			*v |= CPUCFG2_LSX;
		if (cpu_has_lasx)
			*v |= CPUCFG2_LASX;
		if (cpu_has_lbt_x86)
			*v |= CPUCFG2_X86BT;
		if (cpu_has_lbt_arm)
			*v |= CPUCFG2_ARMBT;
		if (cpu_has_lbt_mips)
			*v |= CPUCFG2_MIPSBT;

		return 0;
	case LOONGARCH_CPUCFG3:
		*v = GENMASK(16, 0);
		return 0;
	case LOONGARCH_CPUCFG4:
	case LOONGARCH_CPUCFG5:
		*v = GENMASK(31, 0);
		return 0;
	case LOONGARCH_CPUCFG6:
		if (cpu_has_pmp)
			*v = GENMASK(14, 0);
		else
			*v = 0;
		return 0;
	case LOONGARCH_CPUCFG16:
		*v = GENMASK(16, 0);
		return 0;
	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
		*v = GENMASK(30, 0);
		return 0;
	default:
		/*
		 * CPUCFG bits should be zero if reserved by HW or not
		 * supported by KVM.
		 */
		*v = 0;
		return 0;
	}
}
static int kvm_check_cpucfg(int id, u64 val)
{
	int ret;
	u64 mask = 0;

	ret = _kvm_get_cpucfg_mask(id, &mask);
	if (ret)
		return ret;

	if (val & ~mask)
		/* Unsupported features and/or the higher 32 bits should not be set */
		return -EINVAL;

	switch (id) {
	case LOONGARCH_CPUCFG2:
		if (!(val & CPUCFG2_LLFTP))
			/* Guests must have a constant timer */
			return -EINVAL;
		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
			/* Single and double float point must both be set when FP is enabled */
			return -EINVAL;
		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
			/* LSX architecturally implies FP but val does not satisfy that */
			return -EINVAL;
		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
			/* LASX architecturally implies LSX and FP but val does not satisfy that */
			return -EINVAL;
		return 0;
	case LOONGARCH_CPUCFG6:
		if (val & CPUCFG6_PMP) {
			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);

			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
				return -EINVAL;
			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
				return -EINVAL;
			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
				return -EINVAL;
		}
		return 0;
	default:
		/*
		 * Values for the other CPUCFG IDs are not being further validated
		 * besides the mask check above.
		 */
		return 0;
	}
}
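
/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG backends. The register id encodes a
 * type (CSR, CPUCFG, LBT, or KVM-specific pseudo registers such as the
 * counter offset and the software debug instruction) and an index, which
 * the accessors below dispatch on.
 */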
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 *v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_getcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			*v = vcpu->arch.cpucfg[id];
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_LBT:
		if (!kvm_guest_has_lbt(&vcpu->arch))
			return -ENXIO;

		switch (reg->id) {
		case KVM_REG_LOONGARCH_LBT_SCR0:
			*v = vcpu->arch.lbt.scr0;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR1:
			*v = vcpu->arch.lbt.scr1;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR2:
			*v = vcpu->arch.lbt.scr2;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR3:
			*v = vcpu->arch.lbt.scr3;
			break;
		case KVM_REG_LOONGARCH_LBT_EFLAGS:
			*v = vcpu->arch.lbt.eflags;
			break;
		case KVM_REG_LOONGARCH_LBT_FTOP:
			*v = vcpu->arch.fpu.ftop;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			*v = drdtime() + vcpu->kvm->arch.time_offset;
			break;
		case KVM_REG_LOONGARCH_DEBUG_INST:
			*v = INSN_HVCL | KVM_HCALL_SWDBG;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = kvm_get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		ret = put_user(v, (u64 __user *)(long)reg->addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_setcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		ret = kvm_check_cpucfg(id, v);
		if (ret)
			break;
		vcpu->arch.cpucfg[id] = (u32)v;
		if (id == LOONGARCH_CPUCFG6)
			vcpu->arch.max_pmu_csrid =
				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
		break;
	case KVM_REG_LOONGARCH_LBT:
		if (!kvm_guest_has_lbt(&vcpu->arch))
			return -ENXIO;

		switch (reg->id) {
		case KVM_REG_LOONGARCH_LBT_SCR0:
			vcpu->arch.lbt.scr0 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR1:
			vcpu->arch.lbt.scr1 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR2:
			vcpu->arch.lbt.scr2 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR3:
			vcpu->arch.lbt.scr3 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_EFLAGS:
			vcpu->arch.lbt.eflags = v;
			break;
		case KVM_REG_LOONGARCH_LBT_FTOP:
			vcpu->arch.fpu.ftop = v;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			/*
			 * The gftoffset is relative to the board, not to the vcpu;
			 * only set it the first time for an SMP system.
			 */
			if (vcpu->vcpu_id == 0)
				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
			break;
		case KVM_REG_LOONGARCH_VCPU_RESET:
			vcpu->arch.st.guest_addr = 0;
			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = get_user(v, (u64 __user *)(long)reg->addr);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return kvm_set_one_reg(vcpu, reg, v);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->pc = vcpu->arch.pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];

	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.pc = regs->pc;

	return 0;
}
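
/*
 * Remaining vCPU ioctl backends. KVM_ENABLE_CAP is currently a stub (the
 * FPU is enabled by default), while the KVM_{HAS,GET,SET}_DEVICE_ATTR
 * handlers below dispatch on attr->group to the CPUCFG and PV time control
 * groups.
 */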
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	/* FPU is enabled by default, will support LSX/LASX later. */
	return -EINVAL;
}

static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case LOONGARCH_CPUCFG2:
	case LOONGARCH_CPUCFG6:
		return 0;
	case CPUCFG_KVM_FEATURE:
		return 0;
	default:
		return -ENXIO;
	}

	return -ENXIO;
}

static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	return 0;
}

static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}
static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int ret = 0;
	u64 val;
	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;

	switch (attr->attr) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
		if (ret)
			return ret;
		break;
	case CPUCFG_KVM_FEATURE:
		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		break;
	default:
		return -ENXIO;
	}

	put_user(val, uaddr);

	return ret;
}

static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	u64 gpa;
	u64 __user *user = (u64 __user *)attr->addr;

	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	gpa = vcpu->arch.st.guest_addr;
	if (put_user(gpa, user))
		return -EFAULT;

	return 0;
}

static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}
static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	u64 val, valid;
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;

	switch (attr->attr) {
	case CPUCFG_KVM_FEATURE:
		if (get_user(val, user))
			return -EFAULT;

		valid = LOONGARCH_PV_FEAT_MASK;
		if (val & ~valid)
			return -EINVAL;

		/* All vCPUs need to set the same PV features */
		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
				&& ((kvm->arch.pv_features & valid) != val))
			return -EINVAL;
		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
		return 0;
	default:
		return -ENXIO;
	}
}

static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int idx, ret = 0;
	u64 gpa, __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;

	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	if (get_user(gpa, user))
		return -EFAULT;

	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
		return -EINVAL;

	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
		vcpu->arch.st.guest_addr = gpa;
		return 0;
	}

	/* Check the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret) {
		vcpu->arch.st.guest_addr = gpa;
		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
	}

	return ret;
}

static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	long r;
	struct kvm_device_attr attr;
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	/*
	 * Only software CSR should be modified
	 *
	 * If any hardware CSR register is modified, vcpu_load/vcpu_put pair
	 * should be used. Since the CSR registers are owned by this vcpu, if
	 * we switch to other vcpus, those vcpus need to reload the CSR
	 * registers.
	 *
	 * If a software CSR is modified, the KVM_LARCH_HWCSR_USABLE bit should
	 * be cleared in vcpu->arch.aux_inuse, and vcpu_load will check the
	 * aux_inuse flag and reload the CSR registers from software.
	 */

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else
			r = kvm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i;

	fpu->fcc = vcpu->arch.fpu.fcc;
	fpu->fcsr = vcpu->arch.fpu.fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i;

	vcpu->arch.fpu.fcc = fpu->fcc;
	vcpu->arch.fpu.fcsr = fpu->fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}
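
/*
 * LBT (binary translation) state is handled lazily, mirroring the FPU
 * handling below: kvm_own_lbt() enables CSR.EUEN.LBTEN and restores the
 * guest scratch registers on first use, kvm_lose_lbt() saves them and
 * turns access back off. kvm_check_fcsr() pre-enables LBT when FCSR.TM is
 * set because the TOP register save/restore would otherwise fault.
 */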
#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_lbt(&vcpu->arch))
		return -EINVAL;

	preempt_disable();
	set_csr_euen(CSR_EUEN_LBTEN);
	_restore_lbt(&vcpu->arch.lbt);
	vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
	preempt_enable();

	return 0;
}

static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
		_save_lbt(&vcpu->arch.lbt);
		clear_csr_euen(CSR_EUEN_LBTEN);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
	}
	preempt_enable();
}

static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
{
	/*
	 * If TM is enabled, top register save/restore will
	 * cause lbt exception, here enable lbt in advance
	 */
	if (fcsr & FPU_CSR_TM)
		kvm_own_lbt(vcpu);
}

static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
			return;
		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
	}
}
#else
static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
#endif
/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/*
	 * Enable FPU for guest
	 * Set FR and FRE according to guest context
	 */
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_FPEN);

	kvm_restore_fpu(&vcpu->arch.fpu);
	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

	preempt_enable();
}
#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	/* Enable LSX for guest */
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
	case KVM_LARCH_FPU:
		/*
		 * Guest FPU state already loaded,
		 * only restore upper LSX state
		 */
		_restore_lsx_upper(&vcpu->arch.fpu);
		break;
	default:
		/*
		 * Neither FP nor LSX already active,
		 * restore full LSX state
		 */
		kvm_restore_lsx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif
#ifdef CONFIG_CPU_HAS_LASX
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
	case KVM_LARCH_LSX:
	case KVM_LARCH_LSX | KVM_LARCH_FPU:
		/* Guest LSX state already loaded, only restore upper LASX state */
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	case KVM_LARCH_FPU:
		/* Guest FP state already loaded, only restore upper LSX & LASX state */
		_restore_lsx_upper(&vcpu->arch.fpu);
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	default:
		/* Neither FP nor LSX already active, restore full LASX state */
		kvm_restore_lasx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif
/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	kvm_check_fcsr_alive(vcpu);
	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
		kvm_save_lasx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);

		/* Disable LASX & LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
		kvm_save_lsx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

		/* Disable LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		kvm_save_fpu(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_csr_euen(CSR_EUEN_FPEN);
	}
	kvm_lose_lbt(vcpu);

	preempt_enable();
}
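
/*
 * KVM_INTERRUPT ioctl backend: a positive irq number queues the interrupt
 * for injection, a negative one dequeues it, and the vCPU is kicked so a
 * sleeping or running vCPU notices the change.
 */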
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	int intr = (int)irq->irq;

	if (intr > 0)
		kvm_queue_irq(vcpu, intr);
	else if (intr < 0)
		kvm_dequeue_irq(vcpu, -intr);
	else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}

	kvm_vcpu_kick(vcpu);

	return 0;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	unsigned long timer_hz;
	struct loongarch_csrs *csr;

	vcpu->arch.vpid = 0;
	vcpu->arch.flush_gpa = INVALID_GPA;

	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

	vcpu->arch.handle_exit = kvm_handle_exit;
	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
	if (!vcpu->arch.csr)
		return -ENOMEM;

	/*
	 * All kvm exceptions share one exception entry, and host <-> guest
	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS
	 * info here.
	 */
	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

	vcpu->arch.last_sched_cpu = -1;

	/* Init ipi_state lock */
	spin_lock_init(&vcpu->arch.ipi_state.lock);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */
	timer_hz = calc_const_freq();
	kvm_init_timer(vcpu, timer_hz);

	/* Set Initialize mode for guest */
	csr = vcpu->arch.csr;
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;

	return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct kvm_context *context;

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_drop_cpuid(vcpu);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu.
	 */
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
		if (context->last_vcpu == vcpu)
			context->last_vcpu = NULL;
	}
}
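
/*
 * Load the guest context onto this physical CPU. The full guest CSR image
 * is reloaded only when the hardware copy may be stale, i.e. when the vCPU
 * migrated to a different CPU or another vCPU ran here last; otherwise the
 * KVM_LARCH_HWCSR_USABLE flag short-circuits the restore.
 */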
static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	bool migrated;
	struct kvm_context *context;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last vCPU to run on this CPU?
	 * If not, any old guest state from this vCPU will have been clobbered.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	if (migrated || (context->last_vcpu != vcpu))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
	context->last_vcpu = vcpu;

	/* Restore timer state regardless */
	kvm_restore_timer(vcpu);

	/* Control guest page CCA attribute */
	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

	/* Restore hardware PMU CSRs */
	kvm_restore_pmu(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
		return 0;

	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

	/* Restore guest CSR registers */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

	/* Restore Root.GINTC from unused Guest.GINTC register */
	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next vCPU from succeeding by matching a LL on
	 * the previous vCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}
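
/*
 * Counterpart of _kvm_vcpu_load(): save the guest context when the vCPU is
 * scheduled out. The bulk CSR save is skipped when the software image is
 * already up to date (KVM_LARCH_SWCSR_LATEST); the timer and the pending
 * interrupt state in GINTC are saved unconditionally.
 */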
static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_lose_fpu(vcpu);

	/*
	 * Update CSR state from hardware if software CSR state is stale,
	 * most CSR registers are kept unchanged during process context
	 * switch except CSR registers like remaining timer tick value and
	 * injected interrupt state.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
		goto out;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
	kvm_save_timer(vcpu);
	/* Save Root.GINTC into unused Guest.GINTC register */
	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

	return 0;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int cpu;
	unsigned long flags;

	local_irq_save(flags);
	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	_kvm_vcpu_put(vcpu, cpu);
	local_irq_restore(flags);
}
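
/*
 * Main vCPU run loop entry. Any MMIO or IOCSR read left over from the
 * previous exit is completed first, then the vCPU enters the guest via the
 * low-level enter_guest trampoline until an exit has to be handled in
 * userspace.
 */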
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;
	struct kvm_run *run = vcpu->run;

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	switch (run->exit_reason) {
	case KVM_EXIT_HYPERCALL:
		kvm_complete_user_service(vcpu, run);
		break;
	case KVM_EXIT_LOONGARCH_IOCSR:
		if (!run->iocsr_io.is_write)
			kvm_complete_iocsr_read(vcpu, run);
		break;
	}

	if (!vcpu->wants_to_run)
		return r;

	/* Clear exit_reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	lose_fpu(1);

	kvm_sigset_activate(vcpu);
	r = kvm_pre_enter_guest(vcpu);
	if (r != RESUME_GUEST)
		goto out;

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_enter(vcpu);
	r = kvm_loongarch_ops->enter_guest(run, vcpu);

	trace_kvm_out(vcpu);
	/*
	 * Guest exit is already recorded at kvm_handle_exit()
	 * return value must not be RESUME_GUEST
	 */
	local_irq_enable();
out:
	kvm_sigset_deactivate(vcpu);

	return r;
}