// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

#define CREATE_TRACE_POINTS
#include "trace.h"
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, hypercall_exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
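
/*
 * Host PMU context handling: while the guest owns the hardware PMU, the
 * host's perf counter and control CSRs are stashed in the per-CPU
 * kvm_context by kvm_save_host_pmu() and written back by
 * kvm_restore_host_pmu() once the guest gives the PMU up.
 */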
static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
{
	struct kvm_context *context;

	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	context->perf_cntr[0] = read_csr_perfcntr0();
	context->perf_cntr[1] = read_csr_perfcntr1();
	context->perf_cntr[2] = read_csr_perfcntr2();
	context->perf_cntr[3] = read_csr_perfcntr3();
	context->perf_ctrl[0] = write_csr_perfctrl0(0);
	context->perf_ctrl[1] = write_csr_perfctrl1(0);
	context->perf_ctrl[2] = write_csr_perfctrl2(0);
	context->perf_ctrl[3] = write_csr_perfctrl3(0);
}
static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
{
	struct kvm_context *context;

	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
	write_csr_perfcntr0(context->perf_cntr[0]);
	write_csr_perfcntr1(context->perf_cntr[1]);
	write_csr_perfcntr2(context->perf_cntr[2]);
	write_csr_perfcntr3(context->perf_cntr[3]);
	write_csr_perfctrl0(context->perf_ctrl[0]);
	write_csr_perfctrl1(context->perf_ctrl[1]);
	write_csr_perfctrl2(context->perf_ctrl[2]);
	write_csr_perfctrl3(context->perf_ctrl[3]);
}
static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}
static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}
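
/*
 * Give this vCPU ownership of the hardware PMU: save the host PMU state,
 * expose PM0..PM(num) to the guest via the GCFG.GPERF field, then load the
 * guest's PMU CSRs.
 */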
static int kvm_own_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	if (!kvm_guest_has_pmu(&vcpu->arch))
		return -EINVAL;

	kvm_save_host_pmu(vcpu);

	/* Set PM0-PM(num) to guest */
	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
	write_csr_gcfg(val);

	kvm_restore_guest_pmu(vcpu);

	return 0;
}
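
/*
 * Take the hardware PMU back from the guest on VM-exit: save the guest PMU
 * CSRs, turn off guest PMU access in GCFG, and restore the host PMU state.
 */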
static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long val;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
		return;

	kvm_save_guest_pmu(vcpu);

	/* Disable pmu access from guest */
	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);

	/*
	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
	 * exiting the guest, so that the next trap into the guest does not
	 * need to deal with the PMU CSR context.
	 */
	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
	if (!(val & KVM_PMU_EVENT_ENABLED))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;

	kvm_restore_host_pmu(vcpu);
}
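
/*
 * kvm_restore_pmu() only queues KVM_REQ_PMU when the vCPU was using the PMU;
 * kvm_check_pmu() services that request before the next guest entry.
 */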
static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
		kvm_make_request(KVM_REQ_PMU, vcpu);
}
static void kvm_check_pmu(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
		kvm_own_pmu(vcpu);
		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
	}
}
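
/*
 * Publish steal time to the guest's kvm_steal_time page. The version field
 * is bumped before and after the update (seqcount style) so the guest can
 * detect a concurrent or torn update and retry its read.
 */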
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	u32 version;
	u64 steal;
	gpa_t gpa;
	struct kvm_memslots *slots;
	struct kvm_steal_time __user *st;
	struct gfn_to_hva_cache *ghc;

	ghc = &vcpu->arch.st.cache;
	gpa = vcpu->arch.st.guest_addr;
	if (!(gpa & KVM_STEAL_PHYS_VALID))
		return;

	gpa &= KVM_STEAL_PHYS_MASK;
	slots = kvm_memslots(vcpu->kvm);
	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
			ghc->gpa = INVALID_GPA;
			return;
		}
	}

	st = (struct kvm_steal_time __user *)ghc->hva;
	unsafe_get_user(version, &st->version, out);
	if (version & 1)
		version += 1; /* first time write, random junk */

	version += 1;
	unsafe_put_user(version, &st->version, out);
	smp_wmb();

	unsafe_get_user(steal, &st->steal, out);
	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
	unsafe_put_user(steal, &st->steal, out);

	smp_wmb();
	version += 1;
	unsafe_put_user(version, &st->version, out);
out:
	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0;	/* Drop vpid for this vCPU */

	if (kvm_dirty_ring_check_request(vcpu))
		return RESUME_HOST;

	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
		kvm_update_stolen_time(vcpu);

	return RESUME_GUEST;
}
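
/*
 * Requests that must be handled with interrupts disabled, immediately before
 * entering the guest: currently only deferred GPA TLB flushes.
 */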
static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
{
	lockdep_assert_irqs_disabled();
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
		if (vcpu->arch.flush_gpa != INVALID_GPA) {
			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
			vcpu->arch.flush_gpa = INVALID_GPA;
		}
}
/*
 * Check and handle pending signals and vCPU requests etc.
 * Run with irqs enabled and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int ret;

	/*
	 * Check conditions before entering the guest
	 */
	ret = xfer_to_guest_mode_handle_work(vcpu);
	if (ret < 0)
		return ret;

	ret = kvm_check_requests(vcpu);

	return ret;
}
/*
 * Called with irqs enabled
 *
 * Return: RESUME_GUEST if we should enter the guest, with irqs disabled
 *         Others if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
	int ret;

	do {
		ret = kvm_enter_guest_check(vcpu);
		if (ret != RESUME_GUEST)
			break;

		/*
		 * Handle the vcpu timer and interrupts, check requests and
		 * check the vmid before the vcpu enters the guest
		 */
		local_irq_disable();
		kvm_deliver_intr(vcpu);
		kvm_deliver_exception(vcpu);
		/* Make sure the vcpu mode has been written */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
		kvm_check_vpid(vcpu);
		kvm_check_pmu(vcpu);

		/*
		 * Called after kvm_check_vpid(), since that updates CSR.GSTAT,
		 * which is used by kvm_flush_tlb_gpa(); it may also clear the
		 * pending KVM_REQ_TLB_FLUSH_GPA bit.
		 */
		kvm_late_check_requests(vcpu);
		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
		/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
			/* make sure the vcpu mode has been written */
			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
			local_irq_enable();
			ret = -EAGAIN;
		}
	} while (ret != RESUME_GUEST);

	return ret;
}
/*
 * Return 1 for resume guest and "<= 0" for resume host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	unsigned long estat = vcpu->arch.host_estat;
	u32 intr = estat & 0x1fff; /* Ignore NMI */
	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

	kvm_lose_pmu(vcpu);

	guest_timing_exit_irqoff();
	guest_state_exit_irqoff();
	local_irq_enable();

	trace_kvm_exit(vcpu, ecode);
	if (ecode) {
		ret = kvm_handle_fault(vcpu, ecode);
	} else {
		WARN(!intr, "vm exiting with suspicious irq\n");
		++vcpu->stat.int_exits;
	}

	if (ret == RESUME_GUEST)
		ret = kvm_pre_enter_guest(vcpu);

	if (ret != RESUME_GUEST) {
		local_irq_disable();
		return ret;
	}

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_reenter(vcpu);

	return RESUME_GUEST;
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.irq_pending) &&
		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Protect from TOD sync and vcpu_load/put() */
	preempt_disable();
	ret = kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
	preempt_enable();

	return ret;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;

	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}

	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

	return 0;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.mp_state = *mp_state;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE)
		vcpu->guest_debug = dbg->control;
	else
		vcpu->guest_debug = 0;

	return 0;
}
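
/*
 * Each vCPU's physical CPUID (CSR.CPUID) must be unique: the per-VM
 * phyid_map records which IDs are in use, a duplicate of the current
 * setting is silently ignored, and changing or sharing a CPUID is refused.
 */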
static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (val >= KVM_MAX_PHYID)
		return -EINVAL;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
		/* Discard duplicated CPUID set operation */
		if (cpuid == val) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
		 * CPUID is already set before
		 * Forbid changing to a different CPUID at runtime
		 */
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

	if (map->phys_map[val].enabled) {
		/* Discard duplicated CPUID set operation */
		if (vcpu == map->phys_map[val].vcpu) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
		 * New CPUID is already set by another vcpu
		 * Forbid sharing the same CPUID between different vcpus
		 */
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
	map->phys_map[val].enabled = true;
	map->phys_map[val].vcpu = vcpu;
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);

	return 0;
}
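
/*
 * Release this vCPU's entry in the phyid_map (used on vCPU destroy) and
 * reset CSR.CPUID to the out-of-range KVM_MAX_PHYID marker.
 */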
static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	if (cpuid >= KVM_MAX_PHYID)
		return;

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if (map->phys_map[cpuid].enabled) {
		map->phys_map[cpuid].vcpu = NULL;
		map->phys_map[cpuid].enabled = false;
		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
	}
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
}
struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
{
	struct kvm_phyid_map *map;

	if (cpuid >= KVM_MAX_PHYID)
		return NULL;

	map = kvm->arch.phyid_map;
	if (!map->phys_map[cpuid].enabled)
		return NULL;

	return map->phys_map[cpuid].vcpu;
}
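
/*
 * Guest CSR accessors for the ONE_REG interface. Reads are served from the
 * software CSR copy; ESTAT is special because the pending interrupt bits
 * (IP0..IP7) live in GINTC and have to be folded into the returned value.
 */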
static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/*
		 * Sync pending interrupts into ESTAT so that the interrupt
		 * state is preserved across the VM migration stage
		 */
		kvm_deliver_intr(vcpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		/* ESTAT IP0~IP7 get from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get software CSR state since software state is consistent
	 * with hardware for synchronous ioctl
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}
static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_CPUID)
		return kvm_set_cpuid(vcpu, val);

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 inject through GINTC */
		gintc = (val >> 2) & 0xff;
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

		gintc = val & ~(0xffUL << 2);
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return 0;
	}

	kvm_write_sw_gcsr(csr, id, val);

	/*
	 * After modifying a PMU CSR of the vcpu, request KVM_REQ_PMU
	 * if the PMU CSRs are in use.
	 */
	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
		unsigned long val;

		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);

		if (val & KVM_PMU_EVENT_ENABLED)
			kvm_make_request(KVM_REQ_PMU, vcpu);
	}

	return 0;
}
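
/*
 * Mask of CPUCFG bits that userspace is allowed to set for each CPUCFG id;
 * bits outside the mask are either reserved by hardware or not supported
 * by KVM.
 */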
static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
		return -EINVAL;

	switch (id) {
	case LOONGARCH_CPUCFG0:
		*v = GENMASK(31, 0);
		return 0;
	case LOONGARCH_CPUCFG1:
		/* CPUCFG1_MSGINT is not supported by KVM */
		*v = GENMASK(25, 0);
		return 0;
	case LOONGARCH_CPUCFG2:
		/* CPUCFG2 features unconditionally supported by KVM */
		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
		     CPUCFG2_LSPW | CPUCFG2_LAM;
		/*
		 * For the ISA extensions listed below, if one is supported
		 * by the host, then it is also supported by KVM.
		 */
		if (cpu_has_lsx)
			*v |= CPUCFG2_LSX;
		if (cpu_has_lasx)
			*v |= CPUCFG2_LASX;
		if (cpu_has_lbt_x86)
			*v |= CPUCFG2_X86BT;
		if (cpu_has_lbt_arm)
			*v |= CPUCFG2_ARMBT;
		if (cpu_has_lbt_mips)
			*v |= CPUCFG2_MIPSBT;

		return 0;
	case LOONGARCH_CPUCFG3:
		*v = GENMASK(16, 0);
		return 0;
	case LOONGARCH_CPUCFG4:
	case LOONGARCH_CPUCFG5:
		*v = GENMASK(31, 0);
		return 0;
	case LOONGARCH_CPUCFG6:
		if (cpu_has_pmp)
			*v = GENMASK(14, 0);
		else
			*v = 0;
		return 0;
	case LOONGARCH_CPUCFG16:
		*v = GENMASK(16, 0);
		return 0;
	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
		*v = GENMASK(30, 0);
		return 0;
	default:
		/*
		 * CPUCFG bits should be zero if reserved by HW or not
		 * supported by KVM.
		 */
		*v = 0;
		return 0;
	}
}
static int kvm_check_cpucfg(int id, u64 val)
{
	int ret;
	u64 mask = 0;

	ret = _kvm_get_cpucfg_mask(id, &mask);
	if (ret)
		return ret;

	if (val & ~mask)
		/* Unsupported features and/or the higher 32 bits should not be set */
		return -EINVAL;

	switch (id) {
	case LOONGARCH_CPUCFG2:
		if (!(val & CPUCFG2_LLFTP))
			/* Guests must have a constant timer */
			return -EINVAL;
		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
			/* Single and double float point must both be set when FP is enabled */
			return -EINVAL;
		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
			/* LSX architecturally implies FP but val does not satisfy that */
			return -EINVAL;
		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
			/* LASX architecturally implies LSX and FP but val does not satisfy that */
			return -EINVAL;
		return 0;
	case LOONGARCH_CPUCFG6:
		if (val & CPUCFG6_PMP) {
			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
				return -EINVAL;
			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
				return -EINVAL;
			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
				return -EINVAL;
		}
		return 0;
	default:
		/*
		 * Values for the other CPUCFG IDs are not further validated
		 * beyond the mask check above.
		 */
		return 0;
	}
}
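
/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG backends. The register id encodes a
 * type (CSR, CPUCFG, LBT or KVM-specific pseudo registers) plus an index,
 * and the helpers below dispatch on that type.
 */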
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 *v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_getcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			*v = vcpu->arch.cpucfg[id];
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_LBT:
		if (!kvm_guest_has_lbt(&vcpu->arch))
			return -ENXIO;

		switch (reg->id) {
		case KVM_REG_LOONGARCH_LBT_SCR0:
			*v = vcpu->arch.lbt.scr0;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR1:
			*v = vcpu->arch.lbt.scr1;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR2:
			*v = vcpu->arch.lbt.scr2;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR3:
			*v = vcpu->arch.lbt.scr3;
			break;
		case KVM_REG_LOONGARCH_LBT_EFLAGS:
			*v = vcpu->arch.lbt.eflags;
			break;
		case KVM_REG_LOONGARCH_LBT_FTOP:
			*v = vcpu->arch.fpu.ftop;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			*v = drdtime() + vcpu->kvm->arch.time_offset;
			break;
		case KVM_REG_LOONGARCH_DEBUG_INST:
			*v = INSN_HVCL | KVM_HCALL_SWDBG;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = kvm_get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		ret = put_user(v, (u64 __user *)(long)reg->addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_setcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		ret = kvm_check_cpucfg(id, v);
		if (ret)
			break;
		vcpu->arch.cpucfg[id] = (u32)v;
		if (id == LOONGARCH_CPUCFG6)
			vcpu->arch.max_pmu_csrid =
				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
		break;
	case KVM_REG_LOONGARCH_LBT:
		if (!kvm_guest_has_lbt(&vcpu->arch))
			return -ENXIO;

		switch (reg->id) {
		case KVM_REG_LOONGARCH_LBT_SCR0:
			vcpu->arch.lbt.scr0 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR1:
			vcpu->arch.lbt.scr1 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR2:
			vcpu->arch.lbt.scr2 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_SCR3:
			vcpu->arch.lbt.scr3 = v;
			break;
		case KVM_REG_LOONGARCH_LBT_EFLAGS:
			vcpu->arch.lbt.eflags = v;
			break;
		case KVM_REG_LOONGARCH_LBT_FTOP:
			vcpu->arch.fpu.ftop = v;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			/*
			 * The counter offset (gftoffset) is per board, not per
			 * vcpu; only set it once (by vCPU 0) on an SMP system.
			 */
			if (vcpu->vcpu_id == 0)
				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
			break;
		case KVM_REG_LOONGARCH_VCPU_RESET:
			vcpu->arch.st.guest_addr = 0;
			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = get_user(v, (u64 __user *)(long)reg->addr);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return kvm_set_one_reg(vcpu, reg, v);
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->pc = vcpu->arch.pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];

	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.pc = regs->pc;

	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	/* FPU is enabled by default, will support LSX/LASX later. */
	return -EINVAL;
}
static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case LOONGARCH_CPUCFG2:
	case LOONGARCH_CPUCFG6:
		return 0;
	case CPUCFG_KVM_FEATURE:
		return 0;
	default:
		return -ENXIO;
	}

	return -ENXIO;
}
static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	return 0;
}
static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
		break;
	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}
static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int ret = 0;
	uint64_t val;
	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;

	switch (attr->attr) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
		if (ret)
			return ret;
		break;
	case CPUCFG_KVM_FEATURE:
		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		break;
	default:
		return -ENXIO;
	}

	put_user(val, uaddr);

	return ret;
}
static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	u64 gpa;
	u64 __user *user = (u64 __user *)attr->addr;

	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	gpa = vcpu->arch.st.guest_addr;
	if (put_user(gpa, user))
		return -EFAULT;

	return 0;
}
*vcpu
,
1037 struct kvm_device_attr
*attr
)
1041 switch (attr
->group
) {
1042 case KVM_LOONGARCH_VCPU_CPUCFG
:
1043 ret
= kvm_loongarch_cpucfg_get_attr(vcpu
, attr
);
1045 case KVM_LOONGARCH_VCPU_PVTIME_CTRL
:
1046 ret
= kvm_loongarch_pvtime_get_attr(vcpu
, attr
);
static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	u64 val, valid;
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;

	switch (attr->attr) {
	case CPUCFG_KVM_FEATURE:
		if (get_user(val, user))
			return -EFAULT;

		valid = LOONGARCH_PV_FEAT_MASK;
		if (val & ~valid)
			return -EINVAL;

		/* All vCPUs need to set the same PV features */
		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
				&& ((kvm->arch.pv_features & valid) != val))
			return -EINVAL;
		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
		return 0;
	default:
		return -ENXIO;
	}
}
static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int idx, ret = 0;
	u64 gpa, __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;

	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
		return -ENXIO;

	if (get_user(gpa, user))
		return -EFAULT;

	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
		return -EINVAL;

	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
		vcpu->arch.st.guest_addr = gpa;
		return 0;
	}

	/* Check the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret) {
		vcpu->arch.st.guest_addr = gpa;
		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
	}

	return ret;
}
*vcpu
,
1120 struct kvm_device_attr
*attr
)
1124 switch (attr
->group
) {
1125 case KVM_LOONGARCH_VCPU_CPUCFG
:
1126 ret
= kvm_loongarch_cpucfg_set_attr(vcpu
, attr
);
1128 case KVM_LOONGARCH_VCPU_PVTIME_CTRL
:
1129 ret
= kvm_loongarch_pvtime_set_attr(vcpu
, attr
);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	long r;
	struct kvm_device_attr attr;
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	/*
	 * Only software CSR should be modified
	 *
	 * If any hardware CSR register is modified, the vcpu_load/vcpu_put
	 * pair should be used. Since the CSR registers are owned by this
	 * vcpu, switching to another vcpu would require that vcpu to reload
	 * the CSR registers.
	 *
	 * If a software CSR is modified, the KVM_LARCH_HWCSR_USABLE bit
	 * should be cleared in vcpu->arch.aux_inuse, and vcpu_load will
	 * check the aux_inuse flag and reload the CSR registers from software.
	 */

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else
			r = kvm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i;

	fpu->fcc = vcpu->arch.fpu.fcc;
	fpu->fcsr = vcpu->arch.fpu.fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i;

	vcpu->arch.fpu.fcc = fpu->fcc;
	vcpu->arch.fpu.fcsr = fpu->fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}
#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_lbt(&vcpu->arch))
		return -EINVAL;

	preempt_disable();
	set_csr_euen(CSR_EUEN_LBTEN);
	_restore_lbt(&vcpu->arch.lbt);
	vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
	preempt_enable();

	return 0;
}

static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
		_save_lbt(&vcpu->arch.lbt);
		clear_csr_euen(CSR_EUEN_LBTEN);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
	}
	preempt_enable();
}

static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
{
	/*
	 * If TM is enabled, top register save/restore will
	 * cause an lbt exception, so enable lbt in advance here.
	 */
	if (fcsr & FPU_CSR_TM)
		kvm_own_lbt(vcpu);
}

static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
			return;
		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
	}
}
#else
static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
#endif
/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/*
	 * Enable FPU for guest
	 * Set FR and FRE according to guest context
	 */
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_FPEN);

	kvm_restore_fpu(&vcpu->arch.fpu);
	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

	preempt_enable();
}
#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	/* Enable LSX for guest */
	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
	case KVM_LARCH_FPU:
		/*
		 * Guest FPU state already loaded,
		 * only restore upper LSX state
		 */
		_restore_lsx_upper(&vcpu->arch.fpu);
		break;
	default:
		/*
		 * Neither FP nor LSX already active,
		 * restore full LSX state
		 */
		kvm_restore_lsx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif
#ifdef CONFIG_CPU_HAS_LASX
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
	case KVM_LARCH_LSX:
	case KVM_LARCH_LSX | KVM_LARCH_FPU:
		/* Guest LSX state already loaded, only restore upper LASX state */
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	case KVM_LARCH_FPU:
		/* Guest FP state already loaded, only restore upper LSX & LASX state */
		_restore_lsx_upper(&vcpu->arch.fpu);
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	default:
		/* Neither FP nor LSX already active, restore full LASX state */
		kvm_restore_lasx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif
/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	kvm_check_fcsr_alive(vcpu);
	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
		kvm_save_lasx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);

		/* Disable LASX & LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
		kvm_save_lsx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

		/* Disable LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		kvm_save_fpu(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_csr_euen(CSR_EUEN_FPEN);
	}
	kvm_lose_lbt(vcpu);

	preempt_enable();
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	int intr = (int)irq->irq;

	if (intr > 0)
		kvm_queue_irq(vcpu, intr);
	else if (intr < 0)
		kvm_dequeue_irq(vcpu, -intr);
	else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}

	kvm_vcpu_kick(vcpu);

	return 0;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	unsigned long timer_hz;
	struct loongarch_csrs *csr;

	vcpu->arch.vpid = 0;
	vcpu->arch.flush_gpa = INVALID_GPA;

	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

	vcpu->arch.handle_exit = kvm_handle_exit;
	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
	if (!vcpu->arch.csr)
		return -ENOMEM;

	/*
	 * All kvm exceptions share one exception entry, and the host <-> guest
	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS
	 * info here.
	 */
	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Init ipi_state lock */
	spin_lock_init(&vcpu->arch.ipi_state.lock);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */
	timer_hz = calc_const_freq();
	kvm_init_timer(vcpu, timer_hz);

	/* Set Initialize mode for guest */
	csr = vcpu->arch.csr;
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

	/* Set cpuid */
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;

	return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct kvm_context *context;

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_drop_cpuid(vcpu);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu.
	 */
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
		if (context->last_vcpu == vcpu)
			context->last_vcpu = NULL;
	}
}
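
/*
 * Load this vCPU's guest context onto the current physical CPU. The hardware
 * guest CSRs are only reloaded when the vCPU migrated or another vCPU ran
 * here in the meantime; KVM_LARCH_HWCSR_USABLE tracks whether the hardware
 * copy is still valid.
 */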
static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	bool migrated;
	struct kvm_context *context;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last vCPU to run on this CPU?
	 * If not, any old guest state from this vCPU will have been clobbered.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	if (migrated || (context->last_vcpu != vcpu))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
	context->last_vcpu = vcpu;

	/* Restore timer state regardless */
	kvm_restore_timer(vcpu);

	/* Control guest page CCA attribute */
	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

	/* Restore hardware PMU CSRs */
	kvm_restore_pmu(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
		return 0;

	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

	/* Restore guest CSR registers */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

	/* Restore Root.GINTC from unused Guest.GINTC register */
	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next vCPU from succeeding by matching a LL on
	 * the previous vCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

	return 0;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}
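
/*
 * Save this vCPU's guest context when it is scheduled out. The full CSR
 * dump is skipped if the software copy is already up to date
 * (KVM_LARCH_SWCSR_LATEST); the timer and GINTC state are always saved.
 */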
static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_lose_fpu(vcpu);

	/*
	 * Update CSR state from hardware if software CSR state is stale,
	 * most CSR registers are kept unchanged during process context
	 * switch except CSR registers like remaining timer tick value and
	 * injected interrupt state.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
		goto out;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
	kvm_save_timer(vcpu);
	/* Save Root.GINTC into unused Guest.GINTC register */
	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

	return 0;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int cpu;
	unsigned long flags;

	local_irq_save(flags);
	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	_kvm_vcpu_put(vcpu, cpu);
	local_irq_restore(flags);
}
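
/*
 * KVM_RUN: finish any pending MMIO/IOCSR read, enter the guest via
 * kvm_loongarch_ops->enter_guest(), and return to userspace when the exit
 * handler decides not to resume the guest.
 */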
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;
	struct kvm_run *run = vcpu->run;

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
		if (!run->iocsr_io.is_write)
			kvm_complete_iocsr_read(vcpu, run);
	}

	if (!vcpu->wants_to_run)
		return r;

	/* Clear exit_reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	lose_fpu(1);
	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	r = kvm_pre_enter_guest(vcpu);
	if (r != RESUME_GUEST)
		goto out;

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_enter(vcpu);
	r = kvm_loongarch_ops->enter_guest(run, vcpu);

	trace_kvm_out(vcpu);
	/*
	 * Guest exit is already recorded at kvm_handle_exit();
	 * the return value must not be RESUME_GUEST
	 */
	local_irq_enable();
out:
	kvm_sigset_deactivate(vcpu);