// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))
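
/*
 * The ICH_LRn_EL2 and ICH_APmRn_EL2 registers can only be named with an
 * immediate index in an MRS/MSR instruction, so the helpers below expand
 * a runtime index into a switch over the individual registers.
 */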
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}
static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}
static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}
static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
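
/*
 * Save the guest's view of the CPU interface: snapshot the live List
 * Registers into the shadow vgic_v3 state and turn off the hypervisor
 * control interface so the hardware state stops changing under us.
 */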
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}
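
/*
 * Mirror of __vgic_v3_save_state(): re-enable the hypervisor control
 * interface and reload the shadow List Registers into hardware before
 * entering the guest.
 */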
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read
	 * the correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}
void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}
void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}
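
/*
 * The number of implemented Active Priority Registers per group depends
 * on ICH_VTR_EL2.PREbits: 5 bits of preemption need one register, 6 bits
 * need two, and 7 bits need all four. The switches below intentionally
 * fall through from the deeper cases to the shallower ones.
 */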
void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		/* Fall through */
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		/* Fall through */
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		/* Fall through */
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		/* Fall through */
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}
void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		/* Fall through */
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		/* Fall through */
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		/* Fall through */
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		/* Fall through */
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}
void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}
u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}
static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}
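
/*
 * For the trapped registers whose behaviour depends on the group
 * (IAR, EOIR, HPPIR, AP<m>Rn), the group-0 encodings all live in
 * CRm == 8, so any other CRm value in the trap syndrome is treated
 * as a group-1 access.
 */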
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff
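
/*
 * Scan the in-use List Registers for the highest-priority interrupt that
 * is pending and whose group is enabled in the VMCR. Returns the LR
 * index, or -1 with *lr_val set to a spurious INTID if nothing qualifies.
 */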
static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}
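
/*
 * Walk the AP registers of both groups and return the highest active
 * priority, rescaled to the full 8-bit priority space, or
 * GICv3_IDLE_PRIORITY if no priority is active at all.
 */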
static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}
/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}
/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
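
/*
 * Emulation of ICC_IAR{0,1}_EL1 reads: pick the highest-priority pending
 * LR, check it against the accessed group, the PMR and the running
 * priority, then mark it active, record its priority in the AP registers
 * and hand the INTID back to the guest (or return a spurious INTID).
 */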
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}
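
/*
 * Deactivate an interrupt held in an LR: drop the active bit and, for
 * hardware-mapped interrupts, forward the deactivation to the physical
 * distributor via ICC_DIR_EL1.
 */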
static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}
static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}
static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}
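
/*
 * The ICC_IGRPEN{0,1}_EL1 and ICC_BPR{0,1}_EL1 accessors below are
 * implemented purely as reads and rewrites of the cached ICH_VMCR_EL2
 * image; no List Register or AP register state is involved.
 */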
static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}
static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}
static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}
static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}
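
/*
 * ICC_HPPIR{0,1}_EL1: report the highest-priority pending INTID without
 * acknowledging it, returning a spurious INTID if the pending interrupt
 * belongs to the other group.
 */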
static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}
static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}
static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}
static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}
static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}
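
/*
 * Main entry point for trapped guest accesses to the ICC_* system
 * registers: decode the trap syndrome, dispatch to the matching
 * emulation handler above, and skip the trapped instruction. Returns 1
 * if the access was handled here, 0 if it must be forwarded to the host.
 */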
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);