// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"
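/*
 * Trap and GICv4 tuning flags, configured through the "kvm-arm.*" early
 * command-line parameters registered at the bottom of this file.
 */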
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;
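/*
 * Request an underflow maintenance interrupt, so that we get a chance to
 * refill the list registers once (nearly) all of them have been consumed.
 */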
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}
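/*
 * An LR signals an EOI maintenance interrupt when it has become invalid
 * (neither pending nor active), requested EOI notification, and is not a
 * hardware-backed interrupt.
 */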
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}
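/*
 * Fold the state the guest left in the list registers back into the
 * software model of each interrupt, then mark all list registers as free.
 */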
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * anyway.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));

			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}
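/* Reset a single list register to its empty (invalid) state. */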
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
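/* Pack the generic struct vgic_vmcr into the ICH_VMCR_EL2 register layout. */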
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn bit
		 * is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}
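/* Unpack ICH_VMCR_EL2 back into the generic struct vgic_vmcr. */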
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn bit
		 * is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
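/*
 * Reset the per-vcpu GICv3 CPU interface state and enable the virtual
 * CPU interface.
 */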
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}
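/*
 * Read the pending state of @irq from the LPI pending table in guest
 * memory and fold it into the emulated state, queueing the interrupt on
 * its target vcpu if it turned out to be pending.
 */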
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	int ret;
	u8 val;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}
/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
		    rdreg->base)
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}
/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in the index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}
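/* Return the registered redistributor region matching @index, or NULL. */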
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}
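/*
 * Validate the distributor and redistributor base addresses and register
 * the distributor MMIO regions so the VGICv3 can be put to use.
 */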
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	int c;

	if (vgic_ready(kvm))
		goto out;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %d redistributor base not set\n", c);
			ret = -ENXIO;
			goto out;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	return ret;
}
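/*
 * Enabled below when any of the GICv3 sysreg traps is requested, so that
 * trap handling is only paid for when it is actually in use.
 */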
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_info("GICv4 support %sabled\n",
			 gicv4_enable ? "en" : "dis");
	}

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

#ifdef CONFIG_ARM64
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}
#endif

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C"  : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}
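/* Restore the GICv3 CPU interface state when the vcpu is scheduled in. */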
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);

	if (has_vhe())
		__vgic_v3_activate_traps(vcpu);

	WARN_ON(vgic_v4_load(vcpu));
}
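/* Refresh the in-memory copy of VMCR from the hardware register. */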
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}
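/* Save the GICv3 CPU interface state when the vcpu is scheduled out. */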
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);

	if (has_vhe())
		__vgic_v3_deactivate_traps(vcpu);
}