/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

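/*
 * Trapping and GICv4 controls, set from the "kvm-arm." early parameters
 * registered at the bottom of this file (and, for the trap flags, by the
 * erratum workaround in vgic_v3_probe()).
 */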
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;

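/*
 * Request an underflow maintenance interrupt: with ICH_HCR_EL2.UIE set,
 * the CPU interface signals a maintenance interrupt once the list
 * registers run (nearly) empty, so that the remaining pending interrupts
 * can be injected. (Paraphrase of the architectural behaviour; see the
 * GICv3 specification for the exact trigger condition.)
 */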
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

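/*
 * An LR asks for an EOI maintenance interrupt once it has become invalid
 * (no pending/active state left), has the EOI bit set, and does not
 * describe a HW-mapped interrupt.
 */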
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

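/*
 * Fold the state of the in-use list registers back into the software
 * model: the active and pending bits, the source CPU of GICv2 SGIs, and
 * the line level of HW-mapped level-triggered interrupts.
 */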
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;
	unsigned long flags;

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock_irqsave(&irq->irq_lock, flags);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}

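/*
 * The reverse operation of vgic_v3_fold_lr_state(): encode the state of
 * @irq (interrupt ID, group, priority, pending/active bits, and the SGI
 * source for a GICv2 guest) into list register @lr.
 */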
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * anyway.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	/*
	 * We currently only support Group1 interrupts, which is a
	 * known defect. This needs to be addressed at some point.
	 */
	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

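/*
 * Pack the fields of struct vgic_vmcr into the ICH_VMCR_EL2 layout. For
 * a GICv2 guest the AckCtl and FIQEn bits are honoured; for a GICv3
 * guest (SRE=1) VFIQEn is RES1 and VAckCtl is RES0.
 */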
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating a GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating a GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

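/*
 * Reset value for the guest-visible GICR_PENDBASER: inner cacheable
 * (read-allocate, write-back), outer cacheability same as inner, inner
 * shareable.
 */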
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}

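/*
 * Read the pending state of one LPI from the guest's pending table (as
 * addressed by the target redistributor's GICR_PENDBASER), transfer it
 * into the software pending_latch, and clear the consumed bit in guest
 * memory.
 */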
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int last_byte_offset = -1;
	struct vgic_irq *irq;
	int ret;
	u8 val;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_byte_offset = byte_offset;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
			rdreg->base)
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in the index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}

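/*
 * Return the registered redistributor region matching @index, or NULL if
 * there is none.
 */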
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}

int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	int c;

	if (vgic_ready(kvm))
		goto out;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %d redistributor base not set\n", c);
			ret = -ENXIO;
			goto out;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

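/*
 * Example (hypothetical host command line): booting with
 * "kvm-arm.vgic_v3_group1_trap=1 kvm-arm.vgic_v4_enable=1" sets
 * group1_trap and gicv4_enable above before vgic_v3_probe() runs.
 */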
/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_info("GICv4 support %sabled\n",
			 gicv4_enable ? "en" : "dis");
	}

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

#ifdef CONFIG_ARM64
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}
#endif

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C"  : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

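/*
 * vcpu_load/vcpu_put hooks: restore the VMCR and the active priority
 * registers to the CPU interface when the vcpu is loaded, and save them
 * back (deactivating the traps on VHE) when it is put.
 */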
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);

	if (has_vhe())
		__vgic_v3_activate_traps(vcpu);
}

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);

	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);

	if (has_vhe())
		__vgic_v3_deactivate_traps(vcpu);
}