/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18 #include <linux/compiler.h>
19 #include <linux/irqchip/arm-gic.h>
20 #include <linux/kvm_host.h>
22 #include <asm/kvm_emulate.h>
23 #include <asm/kvm_hyp.h>
24 #include <asm/kvm_mmu.h>
26 static void __hyp_text
save_elrsr(struct kvm_vcpu
*vcpu
, void __iomem
*base
)
28 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
29 int nr_lr
= (kern_hyp_va(&kvm_vgic_global_state
))->nr_lr
;
32 elrsr0
= readl_relaxed(base
+ GICH_ELRSR0
);
33 if (unlikely(nr_lr
> 32))
34 elrsr1
= readl_relaxed(base
+ GICH_ELRSR1
);
38 cpu_if
->vgic_elrsr
= ((u64
)elrsr1
<< 32) | elrsr0
;
41 static void __hyp_text
save_lrs(struct kvm_vcpu
*vcpu
, void __iomem
*base
)
43 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
45 u64 used_lrs
= vcpu
->arch
.vgic_cpu
.used_lrs
;
47 for (i
= 0; i
< used_lrs
; i
++) {
48 if (cpu_if
->vgic_elrsr
& (1UL << i
))
49 cpu_if
->vgic_lr
[i
] &= ~GICH_LR_STATE
;
51 cpu_if
->vgic_lr
[i
] = readl_relaxed(base
+ GICH_LR0
+ (i
* 4));
53 writel_relaxed(0, base
+ GICH_LR0
+ (i
* 4));
57 /* vcpu is already in the HYP VA space */
58 void __hyp_text
__vgic_v2_save_state(struct kvm_vcpu
*vcpu
)
60 struct kvm
*kvm
= kern_hyp_va(vcpu
->kvm
);
61 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
62 struct vgic_dist
*vgic
= &kvm
->arch
.vgic
;
63 void __iomem
*base
= kern_hyp_va(vgic
->vctrl_base
);
64 u64 used_lrs
= vcpu
->arch
.vgic_cpu
.used_lrs
;
70 cpu_if
->vgic_apr
= readl_relaxed(base
+ GICH_APR
);
72 save_elrsr(vcpu
, base
);
75 writel_relaxed(0, base
+ GICH_HCR
);
77 cpu_if
->vgic_elrsr
= ~0UL;
82 /* vcpu is already in the HYP VA space */
83 void __hyp_text
__vgic_v2_restore_state(struct kvm_vcpu
*vcpu
)
85 struct kvm
*kvm
= kern_hyp_va(vcpu
->kvm
);
86 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
87 struct vgic_dist
*vgic
= &kvm
->arch
.vgic
;
88 void __iomem
*base
= kern_hyp_va(vgic
->vctrl_base
);
90 u64 used_lrs
= vcpu
->arch
.vgic_cpu
.used_lrs
;
96 writel_relaxed(cpu_if
->vgic_hcr
, base
+ GICH_HCR
);
97 writel_relaxed(cpu_if
->vgic_apr
, base
+ GICH_APR
);
98 for (i
= 0; i
< used_lrs
; i
++) {
99 writel_relaxed(cpu_if
->vgic_lr
[i
],
100 base
+ GICH_LR0
+ (i
* 4));
107 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
110 * @vcpu: the offending vcpu
113 * 1: GICV access successfully performed
114 * 0: Not a GICV access
115 * -1: Illegal GICV access
117 int __hyp_text
__vgic_v2_perform_cpuif_access(struct kvm_vcpu
*vcpu
)
119 struct kvm
*kvm
= kern_hyp_va(vcpu
->kvm
);
120 struct vgic_dist
*vgic
= &kvm
->arch
.vgic
;
121 phys_addr_t fault_ipa
;
125 /* Build the full address */
126 fault_ipa
= kvm_vcpu_get_fault_ipa(vcpu
);
127 fault_ipa
|= kvm_vcpu_get_hfar(vcpu
) & GENMASK(11, 0);
129 /* If not for GICV, move on */
130 if (fault_ipa
< vgic
->vgic_cpu_base
||
131 fault_ipa
>= (vgic
->vgic_cpu_base
+ KVM_VGIC_V2_CPU_SIZE
))
134 /* Reject anything but a 32bit access */
135 if (kvm_vcpu_dabt_get_as(vcpu
) != sizeof(u32
))
138 /* Not aligned? Don't bother */
142 rd
= kvm_vcpu_dabt_get_rd(vcpu
);
143 addr
= kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state
))->vcpu_base_va
);
144 addr
+= fault_ipa
- vgic
->vgic_cpu_base
;
146 if (kvm_vcpu_dabt_iswrite(vcpu
)) {
147 u32 data
= vcpu_data_guest_to_host(vcpu
,
148 vcpu_get_reg(vcpu
, rd
),
150 writel_relaxed(data
, addr
);
152 u32 data
= readl_relaxed(addr
);
153 vcpu_set_reg(vcpu
, rd
, vcpu_data_host_to_guest(vcpu
, data
,