/*
 * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
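
/*
 * GICv2 backend for the generic VGIC code. These helpers only touch
 * the shadow copy of the GICH_* interface registers kept in
 * vcpu->arch.vgic_cpu.vgic_v2; the world-switch code is what moves
 * that state to and from the hardware around guest entry and exit.
 */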

static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	struct vgic_lr lr_desc;
	u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];

	lr_desc.irq	= val & GICH_LR_VIRTUALID;
	/* SGIs (0..15) encode the requesting CPU in the CPUID field */
	if (lr_desc.irq <= 15)
		lr_desc.source	= (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
	else
		lr_desc.source	= 0;
	lr_desc.state	= 0;

	if (val & GICH_LR_PENDING_BIT)
		lr_desc.state |= LR_STATE_PENDING;
	if (val & GICH_LR_ACTIVE_BIT)
		lr_desc.state |= LR_STATE_ACTIVE;
	if (val & GICH_LR_EOI)
		lr_desc.state |= LR_EOI_INT;
	if (val & GICH_LR_HW) {
		lr_desc.state |= LR_HW;
		lr_desc.hwirq = (val & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT;
	}

	return lr_desc;
}
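
/*
 * Rough GICH_LRn layout, for reference (the GICv2 architecture spec
 * is authoritative): bits [9:0] virtual ID; with HW set, bits [19:10]
 * carry the physical interrupt ID; with HW clear, bit 19 requests an
 * EOI maintenance interrupt and, for SGIs, bits [12:10] hold the
 * source CPU; bits [29:28] are the pending/active state; bit 31 is HW.
 */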

static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
			   struct vgic_lr lr_desc)
{
	u32 lr_val;

	lr_val = lr_desc.irq;

	if (lr_desc.state & LR_STATE_PENDING)
		lr_val |= GICH_LR_PENDING_BIT;
	if (lr_desc.state & LR_STATE_ACTIVE)
		lr_val |= GICH_LR_ACTIVE_BIT;
	if (lr_desc.state & LR_EOI_INT)
		lr_val |= GICH_LR_EOI;

	if (lr_desc.state & LR_HW) {
		lr_val |= GICH_LR_HW;
		lr_val |= (u32)lr_desc.hwirq << GICH_LR_PHYSID_CPUID_SHIFT;
	}

	if (lr_desc.irq < VGIC_NR_SGIS)
		lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;

	/* An LR with no pending/active state is free: reflect that in ELRSR */
	if (!(lr_desc.state & LR_STATE_MASK))
		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
	else
		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
}
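
/*
 * ELRSR (empty LR status) and EISR (EOI status) are read back from
 * the shadow state saved at guest exit; EISR is cleared once the
 * generic code has processed it.
 */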

static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
}

static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
}

static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
}
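
/*
 * Translate the GICH_MISR maintenance interrupt reasons into the
 * generic INT_STATUS_* flags used by the common VGIC code.
 */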

static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
{
	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
	u32 ret = 0;

	if (misr & GICH_MISR_EOI)
		ret |= INT_STATUS_EOI;
	if (misr & GICH_MISR_U)
		ret |= INT_STATUS_UNDERFLOW;

	return ret;
}
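
/*
 * GICH_HCR.UIE asks the GIC for a maintenance interrupt when the
 * list registers are (almost) empty, giving the hypervisor a chance
 * to refill them when more virtual interrupts are pending than
 * there are LRs.
 */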

static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
}

static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
}
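
/*
 * GICH_VMCR packs the guest-visible CPU interface controls (enable
 * bits, binary points, priority mask) into one register; these two
 * helpers convert between it and the generic struct vgic_vmcr layout.
 */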

static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;

	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr  = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
}

static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr  = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}

static void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
	vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

static const struct vgic_ops vgic_v2_ops = {
	.get_lr			= vgic_v2_get_lr,
	.set_lr			= vgic_v2_set_lr,
	.get_elrsr		= vgic_v2_get_elrsr,
	.get_eisr		= vgic_v2_get_eisr,
	.clear_eisr		= vgic_v2_clear_eisr,
	.get_interrupt_status	= vgic_v2_get_interrupt_status,
	.enable_underflow	= vgic_v2_enable_underflow,
	.disable_underflow	= vgic_v2_disable_underflow,
	.get_vmcr		= vgic_v2_get_vmcr,
	.set_vmcr		= vgic_v2_set_vmcr,
	.enable			= vgic_v2_enable,
};
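
/*
 * Clearing every list register at probe time gives each CPU a known
 * starting point; vgic_cpu_init_lrs() runs on all CPUs via
 * on_each_cpu() from vgic_v2_probe() below.
 */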

static struct vgic_params vgic_v2_params;

static void vgic_cpu_init_lrs(void *params)
{
	struct vgic_params *vgic = params;
	int i;

	for (i = 0; i < vgic->nr_lr; i++)
		writel_relaxed(0, vgic->vctrl_base + GICH_LR0 + (i * 4));
}

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @gic_kvm_info:	pointer to the GIC description
 * @ops:		address of a pointer to the GICv2 operations
 * @params:		address of a pointer to HW-specific parameters
 *
 * Returns 0 if a GICv2 has been found, with the low level operations
 * in *ops and the HW parameters in *params. Returns an error code
 * otherwise.
 */
int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params)
{
	int ret;
	struct vgic_params *vgic = &vgic_v2_params;
	const struct resource *vctrl_res = &gic_kvm_info->vctrl;
	const struct resource *vcpu_res = &gic_kvm_info->vcpu;

	if (!gic_kvm_info->maint_irq) {
		kvm_err("error getting vgic maintenance irq\n");
		ret = -ENXIO;
		goto out;
	}
	vgic->maint_irq = gic_kvm_info->maint_irq;

	if (!gic_kvm_info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		ret = -ENXIO;
		goto out;
	}

	vgic->vctrl_base = ioremap(gic_kvm_info->vctrl.start,
				   resource_size(&gic_kvm_info->vctrl));
	if (!vgic->vctrl_base) {
		kvm_err("Cannot ioremap GICH\n");
		ret = -ENOMEM;
		goto out;
	}

	vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
	vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;

	ret = create_hyp_io_mappings(vgic->vctrl_base,
				     vgic->vctrl_base + resource_size(vctrl_res),
				     vctrl_res->start);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out_unmap;
	}

	if (!PAGE_ALIGNED(vcpu_res->start)) {
		kvm_err("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)vcpu_res->start);
		ret = -ENXIO;
		goto out_unmap;
	}

	if (!PAGE_ALIGNED(resource_size(vcpu_res))) {
		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(vcpu_res),
			PAGE_SIZE);
		ret = -ENXIO;
		goto out_unmap;
	}

	vgic->can_emulate_gicv2 = true;
	kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);

	vgic->vcpu_base = vcpu_res->start;

	kvm_info("GICH base=0x%llx, GICV base=0x%llx, IRQ=%d\n",
		 gic_kvm_info->vctrl.start, vgic->vcpu_base, vgic->maint_irq);

	vgic->type = VGIC_V2;
	vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;

	on_each_cpu(vgic_cpu_init_lrs, vgic, 1);

	*ops = &vgic_v2_ops;
	*params = vgic;

	return 0;

out_unmap:
	iounmap(vgic->vctrl_base);
out:
	return ret;
}