/*
 * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
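
/*
 * These helpers operate on the shadow GICv2 state kept in
 * vcpu->arch.vgic_cpu.vgic_v2 (vgic_lr[], vgic_elrsr, vgic_eisr,
 * vgic_misr, vgic_hcr and vgic_vmcr), which mirrors the GICH_*
 * virtual interface control registers.
 */

/* Decode a saved list register value into a struct vgic_lr descriptor. */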
static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	struct vgic_lr lr_desc;
	u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];

	lr_desc.irq = val & GICH_LR_VIRTUALID;
	if (lr_desc.irq <= 15)
		lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
	else
		lr_desc.source = 0;
	lr_desc.state = 0;

	if (val & GICH_LR_PENDING_BIT)
		lr_desc.state |= LR_STATE_PENDING;
	if (val & GICH_LR_ACTIVE_BIT)
		lr_desc.state |= LR_STATE_ACTIVE;
	if (val & GICH_LR_EOI)
		lr_desc.state |= LR_EOI_INT;

	return lr_desc;
}
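
/* Encode a struct vgic_lr descriptor back into a list register value. */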
static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
			   struct vgic_lr lr_desc)
{
	u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;

	if (lr_desc.state & LR_STATE_PENDING)
		lr_val |= GICH_LR_PENDING_BIT;
	if (lr_desc.state & LR_STATE_ACTIVE)
		lr_val |= GICH_LR_ACTIVE_BIT;
	if (lr_desc.state & LR_EOI_INT)
		lr_val |= GICH_LR_EOI;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
}
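
/*
 * Update the empty-list-register status for @lr: flag the LR as empty
 * when it no longer holds a pending or active interrupt, otherwise
 * mark it as in use.
 */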
static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
				  struct vgic_lr lr_desc)
{
	if (!(lr_desc.state & LR_STATE_MASK))
		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
	else
		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
}
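
/* Accessors for the saved ELRSR and EISR status registers. */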
static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
}

static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
}

static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
}
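
/*
 * Translate the maintenance interrupt status (MISR) into the generic
 * INT_STATUS_EOI / INT_STATUS_UNDERFLOW flags used by the common vgic
 * code.
 */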
static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
{
	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
	u32 ret = 0;

	if (misr & GICH_MISR_EOI)
		ret |= INT_STATUS_EOI;
	if (misr & GICH_MISR_U)
		ret |= INT_STATUS_UNDERFLOW;

	return ret;
}
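
/* Control the underflow maintenance interrupt via GICH_HCR.UIE. */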
static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
}

static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
}
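
/* Unpack/pack the fields of the virtual machine control register (GICH_VMCR). */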
static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;

	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
}

static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}
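
/* Bring up the GICv2 virtual interface for this vcpu with a clean VMCR. */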
static void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}
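
/* Low level operations handed back to the common vgic code by vgic_v2_probe(). */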
static const struct vgic_ops vgic_v2_ops = {
	.get_lr			= vgic_v2_get_lr,
	.set_lr			= vgic_v2_set_lr,
	.sync_lr_elrsr		= vgic_v2_sync_lr_elrsr,
	.get_elrsr		= vgic_v2_get_elrsr,
	.get_eisr		= vgic_v2_get_eisr,
	.clear_eisr		= vgic_v2_clear_eisr,
	.get_interrupt_status	= vgic_v2_get_interrupt_status,
	.enable_underflow	= vgic_v2_enable_underflow,
	.disable_underflow	= vgic_v2_disable_underflow,
	.get_vmcr		= vgic_v2_get_vmcr,
	.set_vmcr		= vgic_v2_set_vmcr,
	.enable			= vgic_v2_enable,
};

static struct vgic_params vgic_v2_params;

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
 * @vgic_node:	pointer to the DT node
 * @ops:	address of a pointer to the GICv2 operations
 * @params:	address of a pointer to HW-specific parameters
 *
 * Returns 0 if a GICv2 has been found, with the low level operations
 * in *ops and the HW parameters in *params. Returns an error code
 * otherwise.
 */
int vgic_v2_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params)
{
	int ret;
	struct resource vctrl_res;
	struct resource vcpu_res;
	struct vgic_params *vgic = &vgic_v2_params;

	vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
	if (!vgic->maint_irq) {
		kvm_err("error getting vgic maintenance irq from DT\n");
		ret = -ENXIO;
		goto out;
	}

	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
	if (ret) {
		kvm_err("Cannot obtain GICH resource\n");
		goto out;
	}

	vgic->vctrl_base = of_iomap(vgic_node, 2);
	if (!vgic->vctrl_base) {
		kvm_err("Cannot ioremap GICH\n");
		ret = -ENOMEM;
		goto out;
	}

	/* GICH_VTR.ListRegs is the number of implemented LRs minus one. */
	vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
	vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;

	ret = create_hyp_io_mappings(vgic->vctrl_base,
				     vgic->vctrl_base + resource_size(&vctrl_res),
				     vctrl_res.start);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out_unmap;
	}

	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
		kvm_err("Cannot obtain GICV resource\n");
		ret = -ENXIO;
		goto out_unmap;
	}

	if (!PAGE_ALIGNED(vcpu_res.start)) {
		kvm_err("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)vcpu_res.start);
		ret = -ENXIO;
		goto out_unmap;
	}

	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&vcpu_res),
			PAGE_SIZE);
		ret = -ENXIO;
		goto out_unmap;
	}

	vgic->can_emulate_gicv2 = true;
	kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);

	vgic->vcpu_base = vcpu_res.start;

	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
		 vctrl_res.start, vgic->maint_irq);

	vgic->type = VGIC_V2;
	vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
	*ops = &vgic_v2_ops;
	*params = vgic;
	goto out;

out_unmap:
	iounmap(vgic->vctrl_base);
out:
	of_node_put(vgic_node);
	return ret;
}