/*
 * Copyright (C) 2013 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/* These are for GICv2 emulation only */
#define GICH_LR_VIRTUALID		(0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT	(10)
#define GICH_LR_PHYSID_CPUID		(7UL << GICH_LR_PHYSID_CPUID_SHIFT)

/*
 * LRs are stored in reverse order in memory. Make sure we index them
 * correctly.
 */
#define LR_INDEX(lr)			(VGIC_V3_MAX_LRS - 1 - lr)
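/*
 * For example, assuming VGIC_V3_MAX_LRS is 16 (its definition in
 * arm_vgic.h at the time of writing), LR_INDEX(0) is 15, i.e. LR0
 * occupies the last slot of the vgic_lr[] array.
 */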
static u32 ich_vtr_el2;
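/*
 * Decode the shadow copy of list register @lr into the generic
 * struct vgic_lr representation used by the common vgic code.
 */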
static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	struct vgic_lr lr_desc;
	u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];

	lr_desc.irq	= val & GICH_LR_VIRTUALID;
	if (lr_desc.irq <= 15)
		lr_desc.source	= (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
	else
		lr_desc.source = 0;
	lr_desc.state	= 0;

	if (val & ICH_LR_PENDING_BIT)
		lr_desc.state |= LR_STATE_PENDING;
	if (val & ICH_LR_ACTIVE_BIT)
		lr_desc.state |= LR_STATE_ACTIVE;
	if (val & ICH_LR_EOI)
		lr_desc.state |= LR_EOI_INT;

	return lr_desc;
}
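/*
 * The reverse of vgic_v3_get_lr(): encode a generic struct vgic_lr
 * into the hardware list register layout and store it in the shadow
 * vgic_lr[] entry for @lr.
 */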
static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
			   struct vgic_lr lr_desc)
{
	u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
		      lr_desc.irq);

	if (lr_desc.state & LR_STATE_PENDING)
		lr_val |= ICH_LR_PENDING_BIT;
	if (lr_desc.state & LR_STATE_ACTIVE)
		lr_val |= ICH_LR_ACTIVE_BIT;
	if (lr_desc.state & LR_EOI_INT)
		lr_val |= ICH_LR_EOI;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
}
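/*
 * If the list register no longer carries any pending or active state,
 * mark it as empty in the shadow ELRSR so it can be recycled.
 */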
static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
				  struct vgic_lr lr_desc)
{
	if (!(lr_desc.state & LR_STATE_MASK))
		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
}
static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr;
}
static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
}
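/*
 * Translate the shadow ICH_MISR_EL2 bits into the generic maintenance
 * interrupt status flags used by the common vgic code.
 */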
static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
{
	u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
	u32 ret = 0;

	if (misr & ICH_MISR_EOI)
		ret |= INT_STATUS_EOI;
	if (misr & ICH_MISR_U)
		ret |= INT_STATUS_UNDERFLOW;

	return ret;
}
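/*
 * Unpack the shadow ICH_VMCR_EL2 value into the individual fields of
 * the generic struct vgic_vmcr.
 */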
static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;

	vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
}
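/*
 * ICH_HCR_EL2.UIE (underflow interrupt enable): while set, a maintenance
 * interrupt is asserted when at most one list register holds a valid
 * interrupt, giving the hypervisor a chance to refill the LRs.
 */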
static void vgic_v3_enable_underflow(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE;
}
static void vgic_v3_disable_underflow(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE;
}
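/*
 * The reverse of vgic_v3_get_vmcr(): pack the generic struct vgic_vmcr
 * fields back into the shadow ICH_VMCR_EL2 value.
 */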
static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr  = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
}
static void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
}
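/*
 * GICv3-specific callbacks, plugged into the generic vgic code via the
 * vgic_ops interface.
 */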
static const struct vgic_ops vgic_v3_ops = {
	.get_lr			= vgic_v3_get_lr,
	.set_lr			= vgic_v3_set_lr,
	.sync_lr_elrsr		= vgic_v3_sync_lr_elrsr,
	.get_elrsr		= vgic_v3_get_elrsr,
	.get_eisr		= vgic_v3_get_eisr,
	.get_interrupt_status	= vgic_v3_get_interrupt_status,
	.enable_underflow	= vgic_v3_enable_underflow,
	.disable_underflow	= vgic_v3_disable_underflow,
	.get_vmcr		= vgic_v3_get_vmcr,
	.set_vmcr		= vgic_v3_set_vmcr,
	.enable			= vgic_v3_enable,
};

static struct vgic_params vgic_v3_params;
/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
 * @vgic_node:	pointer to the DT node
 * @ops:	address of a pointer to the GICv3 operations
 * @params:	address of a pointer to HW-specific parameters
 *
 * Returns 0 if a GICv3 has been found, with the low level operations
 * in *ops and the HW parameters in *params. Returns an error code
 * otherwise.
 */
int vgic_v3_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params)
{
	int ret = 0;
	u32 gicv_idx;
	struct resource vcpu_res;
	struct vgic_params *vgic = &vgic_v3_params;

	vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
	if (!vgic->maint_irq) {
		kvm_err("error getting vgic maintenance irq from DT\n");
		ret = -ENXIO;
		goto out;
	}

	ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
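	/*
	 * Per the GICv3 DT binding, the "reg" property lists the GICD
	 * region first, then one entry per redistributor region, then
	 * (optionally) GICC, GICH and GICV. The index arithmetic below
	 * relies on that ordering to locate the GICV region.
	 */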
	if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3; /* Also skip GICD, GICC, GICH */
	if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
		kvm_err("Cannot obtain GICV region\n");
		ret = -ENXIO;
		goto out;
	}

	if (!PAGE_ALIGNED(vcpu_res.start)) {
		kvm_err("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)vcpu_res.start);
		ret = -ENXIO;
		goto out;
	}

	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&vcpu_res),
			PAGE_SIZE);
		ret = -ENXIO;
		goto out;
	}

	vgic->vcpu_base = vcpu_res.start;
	vgic->vctrl_base = NULL;
	vgic->type = VGIC_V3;

	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
		 vcpu_res.start, vgic->maint_irq);

	*ops = &vgic_v3_ops;
	*params = vgic;

out:
	of_node_put(vgic_node);

	return ret;
}