/*
 * VGIC system registers handling functions for AArch64 mode
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
14 #include <linux/irqchip/arm-gic-v3.h>
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <asm/kvm_emulate.h>
21 static bool access_gic_ctlr(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
22 const struct sys_reg_desc
*r
)
24 u32 host_pri_bits
, host_id_bits
, host_seis
, host_a3v
, seis
, a3v
;
25 struct vgic_cpu
*vgic_v3_cpu
= &vcpu
->arch
.vgic_cpu
;
26 struct vgic_vmcr vmcr
;
29 vgic_get_vmcr(vcpu
, &vmcr
);
34 * Disallow restoring VM state if not supported by this
37 host_pri_bits
= ((val
& ICC_CTLR_EL1_PRI_BITS_MASK
) >>
38 ICC_CTLR_EL1_PRI_BITS_SHIFT
) + 1;
39 if (host_pri_bits
> vgic_v3_cpu
->num_pri_bits
)
42 vgic_v3_cpu
->num_pri_bits
= host_pri_bits
;
44 host_id_bits
= (val
& ICC_CTLR_EL1_ID_BITS_MASK
) >>
45 ICC_CTLR_EL1_ID_BITS_SHIFT
;
46 if (host_id_bits
> vgic_v3_cpu
->num_id_bits
)
49 vgic_v3_cpu
->num_id_bits
= host_id_bits
;
51 host_seis
= ((kvm_vgic_global_state
.ich_vtr_el2
&
52 ICH_VTR_SEIS_MASK
) >> ICH_VTR_SEIS_SHIFT
);
53 seis
= (val
& ICC_CTLR_EL1_SEIS_MASK
) >>
54 ICC_CTLR_EL1_SEIS_SHIFT
;
55 if (host_seis
!= seis
)
58 host_a3v
= ((kvm_vgic_global_state
.ich_vtr_el2
&
59 ICH_VTR_A3V_MASK
) >> ICH_VTR_A3V_SHIFT
);
60 a3v
= (val
& ICC_CTLR_EL1_A3V_MASK
) >> ICC_CTLR_EL1_A3V_SHIFT
;
65 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
66 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
68 vmcr
.cbpr
= (val
& ICC_CTLR_EL1_CBPR_MASK
) >> ICC_CTLR_EL1_CBPR_SHIFT
;
69 vmcr
.eoim
= (val
& ICC_CTLR_EL1_EOImode_MASK
) >> ICC_CTLR_EL1_EOImode_SHIFT
;
70 vgic_set_vmcr(vcpu
, &vmcr
);
73 val
|= (vgic_v3_cpu
->num_pri_bits
- 1) <<
74 ICC_CTLR_EL1_PRI_BITS_SHIFT
;
75 val
|= vgic_v3_cpu
->num_id_bits
<< ICC_CTLR_EL1_ID_BITS_SHIFT
;
76 val
|= ((kvm_vgic_global_state
.ich_vtr_el2
&
77 ICH_VTR_SEIS_MASK
) >> ICH_VTR_SEIS_SHIFT
) <<
78 ICC_CTLR_EL1_SEIS_SHIFT
;
79 val
|= ((kvm_vgic_global_state
.ich_vtr_el2
&
80 ICH_VTR_A3V_MASK
) >> ICH_VTR_A3V_SHIFT
) <<
81 ICC_CTLR_EL1_A3V_SHIFT
;
83 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
84 * Extract it directly using ICC_CTLR_EL1 reg definitions.
86 val
|= (vmcr
.cbpr
<< ICC_CTLR_EL1_CBPR_SHIFT
) & ICC_CTLR_EL1_CBPR_MASK
;
87 val
|= (vmcr
.eoim
<< ICC_CTLR_EL1_EOImode_SHIFT
) & ICC_CTLR_EL1_EOImode_MASK
;
95 static bool access_gic_pmr(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
96 const struct sys_reg_desc
*r
)
98 struct vgic_vmcr vmcr
;
100 vgic_get_vmcr(vcpu
, &vmcr
);
102 vmcr
.pmr
= (p
->regval
& ICC_PMR_EL1_MASK
) >> ICC_PMR_EL1_SHIFT
;
103 vgic_set_vmcr(vcpu
, &vmcr
);
105 p
->regval
= (vmcr
.pmr
<< ICC_PMR_EL1_SHIFT
) & ICC_PMR_EL1_MASK
;
111 static bool access_gic_bpr0(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
112 const struct sys_reg_desc
*r
)
114 struct vgic_vmcr vmcr
;
116 vgic_get_vmcr(vcpu
, &vmcr
);
118 vmcr
.bpr
= (p
->regval
& ICC_BPR0_EL1_MASK
) >>
120 vgic_set_vmcr(vcpu
, &vmcr
);
122 p
->regval
= (vmcr
.bpr
<< ICC_BPR0_EL1_SHIFT
) &
129 static bool access_gic_bpr1(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
130 const struct sys_reg_desc
*r
)
132 struct vgic_vmcr vmcr
;
137 vgic_get_vmcr(vcpu
, &vmcr
);
140 vmcr
.abpr
= (p
->regval
& ICC_BPR1_EL1_MASK
) >>
142 vgic_set_vmcr(vcpu
, &vmcr
);
144 p
->regval
= (vmcr
.abpr
<< ICC_BPR1_EL1_SHIFT
) &
149 p
->regval
= min((vmcr
.bpr
+ 1), 7U);
155 static bool access_gic_grpen0(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
156 const struct sys_reg_desc
*r
)
158 struct vgic_vmcr vmcr
;
160 vgic_get_vmcr(vcpu
, &vmcr
);
162 vmcr
.grpen0
= (p
->regval
& ICC_IGRPEN0_EL1_MASK
) >>
163 ICC_IGRPEN0_EL1_SHIFT
;
164 vgic_set_vmcr(vcpu
, &vmcr
);
166 p
->regval
= (vmcr
.grpen0
<< ICC_IGRPEN0_EL1_SHIFT
) &
167 ICC_IGRPEN0_EL1_MASK
;
173 static bool access_gic_grpen1(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
174 const struct sys_reg_desc
*r
)
176 struct vgic_vmcr vmcr
;
178 vgic_get_vmcr(vcpu
, &vmcr
);
180 vmcr
.grpen1
= (p
->regval
& ICC_IGRPEN1_EL1_MASK
) >>
181 ICC_IGRPEN1_EL1_SHIFT
;
182 vgic_set_vmcr(vcpu
, &vmcr
);
184 p
->regval
= (vmcr
.grpen1
<< ICC_IGRPEN1_EL1_SHIFT
) &
185 ICC_IGRPEN1_EL1_MASK
;
191 static void vgic_v3_access_apr_reg(struct kvm_vcpu
*vcpu
,
192 struct sys_reg_params
*p
, u8 apr
, u8 idx
)
194 struct vgic_v3_cpu_if
*vgicv3
= &vcpu
->arch
.vgic_cpu
.vgic_v3
;
198 ap_reg
= &vgicv3
->vgic_ap1r
[idx
];
200 ap_reg
= &vgicv3
->vgic_ap0r
[idx
];
208 static bool access_gic_aprn(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
209 const struct sys_reg_desc
*r
, u8 apr
)
213 if (idx
> vgic_v3_max_apr_idx(vcpu
))
216 vgic_v3_access_apr_reg(vcpu
, p
, apr
, idx
);
225 static bool access_gic_ap0r(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
226 const struct sys_reg_desc
*r
)
229 return access_gic_aprn(vcpu
, p
, r
, 0);
232 static bool access_gic_ap1r(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
233 const struct sys_reg_desc
*r
)
235 return access_gic_aprn(vcpu
, p
, r
, 1);
238 static bool access_gic_sre(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
239 const struct sys_reg_desc
*r
)
241 struct vgic_v3_cpu_if
*vgicv3
= &vcpu
->arch
.vgic_cpu
.vgic_v3
;
243 /* Validate SRE bit */
245 if (!(p
->regval
& ICC_SRE_EL1_SRE
))
248 p
->regval
= vgicv3
->vgic_sre
;
253 static const struct sys_reg_desc gic_v3_icc_reg_descs
[] = {
254 { SYS_DESC(SYS_ICC_PMR_EL1
), access_gic_pmr
},
255 { SYS_DESC(SYS_ICC_BPR0_EL1
), access_gic_bpr0
},
256 { SYS_DESC(SYS_ICC_AP0R0_EL1
), access_gic_ap0r
},
257 { SYS_DESC(SYS_ICC_AP0R1_EL1
), access_gic_ap0r
},
258 { SYS_DESC(SYS_ICC_AP0R2_EL1
), access_gic_ap0r
},
259 { SYS_DESC(SYS_ICC_AP0R3_EL1
), access_gic_ap0r
},
260 { SYS_DESC(SYS_ICC_AP1R0_EL1
), access_gic_ap1r
},
261 { SYS_DESC(SYS_ICC_AP1R1_EL1
), access_gic_ap1r
},
262 { SYS_DESC(SYS_ICC_AP1R2_EL1
), access_gic_ap1r
},
263 { SYS_DESC(SYS_ICC_AP1R3_EL1
), access_gic_ap1r
},
264 { SYS_DESC(SYS_ICC_BPR1_EL1
), access_gic_bpr1
},
265 { SYS_DESC(SYS_ICC_CTLR_EL1
), access_gic_ctlr
},
266 { SYS_DESC(SYS_ICC_SRE_EL1
), access_gic_sre
},
267 { SYS_DESC(SYS_ICC_IGRPEN0_EL1
), access_gic_grpen0
},
268 { SYS_DESC(SYS_ICC_IGRPEN1_EL1
), access_gic_grpen1
},
271 int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu
*vcpu
, bool is_write
, u64 id
,
274 struct sys_reg_params params
;
275 u64 sysreg
= (id
& KVM_DEV_ARM_VGIC_SYSREG_MASK
) | KVM_REG_SIZE_U64
;
277 params
.regval
= *reg
;
278 params
.is_write
= is_write
;
279 params
.is_aarch32
= false;
280 params
.is_32bit
= false;
282 if (find_reg_by_id(sysreg
, ¶ms
, gic_v3_icc_reg_descs
,
283 ARRAY_SIZE(gic_v3_icc_reg_descs
)))
289 int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu
*vcpu
, bool is_write
, u64 id
,
292 struct sys_reg_params params
;
293 const struct sys_reg_desc
*r
;
294 u64 sysreg
= (id
& KVM_DEV_ARM_VGIC_SYSREG_MASK
) | KVM_REG_SIZE_U64
;
297 params
.regval
= *reg
;
298 params
.is_write
= is_write
;
299 params
.is_aarch32
= false;
300 params
.is_32bit
= false;
302 r
= find_reg_by_id(sysreg
, ¶ms
, gic_v3_icc_reg_descs
,
303 ARRAY_SIZE(gic_v3_icc_reg_descs
));
307 if (!r
->access(vcpu
, ¶ms
, r
))
311 *reg
= params
.regval
;