// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC system registers handling functions for AArch64 mode
 */
6 #include <linux/irqchip/arm-gic-v3.h>
8 #include <linux/kvm_host.h>
9 #include <asm/kvm_emulate.h>
10 #include "vgic/vgic.h"
13 static bool access_gic_ctlr(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
14 const struct sys_reg_desc
*r
)
16 u32 host_pri_bits
, host_id_bits
, host_seis
, host_a3v
, seis
, a3v
;
17 struct vgic_cpu
*vgic_v3_cpu
= &vcpu
->arch
.vgic_cpu
;
18 struct vgic_vmcr vmcr
;
21 vgic_get_vmcr(vcpu
, &vmcr
);
26 * Disallow restoring VM state if not supported by this
29 host_pri_bits
= ((val
& ICC_CTLR_EL1_PRI_BITS_MASK
) >>
30 ICC_CTLR_EL1_PRI_BITS_SHIFT
) + 1;
31 if (host_pri_bits
> vgic_v3_cpu
->num_pri_bits
)
34 vgic_v3_cpu
->num_pri_bits
= host_pri_bits
;
36 host_id_bits
= (val
& ICC_CTLR_EL1_ID_BITS_MASK
) >>
37 ICC_CTLR_EL1_ID_BITS_SHIFT
;
38 if (host_id_bits
> vgic_v3_cpu
->num_id_bits
)
41 vgic_v3_cpu
->num_id_bits
= host_id_bits
;
43 host_seis
= ((kvm_vgic_global_state
.ich_vtr_el2
&
44 ICH_VTR_SEIS_MASK
) >> ICH_VTR_SEIS_SHIFT
);
45 seis
= (val
& ICC_CTLR_EL1_SEIS_MASK
) >>
46 ICC_CTLR_EL1_SEIS_SHIFT
;
47 if (host_seis
!= seis
)
50 host_a3v
= ((kvm_vgic_global_state
.ich_vtr_el2
&
51 ICH_VTR_A3V_MASK
) >> ICH_VTR_A3V_SHIFT
);
52 a3v
= (val
& ICC_CTLR_EL1_A3V_MASK
) >> ICC_CTLR_EL1_A3V_SHIFT
;
57 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
58 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
60 vmcr
.cbpr
= (val
& ICC_CTLR_EL1_CBPR_MASK
) >> ICC_CTLR_EL1_CBPR_SHIFT
;
61 vmcr
.eoim
= (val
& ICC_CTLR_EL1_EOImode_MASK
) >> ICC_CTLR_EL1_EOImode_SHIFT
;
62 vgic_set_vmcr(vcpu
, &vmcr
);
65 val
|= (vgic_v3_cpu
->num_pri_bits
- 1) <<
66 ICC_CTLR_EL1_PRI_BITS_SHIFT
;
67 val
|= vgic_v3_cpu
->num_id_bits
<< ICC_CTLR_EL1_ID_BITS_SHIFT
;
68 val
|= ((kvm_vgic_global_state
.ich_vtr_el2
&
69 ICH_VTR_SEIS_MASK
) >> ICH_VTR_SEIS_SHIFT
) <<
70 ICC_CTLR_EL1_SEIS_SHIFT
;
71 val
|= ((kvm_vgic_global_state
.ich_vtr_el2
&
72 ICH_VTR_A3V_MASK
) >> ICH_VTR_A3V_SHIFT
) <<
73 ICC_CTLR_EL1_A3V_SHIFT
;
75 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
76 * Extract it directly using ICC_CTLR_EL1 reg definitions.
78 val
|= (vmcr
.cbpr
<< ICC_CTLR_EL1_CBPR_SHIFT
) & ICC_CTLR_EL1_CBPR_MASK
;
79 val
|= (vmcr
.eoim
<< ICC_CTLR_EL1_EOImode_SHIFT
) & ICC_CTLR_EL1_EOImode_MASK
;
87 static bool access_gic_pmr(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
88 const struct sys_reg_desc
*r
)
90 struct vgic_vmcr vmcr
;
92 vgic_get_vmcr(vcpu
, &vmcr
);
94 vmcr
.pmr
= (p
->regval
& ICC_PMR_EL1_MASK
) >> ICC_PMR_EL1_SHIFT
;
95 vgic_set_vmcr(vcpu
, &vmcr
);
97 p
->regval
= (vmcr
.pmr
<< ICC_PMR_EL1_SHIFT
) & ICC_PMR_EL1_MASK
;
103 static bool access_gic_bpr0(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
104 const struct sys_reg_desc
*r
)
106 struct vgic_vmcr vmcr
;
108 vgic_get_vmcr(vcpu
, &vmcr
);
110 vmcr
.bpr
= (p
->regval
& ICC_BPR0_EL1_MASK
) >>
112 vgic_set_vmcr(vcpu
, &vmcr
);
114 p
->regval
= (vmcr
.bpr
<< ICC_BPR0_EL1_SHIFT
) &
121 static bool access_gic_bpr1(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
122 const struct sys_reg_desc
*r
)
124 struct vgic_vmcr vmcr
;
129 vgic_get_vmcr(vcpu
, &vmcr
);
132 vmcr
.abpr
= (p
->regval
& ICC_BPR1_EL1_MASK
) >>
134 vgic_set_vmcr(vcpu
, &vmcr
);
136 p
->regval
= (vmcr
.abpr
<< ICC_BPR1_EL1_SHIFT
) &
141 p
->regval
= min((vmcr
.bpr
+ 1), 7U);
147 static bool access_gic_grpen0(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
148 const struct sys_reg_desc
*r
)
150 struct vgic_vmcr vmcr
;
152 vgic_get_vmcr(vcpu
, &vmcr
);
154 vmcr
.grpen0
= (p
->regval
& ICC_IGRPEN0_EL1_MASK
) >>
155 ICC_IGRPEN0_EL1_SHIFT
;
156 vgic_set_vmcr(vcpu
, &vmcr
);
158 p
->regval
= (vmcr
.grpen0
<< ICC_IGRPEN0_EL1_SHIFT
) &
159 ICC_IGRPEN0_EL1_MASK
;
165 static bool access_gic_grpen1(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
166 const struct sys_reg_desc
*r
)
168 struct vgic_vmcr vmcr
;
170 vgic_get_vmcr(vcpu
, &vmcr
);
172 vmcr
.grpen1
= (p
->regval
& ICC_IGRPEN1_EL1_MASK
) >>
173 ICC_IGRPEN1_EL1_SHIFT
;
174 vgic_set_vmcr(vcpu
, &vmcr
);
176 p
->regval
= (vmcr
.grpen1
<< ICC_IGRPEN1_EL1_SHIFT
) &
177 ICC_IGRPEN1_EL1_MASK
;
183 static void vgic_v3_access_apr_reg(struct kvm_vcpu
*vcpu
,
184 struct sys_reg_params
*p
, u8 apr
, u8 idx
)
186 struct vgic_v3_cpu_if
*vgicv3
= &vcpu
->arch
.vgic_cpu
.vgic_v3
;
190 ap_reg
= &vgicv3
->vgic_ap1r
[idx
];
192 ap_reg
= &vgicv3
->vgic_ap0r
[idx
];
200 static bool access_gic_aprn(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
201 const struct sys_reg_desc
*r
, u8 apr
)
205 if (idx
> vgic_v3_max_apr_idx(vcpu
))
208 vgic_v3_access_apr_reg(vcpu
, p
, apr
, idx
);
217 static bool access_gic_ap0r(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
218 const struct sys_reg_desc
*r
)
221 return access_gic_aprn(vcpu
, p
, r
, 0);
224 static bool access_gic_ap1r(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
225 const struct sys_reg_desc
*r
)
227 return access_gic_aprn(vcpu
, p
, r
, 1);
230 static bool access_gic_sre(struct kvm_vcpu
*vcpu
, struct sys_reg_params
*p
,
231 const struct sys_reg_desc
*r
)
233 struct vgic_v3_cpu_if
*vgicv3
= &vcpu
->arch
.vgic_cpu
.vgic_v3
;
235 /* Validate SRE bit */
237 if (!(p
->regval
& ICC_SRE_EL1_SRE
))
240 p
->regval
= vgicv3
->vgic_sre
;
245 static const struct sys_reg_desc gic_v3_icc_reg_descs
[] = {
246 { SYS_DESC(SYS_ICC_PMR_EL1
), access_gic_pmr
},
247 { SYS_DESC(SYS_ICC_BPR0_EL1
), access_gic_bpr0
},
248 { SYS_DESC(SYS_ICC_AP0R0_EL1
), access_gic_ap0r
},
249 { SYS_DESC(SYS_ICC_AP0R1_EL1
), access_gic_ap0r
},
250 { SYS_DESC(SYS_ICC_AP0R2_EL1
), access_gic_ap0r
},
251 { SYS_DESC(SYS_ICC_AP0R3_EL1
), access_gic_ap0r
},
252 { SYS_DESC(SYS_ICC_AP1R0_EL1
), access_gic_ap1r
},
253 { SYS_DESC(SYS_ICC_AP1R1_EL1
), access_gic_ap1r
},
254 { SYS_DESC(SYS_ICC_AP1R2_EL1
), access_gic_ap1r
},
255 { SYS_DESC(SYS_ICC_AP1R3_EL1
), access_gic_ap1r
},
256 { SYS_DESC(SYS_ICC_BPR1_EL1
), access_gic_bpr1
},
257 { SYS_DESC(SYS_ICC_CTLR_EL1
), access_gic_ctlr
},
258 { SYS_DESC(SYS_ICC_SRE_EL1
), access_gic_sre
},
259 { SYS_DESC(SYS_ICC_IGRPEN0_EL1
), access_gic_grpen0
},
260 { SYS_DESC(SYS_ICC_IGRPEN1_EL1
), access_gic_grpen1
},
263 int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu
*vcpu
, bool is_write
, u64 id
,
266 struct sys_reg_params params
;
267 u64 sysreg
= (id
& KVM_DEV_ARM_VGIC_SYSREG_MASK
) | KVM_REG_SIZE_U64
;
269 params
.regval
= *reg
;
270 params
.is_write
= is_write
;
272 if (find_reg_by_id(sysreg
, ¶ms
, gic_v3_icc_reg_descs
,
273 ARRAY_SIZE(gic_v3_icc_reg_descs
)))
279 int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu
*vcpu
, bool is_write
, u64 id
,
282 struct sys_reg_params params
;
283 const struct sys_reg_desc
*r
;
284 u64 sysreg
= (id
& KVM_DEV_ARM_VGIC_SYSREG_MASK
) | KVM_REG_SIZE_U64
;
287 params
.regval
= *reg
;
288 params
.is_write
= is_write
;
290 r
= find_reg_by_id(sysreg
, ¶ms
, gic_v3_icc_reg_descs
,
291 ARRAY_SIZE(gic_v3_icc_reg_descs
));
295 if (!r
->access(vcpu
, ¶ms
, r
))
299 *reg
= params
.regval
;