// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC system registers handling functions for AArch64 mode
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic/vgic.h"
#include "sys_regs.h"

static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 val)
{
        u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
        struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);

        /*
         * Disallow restoring VM state if it is not supported by this
         * hardware.
         */
        host_pri_bits = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
        if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
                return -EINVAL;

        vgic_v3_cpu->num_pri_bits = host_pri_bits;

        host_id_bits = FIELD_GET(ICC_CTLR_EL1_ID_BITS_MASK, val);
        if (host_id_bits > vgic_v3_cpu->num_id_bits)
                return -EINVAL;

        vgic_v3_cpu->num_id_bits = host_id_bits;

        host_seis = FIELD_GET(ICH_VTR_SEIS_MASK, kvm_vgic_global_state.ich_vtr_el2);
        seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
        if (host_seis != seis)
                return -EINVAL;

        host_a3v = FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2);
        a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
        if (host_a3v != a3v)
                return -EINVAL;

        /*
         * Set the VMCR fields using the ICC_CTLR_EL1 layout;
         * vgic_set_vmcr() will convert them to the ICH_VMCR layout.
         */
        vmcr.cbpr = FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val);
        vmcr.eoim = FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val);
        vgic_set_vmcr(vcpu, &vmcr);

        return 0;
}

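/*
 * Userspace read of ICC_CTLR_EL1: rebuild the register from the per-vcpu
 * limits (PRIbits, IDbits), the host's ICH_VTR_EL2 (SEIS, A3V) and the
 * shadow VMCR (CBPR, EOImode).
 */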
static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 *valp)
{
        struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_vmcr vmcr;
        u64 val;

        vgic_get_vmcr(vcpu, &vmcr);
        val = 0;
        val |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_v3_cpu->num_pri_bits - 1);
        val |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_v3_cpu->num_id_bits);
        val |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
                          FIELD_GET(ICH_VTR_SEIS_MASK,
                                    kvm_vgic_global_state.ich_vtr_el2));
        val |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
                          FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2));
        /*
         * The VMCR.CTLR value is already in ICC_CTLR_EL1 layout, so the
         * CBPR and EOImode fields can be taken over directly using the
         * ICC_CTLR_EL1 field definitions.
         */
        val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, vmcr.cbpr);
        val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, vmcr.eoim);

        *valp = val;

        return 0;
}

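/*
 * The accessors below each read-modify-write a single field of the shadow
 * VMCR: the priority mask, the two binary points and the group enables.
 */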
static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                       u64 val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        vmcr.pmr = FIELD_GET(ICC_PMR_EL1_MASK, val);
        vgic_set_vmcr(vcpu, &vmcr);

        return 0;
}

static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                       u64 *val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        *val = FIELD_PREP(ICC_PMR_EL1_MASK, vmcr.pmr);

        return 0;
}

static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        vmcr.bpr = FIELD_GET(ICC_BPR0_EL1_MASK, val);
        vgic_set_vmcr(vcpu, &vmcr);

        return 0;
}

static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 *val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        *val = FIELD_PREP(ICC_BPR0_EL1_MASK, vmcr.bpr);

        return 0;
}

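/*
 * When ICC_CTLR_EL1.CBPR is set, BPR1 is an alias of BPR0: writes are
 * ignored and reads return min(BPR0 + 1, 7), matching the architected
 * non-secure behaviour.
 */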
static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        if (!vmcr.cbpr) {
                vmcr.abpr = FIELD_GET(ICC_BPR1_EL1_MASK, val);
                vgic_set_vmcr(vcpu, &vmcr);
        }

        return 0;
}

static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 *val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        if (!vmcr.cbpr)
                *val = FIELD_PREP(ICC_BPR1_EL1_MASK, vmcr.abpr);
        else
                *val = min((vmcr.bpr + 1), 7U);

        return 0;
}

static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                          u64 val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        vmcr.grpen0 = FIELD_GET(ICC_IGRPEN0_EL1_MASK, val);
        vgic_set_vmcr(vcpu, &vmcr);

        return 0;
}

static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                          u64 *val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        *val = FIELD_PREP(ICC_IGRPEN0_EL1_MASK, vmcr.grpen0);

        return 0;
}

static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                          u64 val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        vmcr.grpen1 = FIELD_GET(ICC_IGRPEN1_EL1_MASK, val);
        vgic_set_vmcr(vcpu, &vmcr);

        return 0;
}

static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                          u64 *val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);
        /* FIELD_PREP for consistency with the other getters (mask is bit 0) */
        *val = FIELD_PREP(ICC_IGRPEN1_EL1_MASK, vmcr.grpen1);

        return 0;
}

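/*
 * The active priority registers are banked by interrupt group: 'apr'
 * selects between the AP1Rn (group 1) and AP0Rn (group 0) banks, and
 * 'idx' picks the register within the bank.
 */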
static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
{
        struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

        if (apr)
                vgicv3->vgic_ap1r[idx] = val;
        else
                vgicv3->vgic_ap0r[idx] = val;
}

static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
{
        struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

        if (apr)
                return vgicv3->vgic_ap1r[idx];
        else
                return vgicv3->vgic_ap0r[idx];
}

static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 val)
{
        u8 idx = r->Op2 & 3;

        if (idx > vgic_v3_max_apr_idx(vcpu))
                return -EINVAL;

        set_apr_reg(vcpu, val, 0, idx);
        return 0;
}

static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 *val)
{
        u8 idx = r->Op2 & 3;

        if (idx > vgic_v3_max_apr_idx(vcpu))
                return -EINVAL;

        *val = get_apr_reg(vcpu, 0, idx);

        return 0;
}

static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 val)
{
        u8 idx = r->Op2 & 3;

        if (idx > vgic_v3_max_apr_idx(vcpu))
                return -EINVAL;

        set_apr_reg(vcpu, val, 1, idx);
        return 0;
}

static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                        u64 *val)
{
        u8 idx = r->Op2 & 3;

        if (idx > vgic_v3_max_apr_idx(vcpu))
                return -EINVAL;

        *val = get_apr_reg(vcpu, 1, idx);

        return 0;
}

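/*
 * A vgic-v3 guest always runs with the system register interface enabled,
 * so the only ICC_SRE_EL1 value that may be restored is one with SRE set.
 */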
static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                       u64 val)
{
        /* Validate SRE bit */
        if (!(val & ICC_SRE_EL1_SRE))
                return -EINVAL;

        return 0;
}

static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                       u64 *val)
{
        struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

        *val = vgicv3->vgic_sre;

        return 0;
}

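/*
 * Table of the GICv3 CPU interface registers accessible from userspace.
 * Entries must stay sorted by encoding (Op0, Op1, CRn, CRm, Op2), since
 * get_reg_by_id() looks them up by binary search.
 */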
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
        { SYS_DESC(SYS_ICC_PMR_EL1),
          .set_user = set_gic_pmr, .get_user = get_gic_pmr, },
        { SYS_DESC(SYS_ICC_BPR0_EL1),
          .set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
        { SYS_DESC(SYS_ICC_AP0R0_EL1),
          .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
        { SYS_DESC(SYS_ICC_AP0R1_EL1),
          .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
        { SYS_DESC(SYS_ICC_AP0R2_EL1),
          .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
        { SYS_DESC(SYS_ICC_AP0R3_EL1),
          .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
        { SYS_DESC(SYS_ICC_AP1R0_EL1),
          .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
        { SYS_DESC(SYS_ICC_AP1R1_EL1),
          .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
        { SYS_DESC(SYS_ICC_AP1R2_EL1),
          .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
        { SYS_DESC(SYS_ICC_AP1R3_EL1),
          .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
        { SYS_DESC(SYS_ICC_BPR1_EL1),
          .set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
        { SYS_DESC(SYS_ICC_CTLR_EL1),
          .set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
        { SYS_DESC(SYS_ICC_SRE_EL1),
          .set_user = set_gic_sre, .get_user = get_gic_sre, },
        { SYS_DESC(SYS_ICC_IGRPEN0_EL1),
          .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
        { SYS_DESC(SYS_ICC_IGRPEN1_EL1),
          .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
};

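/*
 * Translate a KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS attribute into the
 * equivalent KVM_REG_ARM64 system register ID, so that the generic
 * sysreg userspace accessors can be reused for the lookup.
 */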
static u64 attr_to_id(u64 attr)
{
        return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr),
                             FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP1_MASK, attr),
                             FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRN_MASK, attr),
                             FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRM_MASK, attr),
                             FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP2_MASK, attr));
}

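/* Return 0 if the attribute maps to a register in the table above. */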
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        if (get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
                          ARRAY_SIZE(gic_v3_icc_reg_descs)))
                return 0;

        return -ENXIO;
}

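/*
 * Wrap the device attribute in a kvm_one_reg and hand it to the generic
 * sysreg get/set helpers, which perform the table lookup and the user
 * memory access.
 */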
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
                                struct kvm_device_attr *attr,
                                bool is_write)
{
        struct kvm_one_reg reg = {
                .id = attr_to_id(attr->attr),
                .addr = attr->addr,
        };

        if (is_write)
                return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
                                            ARRAY_SIZE(gic_v3_icc_reg_descs));
        else
                return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
                                            ARRAY_SIZE(gic_v3_icc_reg_descs));
}