// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(SYS_SCTLR_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(SYS_CPACR_EL12);
	case TTBR0_EL1:		return read_sysreg_s(SYS_TTBR0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(SYS_TTBR1_EL12);
	case TCR_EL1:		return read_sysreg_s(SYS_TCR_EL12);
	case ESR_EL1:		return read_sysreg_s(SYS_ESR_EL12);
	case AFSR0_EL1:		return read_sysreg_s(SYS_AFSR0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(SYS_AFSR1_EL12);
	case FAR_EL1:		return read_sysreg_s(SYS_FAR_EL12);
	case MAIR_EL1:		return read_sysreg_s(SYS_MAIR_EL12);
	case VBAR_EL1:		return read_sysreg_s(SYS_VBAR_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(SYS_CONTEXTIDR_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(SYS_AMAIR_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(SYS_CNTKCTL_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be
	 * set once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	return;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	return;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	return;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	return;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}

	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follow:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
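
/*
 * Worked example (illustrative only): vcpu_id 0x1234 packs as Aff0 = 0x4,
 * Aff1 = 0x23, Aff2 = 0x01, and bit 31 (RES1) is then set on top of the
 * resulting MPIDR_EL1 value.
 */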
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!system_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}
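
/*
 * Note: 0xdecafbad above is just an arbitrary junk pattern; ANDed with
 * ARMV8_PMU_PMCR_MASK it stands in for the architecturally UNKNOWN reset
 * value of the writable bits, while PMCR_EL0.E is cleared explicitly.
 */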
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
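
/*
 * The flag sets used above correspond to the PMUSERENR_EL0 enable bits:
 * EN (general EL0 access), SW (software increment writes), CR (cycle
 * counter reads) and ER (event counter reads).
 */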
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!system_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
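
/*
 * PMCR_EL0.N gives the number of implemented event counters; the cycle
 * counter (index ARMV8_PMU_CYCLE_IDX, i.e. 31) is always considered valid
 * regardless of N.
 */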
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
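		/*
		 * Illustrative check of the decode above: PMEVCNTR10_EL0 has
		 * CRm = 0b1001 and Op2 = 0b010, so idx = (1 << 3) | 2 = 10.
		 */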
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
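/*
 * For instance, a descriptor for SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0,
 * Op2=0) encodes to the same value as SYS_SCTLR_EL1 from <asm/sysreg.h>.
 */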
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
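/*
 * DBG_BCR_BVR_WCR_WVR_EL1(0), for example, expands to four sys_reg_desc
 * entries covering DBGBVR0_EL1, DBGBCR0_EL1, DBGWVR0_EL1 and DBGWCR0_EL1.
 */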
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
static bool trap_ptrauth(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *rd)
{
	kvm_arm_vcpu_ptrauth_trap(vcpu);

	/*
	 * Return false for both cases as we never skip the trapped
	 * instruction:
	 *
	 * - Either we re-execute the same key register access instruction
	 *   after enabling ptrauth.
	 * - Or an UNDEF is injected as ptrauth is not supported/enabled.
	 */
	return false;
}

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
}

#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	default:
		BUG();
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
	}

	return val;
}
/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
}

/* Visibility overrides for SVE-specific ID registers */
static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN_USER;
}

/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_sve(vcpu))
		return 0;

	return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
}
static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *rd)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, rd);

	p->regval = guest_id_aa64zfr0_el1(vcpu);
	return true;
}

static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
		const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	u64 val;

	if (WARN_ON(!vcpu_has_sve(vcpu)))
		return -ENOENT;

	val = guest_id_aa64zfr0_el1(vcpu);
	return reg_to_user(uaddr, &val, reg->id);
}

static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
		const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	if (WARN_ON(!vcpu_has_sve(vcpu)))
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != guest_id_aa64zfr0_el1(vcpu))
		return -EINVAL;

	return 0;
}
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(vcpu, rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, false);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, false);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, true);
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sysreg(clidr_el1);
	return true;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	p->regval = get_ccsidr(csselr);

	/*
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time.
	 * To prevent this trapping from causing performance problems, let's
	 * expose the geometry of all data and unified caches (which are
	 * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 */
	if (!(csselr & 1)) // data or unified cache
		p->regval &= ~GENMASK(27, 3); // => 1 set, 1 way
	return true;
}
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug, none of the
 * OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	{ SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),
	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}
/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, would this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
};
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_encoding(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_encoding(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
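
/*
 * find_reg() relies on the descriptor tables being sorted ascending by
 * encoding (see the "Important" note above sys_reg_descs); bsearch() over
 * an unsorted table would silently miss entries.
 */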
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_guest(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
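
/*
 * A handler returning true means the access was fully emulated, so the
 * trapped instruction is skipped; returning false (typically after injecting
 * an exception) leaves the PC untouched so the guest either re-executes the
 * instruction or takes the injected exception.
 */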
/*
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

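/*
 * For reference, the shifts above correspond to the ISS layout used here for
 * MCRR/MRRC traps: bit 0 is the direction (0 == guest write), bits 4:1 are
 * CRm, bits 14:10 are Rt2 and bits 19:16 are Opc1, with Rt extracted by
 * kvm_vcpu_sys_get_rt(). Op0/Op2/CRn have no meaning for a 64-bit
 * coprocessor access and are left as zero so the cp*_64 tables still match.
 */
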
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt  = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);

	return 1;
}

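/*
 * Same idea for the 32-bit MCR/MRC case: bit 0 is the direction, bits 4:1
 * are CRm, bits 13:10 are CRn, bits 16:14 are Opc1 and bits 19:17 are Opc2,
 * exactly mirroring the field extraction above. Op0 stays zero, matching the
 * cp14/cp15 table entries, which only encode Op1/CRn/CRm/Op2.
 */
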
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	// See ARM DDI 0487E.a, section D12.3.2
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}

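/*
 * Op0 == 3 with CRn of the form 0b1x11 (i.e. 11 or 15) is the implementation
 * defined system register space; emulate_sys_reg() turns such accesses into
 * an UNDEF for the guest rather than logging them as unsupported.
 */
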
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num,
				unsigned long *bmap)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset) {
			int reg = table[i].reg;

			table[i].reset(vcpu, &table[i]);
			if (reg > 0 && reg < NR_SYS_REGS)
				set_bit(reg, bmap);
		}
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

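/*
 * Worked example (illustrative): a guest "mrs x1, sctlr_el1" traps with
 * Op0=3, Op1=0, CRn=1, CRm=0, Op2=0 and the direction bit set for a read.
 * The decode above fills sys_reg_params accordingly, emulate_sys_reg() finds
 * the SCTLR_EL1 descriptor, and the value read is copied back into x1 via
 * vcpu_set_reg() before returning to the guest.
 */
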
/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

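/*
 * The NULL ->access pointer in these entries is deliberate: the invariant
 * table is only consulted for the userspace ioctl path, never for guest
 * traps. Each ->val is latched from the host by the get_* reset hooks when
 * kvm_sys_reg_table_init() runs.
 */
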
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

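/*
 * Layout of a demux register index as consumed above (illustrative): the
 * COPROC field selects KVM_REG_ARM_DEMUX, the DEMUX_ID field selects CCSIDR,
 * and the low VAL bits carry the CSSELR value (cache level plus the I/D bit)
 * that is vetted by is_valid_cache(). Only 32-bit sized ids are accepted.
 */
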
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_user(vcpu, r))
		return -ENOENT;

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_user(vcpu, r))
		return -ENOENT;

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

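/*
 * Example (illustrative): a descriptor with Op0=3, Op1=0, CRn=1, CRm=0,
 * Op2=0 (SCTLR_EL1) is packed into a KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 * KVM_REG_ARM64_SYSREG index. Userspace hands that index back through the
 * ONE_REG ioctls, and index_to_params() performs the inverse unpacking.
 */
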
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_from_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(vcpu, i1, &uind, &total);
		else
			err = walk_one_sys_reg(vcpu, i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

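/*
 * Example of the trimming above (illustrative): if CLIDR reports Ctype1=3
 * (separate I/D), Ctype2=4 (unified) and Ctype3=0, the loop stops at i=2 and
 * cache_levels keeps only bits [5:0], so is_valid_cache() rejects any CSSELR
 * value naming the third or a further-out cache level.
 */
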
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;
	DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num, bmap);

	for (num = 1; num < NR_SYS_REGS; num++) {
		if (WARN(!test_bit(num, bmap),
			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
			break;
	}
}