// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 - Google LLC
 * Author: Marc Zyngier <maz@kernel.org>
 *
 * Primitive PAuth emulation for ERETAA/ERETAB.
 *
 * This code assumes that it is run from EL2, and that it is part of
 * the emulation of ERETAx for a guest hypervisor. That's a lot of
 * baked-in assumptions and shortcuts.
 *
 * Do not reuse for anything else!
 */
15 #include <linux/kvm_host.h>
17 #include <asm/gpr-num.h>
18 #include <asm/kvm_emulate.h>
19 #include <asm/pointer_auth.h>
/*
 * PACGA Xd, Xn, Xm
 *
 * Hand-encoded since the toolchain may not support PAuth; the GPR
 * numbers of the operands are patched into the instruction word at
 * assembly time via __DEFINE_ASM_GPR_NUMS.
 */
#define PACGA(d, n, m)						\
	asm volatile(__DEFINE_ASM_GPR_NUMS			\
		     ".inst 0x9AC03000          |"		\
		     "(.L__gpr_num_%[Rd] << 0)  |"		\
		     "(.L__gpr_num_%[Rn] << 5)  |"		\
		     "(.L__gpr_num_%[Rm] << 16)\n"		\
		     : [Rd] "=r" ((d))				\
		     : [Rn] "r" ((n)), [Rm] "r" ((m)))
31 static u64
compute_pac(struct kvm_vcpu
*vcpu
, u64 ptr
,
32 struct ptrauth_key ikey
)
34 struct ptrauth_key gkey
;
39 if (!vcpu_get_flag(vcpu
, SYSREGS_ON_CPU
))
40 mod
= __vcpu_sys_reg(vcpu
, SP_EL2
);
42 mod
= read_sysreg(sp_el1
);
44 gkey
.lo
= read_sysreg_s(SYS_APGAKEYLO_EL1
);
45 gkey
.hi
= read_sysreg_s(SYS_APGAKEYHI_EL1
);
47 __ptrauth_key_install_nosync(APGA
, ikey
);
53 __ptrauth_key_install_nosync(APGA
, gkey
);
57 /* PAC in the top 32bits */
61 static bool effective_tbi(struct kvm_vcpu
*vcpu
, bool bit55
)
63 u64 tcr
= vcpu_read_sys_reg(vcpu
, TCR_EL2
);
67 * Since we are authenticating an instruction address, we have
68 * to take TBID into account. If E2H==0, ignore VA[55], as
69 * TCR_EL2 only has a single TBI/TBID. If VA[55] was set in
70 * this case, this is likely a guest bug...
72 if (!vcpu_el2_e2h_is_set(vcpu
)) {
77 tbid
= tcr
& TCR_TBID1
;
80 tbid
= tcr
& TCR_TBID0
;
86 static int compute_bottom_pac(struct kvm_vcpu
*vcpu
, bool bit55
)
88 static const int maxtxsz
= 39; // Revisit these two values once
89 static const int mintxsz
= 16; // (if) we support TTST/LVA/LVA2
90 u64 tcr
= vcpu_read_sys_reg(vcpu
, TCR_EL2
);
93 if (!vcpu_el2_e2h_is_set(vcpu
) || !bit55
)
94 txsz
= FIELD_GET(TCR_T0SZ_MASK
, tcr
);
96 txsz
= FIELD_GET(TCR_T1SZ_MASK
, tcr
);
98 return 64 - clamp(txsz
, mintxsz
, maxtxsz
);
101 static u64
compute_pac_mask(struct kvm_vcpu
*vcpu
, bool bit55
)
106 bottom_pac
= compute_bottom_pac(vcpu
, bit55
);
108 mask
= GENMASK(54, bottom_pac
);
109 if (!effective_tbi(vcpu
, bit55
))
110 mask
|= GENMASK(63, 56);
115 static u64
to_canonical_addr(struct kvm_vcpu
*vcpu
, u64 ptr
, u64 mask
)
117 bool bit55
= !!(ptr
& BIT(55));
125 static u64
corrupt_addr(struct kvm_vcpu
*vcpu
, u64 ptr
)
127 bool bit55
= !!(ptr
& BIT(55));
128 u64 mask
, error_code
;
131 if (effective_tbi(vcpu
, bit55
)) {
132 mask
= GENMASK(54, 53);
135 mask
= GENMASK(62, 61);
139 if (esr_iss_is_eretab(kvm_vcpu_get_esr(vcpu
)))
140 error_code
= 2 << shift
;
142 error_code
= 1 << shift
;
/*
 * Authenticate an ERETAA/ERETAB instruction, returning true if the
 * authentication succeeded and false otherwise. In all cases, *elr
 * contains the VA to ERET to. Potential exception injection is left
 * to the caller.
 */
156 bool kvm_auth_eretax(struct kvm_vcpu
*vcpu
, u64
*elr
)
158 u64 sctlr
= vcpu_read_sys_reg(vcpu
, SCTLR_EL2
);
159 u64 esr
= kvm_vcpu_get_esr(vcpu
);
160 u64 ptr
, cptr
, pac
, mask
;
161 struct ptrauth_key ikey
;
163 *elr
= ptr
= vcpu_read_sys_reg(vcpu
, ELR_EL2
);
165 /* We assume we're already in the context of an ERETAx */
166 if (esr_iss_is_eretab(esr
)) {
167 if (!(sctlr
& SCTLR_EL1_EnIB
))
170 ikey
.lo
= __vcpu_sys_reg(vcpu
, APIBKEYLO_EL1
);
171 ikey
.hi
= __vcpu_sys_reg(vcpu
, APIBKEYHI_EL1
);
173 if (!(sctlr
& SCTLR_EL1_EnIA
))
176 ikey
.lo
= __vcpu_sys_reg(vcpu
, APIAKEYLO_EL1
);
177 ikey
.hi
= __vcpu_sys_reg(vcpu
, APIAKEYHI_EL1
);
180 mask
= compute_pac_mask(vcpu
, !!(ptr
& BIT(55)));
181 cptr
= to_canonical_addr(vcpu
, ptr
, mask
);
183 pac
= compute_pac(vcpu
, cptr
, ikey
);
186 * Slightly deviate from the pseudocode: if we have a PAC
187 * match with the signed pointer, then it must be good.
188 * Anything after this point is pure error handling.
190 if ((pac
& mask
) == (ptr
& mask
)) {
196 * Authentication failed, corrupt the canonical address if
197 * PAuth2 isn't implemented, or some XORing if it is.
199 if (!kvm_has_pauth(vcpu
->kvm
, PAuth2
))
200 cptr
= corrupt_addr(vcpu
, cptr
);
202 cptr
= ptr
^ (pac
& mask
);