// arch/arm64/kvm/pauth.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 - Google LLC
 * Author: Marc Zyngier <maz@kernel.org>
 *
 * Primitive PAuth emulation for ERETAA/ERETAB.
 *
 * This code assumes that it is run from EL2, and that it is part of
 * the emulation of ERETAx for a guest hypervisor. That's a lot of
 * baked-in assumptions and shortcuts.
 *
 * Do not reuse for anything else!
 */

#include <linux/kvm_host.h>

#include <asm/gpr-num.h>
#include <asm/kvm_emulate.h>
#include <asm/pointer_auth.h>
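
/*
 * PACGA is hand-encoded via .inst (likely so the file builds
 * regardless of assembler PAuth support): the base opcode is
 * 0x9AC03000, and the Rd, Rn and Rm register numbers are OR'd into
 * bits [4:0], [9:5] and [20:16] respectively, using the
 * .L__gpr_num_* symbols emitted by __DEFINE_ASM_GPR_NUMS.
 */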
/* PACGA Xd, Xn, Xm */
#define PACGA(d, n, m)						\
	asm volatile(__DEFINE_ASM_GPR_NUMS			\
		     ".inst 0x9AC03000 |"			\
		     "(.L__gpr_num_%[Rd] << 0) |"		\
		     "(.L__gpr_num_%[Rn] << 5) |"		\
		     "(.L__gpr_num_%[Rm] << 16)\n"		\
		     : [Rd] "=r" ((d))				\
		     : [Rn] "r" ((n)), [Rm] "r" ((m)))
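
/*
 * Compute the PAC for a pointer: temporarily install the guest's
 * instruction key in the APGA slot and let the CPU's own PACGA
 * instruction run the implementation's PAC algorithm, rather than
 * reimplementing it in software.
 */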
static u64 compute_pac(struct kvm_vcpu *vcpu, u64 ptr,
		       struct ptrauth_key ikey)
{
	struct ptrauth_key gkey;
	u64 mod, pac = 0;

	preempt_disable();
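
	/*
	 * The modifier is the guest hypervisor's SP (SP_EL2), which
	 * either lives in the in-memory sysreg file, or sits in the
	 * hardware SP_EL1 register while the vcpu's sysregs are
	 * loaded on the CPU.
	 */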
	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		mod = __vcpu_sys_reg(vcpu, SP_EL2);
	else
		mod = read_sysreg(sp_el1);
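
	/*
	 * Save the host's APGA key and temporarily install the
	 * guest's instruction key in its place; the ISBs synchronize
	 * the key swap around the PACGA, and the host key is restored
	 * before preemption is re-enabled.
	 */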
	gkey.lo = read_sysreg_s(SYS_APGAKEYLO_EL1);
	gkey.hi = read_sysreg_s(SYS_APGAKEYHI_EL1);

	__ptrauth_key_install_nosync(APGA, ikey);
	isb();

	PACGA(pac, ptr, mod);
	isb();

	__ptrauth_key_install_nosync(APGA, gkey);

	preempt_enable();

	/* PAC in the top 32 bits */
	return pac;
}
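
/*
 * Work out whether the top byte of the pointer is ignored for this
 * translation regime: TBI must be set and, since an instruction
 * address is being authenticated, TBID must be clear.
 */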
static bool effective_tbi(struct kvm_vcpu *vcpu, bool bit55)
{
	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
	bool tbi, tbid;

	/*
	 * Since we are authenticating an instruction address, we have
	 * to take TBID into account. If E2H==0, ignore VA[55], as
	 * TCR_EL2 only has a single TBI/TBID. If VA[55] was set in
	 * this case, this is likely a guest bug...
	 */
	if (!vcpu_el2_e2h_is_set(vcpu)) {
		tbi = tcr & BIT(20);
		tbid = tcr & BIT(29);
	} else if (bit55) {
		tbi = tcr & TCR_TBI1;
		tbid = tcr & TCR_TBID1;
	} else {
		tbi = tcr & TCR_TBI0;
		tbid = tcr & TCR_TBID0;
	}

	return tbi && !tbid;
}
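
/*
 * Lowest bit of the PAC field: the PAC occupies everything between
 * bit 55 and the top of the VA range selected by TxSZ.
 */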
static int compute_bottom_pac(struct kvm_vcpu *vcpu, bool bit55)
{
	static const int maxtxsz = 39; // Revisit these two values once
	static const int mintxsz = 16; // (if) we support TTST/LVA/LVA2
	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
	int txsz;

	if (!vcpu_el2_e2h_is_set(vcpu) || !bit55)
		txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
	else
		txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);

	return 64 - clamp(txsz, mintxsz, maxtxsz);
}
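
/*
 * Mask of the pointer bits holding the PAC: bits [54:bottom_pac],
 * plus bits [63:56] when the top byte is not ignored.
 */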
static u64 compute_pac_mask(struct kvm_vcpu *vcpu, bool bit55)
{
	int bottom_pac;
	u64 mask;

	bottom_pac = compute_bottom_pac(vcpu, bit55);

	mask = GENMASK(54, bottom_pac);
	if (!effective_tbi(vcpu, bit55))
		mask |= GENMASK(63, 56);

	return mask;
}
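
/*
 * Strip the PAC by extending bit 55 across the PAC field, rebuilding
 * the canonical form of the address.
 */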
static u64 to_canonical_addr(struct kvm_vcpu *vcpu, u64 ptr, u64 mask)
{
	bool bit55 = !!(ptr & BIT(55));

	if (bit55)
		return ptr | mask;

	return ptr & ~mask;
}
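
/*
 * Without FEAT_PAuth2, a failed authentication poisons the pointer
 * with an error code identifying the failing key (1 for the A key,
 * 2 for the B key), making the address non-canonical so that any
 * attempt to use it faults.
 */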
static u64 corrupt_addr(struct kvm_vcpu *vcpu, u64 ptr)
{
	bool bit55 = !!(ptr & BIT(55));
	u64 mask, error_code;
	int shift;

	if (effective_tbi(vcpu, bit55)) {
		mask = GENMASK(54, 53);
		shift = 53;
	} else {
		mask = GENMASK(62, 61);
		shift = 61;
	}

	if (esr_iss_is_eretab(kvm_vcpu_get_esr(vcpu)))
		error_code = 2ULL << shift;
	else
		error_code = 1ULL << shift;

	ptr &= ~mask;
	ptr |= error_code;

	return ptr;
}

/*
 * Authenticate an ERETAA/ERETAB instruction, returning true if the
 * authentication succeeded and false otherwise. In all cases, *elr
 * contains the VA to ERET to. Potential exception injection is left
 * to the caller.
 */
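/*
 * A sketch of the expected use (illustrative only, the actual caller
 * lives in the nested ERET emulation path):
 *
 *	u64 elr;
 *
 *	if (kvm_auth_eretax(vcpu, &elr))
 *		*vcpu_pc(vcpu) = elr;
 *	else
 *		... inject an exception or ERET to the corrupted
 *		    address, depending on FEAT_FPAC ...
 */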
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u64 ptr, cptr, pac, mask;
	struct ptrauth_key ikey;

	*elr = ptr = vcpu_read_sys_reg(vcpu, ELR_EL2);
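
	/*
	 * With the corresponding SCTLR_EL2.EnIA/EnIB bit clear, ERETAx
	 * performs no authentication at all, and the pointer is used
	 * as-is.
	 */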
	/* We assume we're already in the context of an ERETAx */
	if (esr_iss_is_eretab(esr)) {
		if (!(sctlr & SCTLR_EL1_EnIB))
			return true;

		ikey.lo = __vcpu_sys_reg(vcpu, APIBKEYLO_EL1);
		ikey.hi = __vcpu_sys_reg(vcpu, APIBKEYHI_EL1);
	} else {
		if (!(sctlr & SCTLR_EL1_EnIA))
			return true;

		ikey.lo = __vcpu_sys_reg(vcpu, APIAKEYLO_EL1);
		ikey.hi = __vcpu_sys_reg(vcpu, APIAKEYHI_EL1);
	}

	mask = compute_pac_mask(vcpu, !!(ptr & BIT(55)));
	cptr = to_canonical_addr(vcpu, ptr, mask);

	pac = compute_pac(vcpu, cptr, ikey);

	/*
	 * Slightly deviate from the pseudocode: if we have a PAC
	 * match with the signed pointer, then it must be good.
	 * Anything after this point is pure error handling.
	 */
	if ((pac & mask) == (ptr & mask)) {
		*elr = cptr;
		return true;
	}

	/*
	 * Authentication failed: corrupt the canonical address if
	 * PAuth2 isn't implemented, or XOR the pointer with the
	 * computed PAC if it is.
	 */
	if (!kvm_has_pauth(vcpu->kvm, PAuth2))
		cptr = corrupt_addr(vcpu, cptr);
	else
		cptr = ptr ^ (pac & mask);

	*elr = cptr;
	return false;
}