/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * - ARM little endian
 *
 * RSEQ_SIG uses the udf A32 instruction with an uncommon immediate operand
 * value 0x5de3. This traps if user-space reaches this instruction by mistake,
 * and the uncommon operand ensures the kernel does not move the instruction
 * pointer to attacker-controlled code on rseq abort.
 *
 * The instruction pattern in the A32 instruction set is:
 *
 * e7f5def3	udf	#24035	; 0x5de3
 *
 * This translates to the following instruction pattern in the T16 instruction
 * set:
 *
 * def3	udf	#243	; 0xf3
 *
 * - ARMv6+ big endian (BE8):
 *
 * ARMv6+ -mbig-endian generates mixed endianness code vs data: little-endian
 * code and big-endian data. The data value of the signature needs to have its
 * byte order reversed to generate the trap instruction:
 *
 * Data: 0xf3def5e7
 *
 * Translates to this A32 instruction pattern:
 *
 * e7f5def3	udf	#24035	; 0x5de3
 *
 * Translates to this T16 instruction pattern:
 *
 * def3	udf	#243	; 0xf3
 *
 * - Prior to ARMv6 big endian (BE32):
 *
 * Prior to ARMv6, -mbig-endian generates big-endian code and data
 * (which match), so the endianness of the data representation of the
 * signature should not be reversed. However, the choice between BE32
 * and BE8 is done by the linker, so we cannot know whether code and
 * data endianness will be mixed before the linker is invoked. So rather
 * than try to play tricks with the linker, the rseq signature is simply
 * data (not a trap instruction) prior to ARMv6 on big endian. This is
 * why the signature is expressed as data (.word) rather than as
 * instruction (.inst) in assembler.
 */

#ifdef __ARMEB__
#define RSEQ_SIG	0xf3def5e7	/* udf	#24035	; 0x5de3 (ARMv6+) */
#else
#define RSEQ_SIG	0xe7f5def3	/* udf	#24035	; 0x5de3 */
#endif
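
/*
 * RSEQ_SIG is the signature the kernel verifies in the word immediately
 * preceding the abort handler, and the same value is passed as the last
 * argument of the rseq registration system call. A minimal registration
 * sketch, assuming <linux/rseq.h> and a kernel with rseq support (the
 * helper and variable names below are hypothetical; rseq.h in this project
 * provides the real registration code):
 *
 *	#include <linux/rseq.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static __thread struct rseq my_rseq_area __attribute__((aligned(32)));
 *
 *	static int my_rseq_register(void)
 *	{
 *		// On abort, the kernel compares this signature against the
 *		// word stored just before abort_ip and sends SIGSEGV on
 *		// mismatch.
 *		return syscall(__NR_rseq, &my_rseq_area, sizeof(my_rseq_area),
 *			       0, RSEQ_SIG);
 *	}
 */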

/* All three barriers map to "dmb", a full data memory barrier on ARM. */
#define rseq_smp_mb()	__asm__ __volatile__ ("dmb" ::: "memory", "cc")
#define rseq_smp_rmb()	__asm__ __volatile__ ("dmb" ::: "memory", "cc")
#define rseq_smp_wmb()	__asm__ __volatile__ ("dmb" ::: "memory", "cc")

#define rseq_smp_load_acquire(p)					\
__extension__ ({							\
	rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p));	\
	rseq_smp_mb();							\
	____p1;								\
})

#define rseq_smp_acquire__after_ctrl_dep()	rseq_smp_rmb()

#define rseq_smp_store_release(p, v)					\
do {									\
	rseq_smp_mb();							\
	RSEQ_WRITE_ONCE(*(p), v);					\
} while (0)
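
/*
 * Usage sketch (illustrative only): rseq_smp_store_release() and
 * rseq_smp_load_acquire() pair in the usual message-passing pattern. The
 * flag/data protocol below is hypothetical:
 *
 *	static int data;
 *	static int ready;
 *
 *	static void producer(void)
 *	{
 *		data = 42;
 *		// Release: the store to data is visible before ready == 1.
 *		rseq_smp_store_release(&ready, 1);
 *	}
 *
 *	static void consumer(void)
 *	{
 *		// Acquire: once ready == 1 is observed, data == 42 is too.
 *		while (!rseq_smp_load_acquire(&ready))
 *			;
 *		// data can now be read safely.
 *	}
 *
 * On this architecture both sides expand to a full "dmb" barrier, which is
 * stronger than the minimum acquire/release semantics require.
 */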

#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip,	\
				post_commit_offset, abort_ip)		\
		".pushsection __rseq_cs, \"aw\"\n\t"			\
		".balign 32\n\t"					\
		__rseq_str(label) ":\n\t"				\
		".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
		".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
		".popsection\n\t"					\
		".pushsection __rseq_cs_ptr_array, \"aw\"\n\t"		\
		".word " __rseq_str(label) "b, 0x0\n\t"			\
		".popsection\n\t"

#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
	__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip,		\
				(post_commit_ip - start_ip), abort_ip)
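
/*
 * For reference: the .word directives in __RSEQ_ASM_DEFINE_TABLE() emit the
 * fields of the rseq ABI descriptor, struct rseq_cs from <linux/rseq.h>,
 * with each 64-bit field written as two 32-bit .word values:
 *
 *	struct rseq_cs {
 *		__u32 version;
 *		__u32 flags;
 *		__u64 start_ip;
 *		__u64 post_commit_offset;	// length of the critical section
 *		__u64 abort_ip;
 *	};
 *
 * The entry emitted into __rseq_cs_ptr_array lets tools such as debuggers
 * enumerate all critical section descriptors in the program.
 */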

/*
 * Exit points of a rseq critical section consist of all instructions outside
 * of the critical section where a critical section can either branch to or
 * reach through the normal course of its execution. The abort IP and the
 * post-commit IP are already part of the __rseq_cs section and should not be
 * explicitly defined as additional exit points. Knowing all exit points is
 * useful to assist debuggers stepping over the critical section.
 */
#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip)			\
		".pushsection __rseq_exit_point_array, \"aw\"\n\t"	\
		".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) ", 0x0\n\t" \
		".popsection\n\t"

#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs)		\
		"adr r0, " __rseq_str(cs_label) "\n\t"			\
		"str r0, %[" __rseq_str(rseq_cs) "]\n\t"		\
		__rseq_str(label) ":\n\t"

#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label)		\
		"ldr r0, %[" __rseq_str(current_cpu_id) "]\n\t"		\
		"cmp %[" __rseq_str(cpu_id) "], r0\n\t"			\
		"bne " __rseq_str(label) "\n\t"

#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown,		\
				abort_label, version, flags,		\
				start_ip, post_commit_offset, abort_ip)	\
		".balign 32\n\t"					\
		__rseq_str(table_label) ":\n\t"				\
		".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
		".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
		".word " __rseq_str(RSEQ_SIG) "\n\t"			\
		__rseq_str(label) ":\n\t"				\
		teardown						\
		"b %l[" __rseq_str(abort_label) "]\n\t"

#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \
			      start_ip, post_commit_ip, abort_ip)	\
	__RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown,		\
				abort_label, 0x0, 0x0, start_ip,	\
				(post_commit_ip - start_ip), abort_ip)

#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label)		\
		__rseq_str(label) ":\n\t"				\
		teardown						\
		"b %l[" __rseq_str(cmpfail_label) "]\n\t"

/* Per-cpu-id indexing. */

#define RSEQ_TEMPLATE_CPU_ID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-arm-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq-arm-bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_CPU_ID

/* Per-mm-cid indexing. */

#define RSEQ_TEMPLATE_MM_CID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-arm-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq-arm-bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_MM_CID

/* APIs which are not based on cpu ids. */

#define RSEQ_TEMPLATE_CPU_ID_NONE
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-arm-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED
#undef RSEQ_TEMPLATE_CPU_ID_NONE