/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __TOOLS_LINUX_FILTER_H
#define __TOOLS_LINUX_FILTER_H

#include <linux/bpf.h>
/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc. are used as argument mappings for function
 * calls in the BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10
/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512
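/* Stack usage sketch: the frame pointer (BPF_REG_FP) is read-only and
 * stack slots sit at negative offsets from it, bounded by MAX_BPF_STACK,
 * e.g. spilling and refilling a 64-bit scalar:
 *
 *	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -8),
 *	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_FP, -8),
 */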
/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })
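/* Endianness sketch: TYPE is BPF_TO_LE or BPF_TO_BE and LEN is 16, 32
 * or 64, e.g. converting a 16-bit port number in r0 to big endian:
 *
 *	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
 */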
/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })
/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */

#define BPF_MOVSX64_REG(DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

#define BPF_MOVSX32_REG(DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })
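/* Sign-extension sketch: OFF selects the source width (8, 16 or 32),
 * e.g. r0 = (s8)r1, sign-extended to 64 bits:
 *
 *	BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_1, 8),
 */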
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })
/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })
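/* Packet access sketch in the classic-BPF style, e.g. fetching the
 * EtherType halfword at byte offset 12 of the packet into R0:
 *
 *	BPF_LD_ABS(BPF_H, 12),
 */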
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })
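/* Load/store sketch, e.g. copying a 32-bit word from (r1 + 4) to the
 * stack at (fp - 4):
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 4),
 *	BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_REG_0, -4),
 */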
/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
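/* Atomic usage sketch, e.g. a 64-bit atomic add of r1 into the counter
 * pointed to by r2, and a fetch variant that returns the old value in r1:
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_1, 0),
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
 */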
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })
/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })
/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = 0 })
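/* Branch sketch: the offset counts instructions to skip after the
 * branch itself, e.g. if r0 == 0 skip the next two instructions,
 * otherwise increment r0 and jump past one more:
 *
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
 *	BPF_JMP_A(1),
 */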
/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - BPF_FUNC_unspec) })
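/* Helper call sketch: arguments go in R1-R5 and the result comes back
 * in R0, e.g. fetching the current tgid/pid:
 *
 *	BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid),
 */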
/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */

#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })
#define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2)	\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF1,					\
		.imm   = IMM1 }),				\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF2,					\
		.imm   = IMM2 })
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0,	\
			      MAP_FD, 0)

#define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF)		\
	BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0,	\
			      MAP_FD, VALUE_OFF)
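/* Map lookup sketch: the loader rewrites the pseudo fd load into a map
 * pointer, then the usual calling convention applies, e.g. looking up
 * key 0 in a map whose fd is held in a local variable map_fd:
 *
 *	BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0),
 *	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
 */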
/* Relative call */

#define BPF_CALL_REL(TGT)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = BPF_PSEUDO_CALL,			\
		.off   = 0,					\
		.imm   = TGT })
/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })
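/* Putting it together, a minimal sketch of a complete program built
 * from these initializers; it just sets the return code in R0 and exits:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 */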
#endif /* __TOOLS_LINUX_FILTER_H */