/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __TOOLS_LINUX_FILTER_H
#define __TOOLS_LINUX_FILTER_H

#include <linux/bpf.h>

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */

#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512
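
/* Usage sketch (illustrative, not part of the original header): BPF_REG_FP
 * is read-only and valid stack offsets lie in [-MAX_BPF_STACK, 0), so a
 * register spill to the first stack slot, using BPF_STX_MEM() defined
 * below, looks like:
 *
 *	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1, -8)	// *(u64 *)(fp - 8) = r1
 */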

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = 0 })
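
/* Example (illustrative): BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1)
 * emits "r0 += r1". The 32-bit BPF_ALU32_REG() form operates on the low
 * 32 bits and zero-extends the result into the upper half of dst_reg.
 */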

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })
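
/* Example (illustrative): BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4) emits
 * "r2 += -4"; in the 64-bit form the 32-bit immediate is sign-extended
 * before the operation.
 */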

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = LEN })
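
/* Example (illustrative): BPF_ENDIAN(BPF_TO_BE, BPF_REG_1, 16) emits
 * "r1 = htobe16(r1)"; LEN selects the operand width (16, 32 or 64) and
 * TYPE is BPF_TO_LE or BPF_TO_BE.
 */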

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)				\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_MOV | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC)				\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_MOV | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)				\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_MOV | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM)				\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_MOV | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })

/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */

#define BPF_MOVSX64_REG(DST, SRC, OFF)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_MOV | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })

#define BPF_MOVSX32_REG(DST, SRC, OFF)			\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_MOV | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })
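
/* Example (illustrative): BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_1, 8) emits
 * "r0 = (s8)r1"; the off field (8, 16 or 32) selects the width of the
 * source operand to sign-extend from.
 */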

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)		\
	((struct bpf_insn) {				\
		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)		\
	((struct bpf_insn) {				\
		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)				\
	((struct bpf_insn) {				\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = IMM })
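
/* Example (illustrative): loading the 16-bit ethertype field of an
 * Ethernet frame in host byte order:
 *
 *	BPF_LD_ABS(BPF_H, 12)	// R0 = ntohs(*(u16 *)(skb->data + 12))
 *
 * These legacy packet-access instructions expect the skb context in
 * BPF_REG_CTX and treat R1-R5 as scratch registers.
 */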

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })
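
/* Example (illustrative, assuming the context register r1 points at a
 * struct __sk_buff, as in socket filter programs):
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 *		    offsetof(struct __sk_buff, len))	// r0 = skb->len
 */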

/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
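
/* Example (illustrative): BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0)
 * emits "lock *(u64 *)(r1 + 0) += r2"; OR-ing BPF_FETCH into OP additionally
 * returns the old value in src_reg, per the table above.
 */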

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = OFF,				\
		.imm = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)			\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)			\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = OFF,				\
		.imm = IMM })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)		\
	((struct bpf_insn) {				\
		.code = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,				\
		.src_reg = 0,				\
		.off = OFF,				\
		.imm = IMM })
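
/* Example (illustrative): BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2) emits
 * "if r0 == 0 goto pc+2"; off16 counts instructions relative to the one
 * following the jump.
 */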

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)					\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_JA,		\
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = OFF,				\
		.imm = 0 })

/* Function call */

#define BPF_EMIT_CALL(FUNC)				\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_CALL,		\
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = ((FUNC) - BPF_FUNC_unspec) })
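
/* Example (illustrative): BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem) emits a
 * helper call; arguments are passed in R1-R5 and the return value lands
 * in R0.
 */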

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)		\
	((struct bpf_insn) {				\
		.code = CODE,				\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF,				\
		.imm = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */

#define BPF_LD_IMM64(DST, IMM)				\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)			\
	((struct bpf_insn) {				\
		.code = BPF_LD | BPF_DW | BPF_IMM,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = 0,				\
		.imm = (__u32) (IMM) }),		\
	((struct bpf_insn) {				\
		.code = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = ((__u64) (IMM)) >> 32 })
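
/* Note (illustrative): BPF_LD_IMM64() expands to two comma-separated
 * initializers and therefore occupies two consecutive slots in a
 * struct bpf_insn array; the second, reserved-opcode instruction carries
 * the upper 32 bits of the immediate:
 *
 *	BPF_LD_IMM64(BPF_REG_1, 0x1122334455667788ULL)	// r1 = 0x1122334455667788
 */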

#define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2)	\
	((struct bpf_insn) {				\
		.code = BPF_LD | BPF_DW | BPF_IMM,	\
		.dst_reg = DST,				\
		.src_reg = SRC,				\
		.off = OFF1,				\
		.imm = IMM1 }),				\
	((struct bpf_insn) {				\
		.code = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = OFF2,				\
		.imm = IMM2 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */

#define BPF_LD_MAP_FD(DST, MAP_FD)			\
	BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0,	\
			      MAP_FD, 0)

#define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF)	\
	BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0,	\
			      MAP_FD, VALUE_OFF)
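
/* Example (illustrative, assuming "map_fd" is a file descriptor obtained
 * from bpf(BPF_MAP_CREATE, ...)):
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd)	// r1 = map pointer
 *
 * The kernel resolves the process-local fd into a map pointer at program
 * load time.
 */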

/* Relative call */

#define BPF_CALL_REL(TGT)				\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_CALL,		\
		.dst_reg = 0,				\
		.src_reg = BPF_PSEUDO_CALL,		\
		.off = 0,				\
		.imm = TGT })
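
/* Note (illustrative): BPF_PSEUDO_CALL in src_reg marks a bpf-to-bpf call
 * rather than a helper call; TGT is the instruction-relative offset of the
 * callee, counted from the instruction following the call.
 */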

/* Program exit */

#define BPF_EXIT_INSN()					\
	((struct bpf_insn) {				\
		.code = BPF_JMP | BPF_EXIT,		\
		.dst_reg = 0,				\
		.src_reg = 0,				\
		.off = 0,				\
		.imm = 0 })
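
/* Example (illustrative): a minimal program that returns 0, built entirely
 * from the macros in this header:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	// r0 = 0
 *		BPF_EXIT_INSN(),		// return r0
 *	};
 */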

#endif /* __TOOLS_LINUX_FILTER_H */