/* tools/testing/selftests/bpf/verifier/ref_tracking.c */
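/* Verifier test cases for tracking of acquired socket references. Each block
 * below is a struct bpf_test initializer; test_verifier.c pulls this file
 * into its tests[] array. BPF_SK_LOOKUP(func) is a helper macro from
 * test_verifier.c: it places a zeroed struct bpf_sock_tuple on the stack and
 * emits a call to the named lookup helper (e.g. bpf_sk_lookup_tcp), so R0
 * afterwards holds the acquired socket pointer or NULL. The verifier must see
 * every acquired reference released via bpf_sk_release() on all paths.
 */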
{
	"reference tracking: leak potential reference",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
{
	"reference tracking: leak potential reference to sock_common",
	.insns = {
	BPF_SK_LOOKUP(skc_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
{
	"reference tracking: leak potential reference on stack",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
{
	"reference tracking: leak potential reference on stack 2",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
{
	"reference tracking: zero potential reference",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
{
	"reference tracking: zero potential reference to sock_common",
	.insns = {
	BPF_SK_LOOKUP(skc_lookup_tcp),
	BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
{
	"reference tracking: copy and zero potential references",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
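/* The lookup helpers return a possibly-NULL pointer (sock_or_null /
 * sock_common_or_null). bpf_sk_release() only accepts a NULL-checked socket,
 * so passing the unchecked return value fails with
 * "type=sock_or_null expected=sock".
 */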
{
	"reference tracking: release reference without check",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	/* reference in r0 may be NULL */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "type=sock_or_null expected=sock",
	.result = REJECT,
},
{
	"reference tracking: release reference to sock_common without check",
	.insns = {
	BPF_SK_LOOKUP(skc_lookup_tcp),
	/* reference in r0 may be NULL */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "type=sock_common_or_null expected=sock",
	.result = REJECT,
},
{
	"reference tracking: release reference",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking: release reference to sock_common",
	.insns = {
	BPF_SK_LOOKUP(skc_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking: release reference 2",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
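/* Double release: the first bpf_sk_release() drops the reference and
 * invalidates every copy of the pointer (it becomes an unknown scalar),
 * so the second release fails with "type=inv expected=sock".
 */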
{
	"reference tracking: release reference twice",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "type=inv expected=sock",
	.result = REJECT,
},
{
	"reference tracking: release reference twice inside branch",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "type=inv expected=sock",
	.result = REJECT,
},
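/* The next two tests gate the lookup on a value read from packet data, so
 * they first do a direct packet access bounds check (data + 16 <= data_end).
 * The first variant only releases the socket on the mark == 0 branch and
 * leaks it on the other; the second releases it in both subbranches.
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS tells the harness (roughly) that the
 * expected verdict assumes efficient unaligned access on the architecture.
 */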
{
	"reference tracking: alloc, check, free in one subbranch",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
	/* if (offsetof(skb, mark) > data_len) exit; */
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
		    offsetof(struct __sk_buff, mark)),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
	/* Leak reference in R0 */
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"reference tracking: alloc, check, free in both subbranches",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
	/* if (offsetof(skb, mark) > data_len) exit; */
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
		    offsetof(struct __sk_buff, mark)),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
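/* The "in call" tests exercise BPF-to-BPF calls.
 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, N) is a pseudo call
 * (src_reg == BPF_PSEUDO_CALL); imm = N is the callee's offset relative to
 * the next instruction. A reference may be acquired in one frame and
 * released in another, but it still has to be released on every path.
 */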
{
	"reference tracking in call: free reference in subprog",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking in call: free reference in subprog and outside",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "type=inv expected=sock",
	.result = REJECT,
},
{
	"reference tracking in call: alloc & leak reference in subprog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	/* spill unchecked sk_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
{
	"reference tracking in call: alloc in subprog, release outside",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_EXIT_INSN(), /* return sk */
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.result = ACCEPT,
},
{
	"reference tracking in call: sk_ptr leak into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	/* spill unchecked sk_ptr into stack of caller */
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
{
	"reference tracking in call: sk_ptr spill into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	/* spill unchecked sk_ptr into stack of caller */
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* now the sk_ptr is verified, free the reference */
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
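/* The legacy BPF_LD_ABS/BPF_LD_IND packet loads clobber R1-R5 and can
 * terminate the program implicitly when the load fails, which would leak a
 * held reference; they are only allowed once all references are released.
 */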
{
	"reference tracking: allow LD_ABS",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LD_ABS(BPF_B, 0),
	BPF_LD_ABS(BPF_H, 0),
	BPF_LD_ABS(BPF_W, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking: forbid LD_ABS while holding reference",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_LD_ABS(BPF_B, 0),
	BPF_LD_ABS(BPF_H, 0),
	BPF_LD_ABS(BPF_W, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
	.result = REJECT,
},
{
	"reference tracking: allow LD_IND",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_7, 1),
	BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"reference tracking: forbid LD_IND while holding reference",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_7, 1),
	BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
	.result = REJECT,
},
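/* bpf_tail_call() does not return to the caller on success, so it is only
 * allowed when no reference is held ("tail_call would lead to reference
 * leak" otherwise). .fixup_prog1 = { N } asks test_verifier.c to patch the
 * fd of a prog-array map into the BPF_LD_MAP_FD at instruction index N.
 */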
{
	"reference tracking: check reference or tail call",
	.insns = {
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	/* if (sk) bpf_sk_release() */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
	/* bpf_tail_call() */
	BPF_MOV64_IMM(BPF_REG_3, 3),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 17 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking: release reference then tail call",
	.insns = {
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
	BPF_SK_LOOKUP(sk_lookup_tcp),
	/* if (sk) bpf_sk_release() */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	/* bpf_tail_call() */
	BPF_MOV64_IMM(BPF_REG_3, 3),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 18 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking: leak possible reference over tail call",
	.insns = {
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
	/* Look up socket and store in REG_6 */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	/* bpf_tail_call() */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, 3),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	/* if (sk) bpf_sk_release() */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 16 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "tail_call would lead to reference leak",
	.result = REJECT,
},
{
	"reference tracking: leak checked reference over tail call",
	.insns = {
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
	/* Look up socket and store in REG_6 */
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* if (!sk) goto end */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	/* bpf_tail_call() */
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 17 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "tail_call would lead to reference leak",
	.result = REJECT,
},
{
	"reference tracking: mangle and release sock_or_null",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
	.result = REJECT,
},
{
	"reference tracking: mangle and release sock",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R1 pointer arithmetic on sock prohibited",
	.result = REJECT,
},
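/* After the NULL check the pointer is a valid PTR_TO_SOCKET: properly sized
 * reads of struct bpf_sock fields are allowed, writes are not ("cannot write
 * into sock"), oversized accesses are rejected, and after bpf_sk_release()
 * the pointer may no longer be dereferenced at all.
 */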
{
	"reference tracking: access member",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking: write to member",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_LD_IMM64(BPF_REG_2, 42),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
		    offsetof(struct bpf_sock, mark)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LD_IMM64(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "cannot write into sock",
	.result = REJECT,
},
{
	"reference tracking: invalid 64-bit access of member",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid sock access off=0 size=8",
	.result = REJECT,
},
{
	"reference tracking: access after release",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "!read_ok",
	.result = REJECT,
},
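/* Instead of BPF_SK_LOOKUP(), this test passes a pointer into packet data as
 * the struct bpf_sock_tuple argument: after checking that 64 bytes of the
 * packet are present, it calls bpf_sk_lookup_tcp() with R2 = skb->data and
 * R3 = sizeof(struct bpf_sock_tuple).
 */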
{
	"reference tracking: direct access for lookup",
	.insns = {
	/* Check that the packet is at least 64B long */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
	/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
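/* bpf_sk_fullsock() and bpf_tcp_sock() return pointers derived from the
 * reference-tracked socket without taking a reference of their own; when the
 * underlying socket is released, those derived pointers are invalidated and
 * any later dereference is an "invalid mem access". The
 * bpf_get_listener_sock() cases below exercise the related lifetime rules.
 */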
{
	"reference tracking: use ptr from bpf_tcp_sock() after release",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "invalid mem access",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"reference tracking: use ptr from bpf_sk_fullsock() after release",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "invalid mem access",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "invalid mem access",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"reference tracking: use sk after bpf_sk_release(tp)",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "invalid mem access",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking: bpf_sk_release(listen_sk)",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "reference has not been acquired before",
},
{
	/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
	"reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "invalid mem access",
},
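/* Branch tracking: comparing the looked-up pointer against NULL refines it
 * in both branches, so the accepted case below releases exactly on the
 * non-NULL path. Comparing it against an arbitrary value (1234) proves
 * nothing about NULL-ness, so the path that skips the release leaks the
 * reference.
 */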
{
	"reference tracking: branch tracking valid pointer null comparison",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"reference tracking: branch tracking valid pointer value comparison",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 1234, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "Unreleased reference",
	.result = REJECT,
},
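/* bpf_skc_to_tcp_sock() casts the socket to a BTF-typed tcp_sock pointer.
 * Releasing through that pointer is accepted since it still refers to the
 * same acquired reference, but dereferencing it after the release is not.
 * Unprivileged loads are expected to fail earlier with "unknown func"
 * because the cast helper is not available to unprivileged programs.
 */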
{
	"reference tracking: bpf_sk_release(btf_tcp_sock)",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "unknown func",
},
{
	"reference tracking: use ptr from bpf_skc_to_tcp_sock() after release",
	.insns = {
	BPF_SK_LOOKUP(sk_lookup_tcp),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "invalid mem access",
	.result_unpriv = REJECT,
	.errstr_unpriv = "unknown func",
},