/* tools/testing/selftests/bpf/verifier/ctx_skb.c */
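/*
 * Verifier selftests for __sk_buff context access.  Each braced block
 * below is one struct bpf_test initializer; as the layout suggests, the
 * test_verifier harness appears to #include the files under verifier/
 * to populate its test array, so this fragment is not meant to compile
 * stand-alone.
 */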
2 "access skb fields ok",
3 .insns = {
4 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5 offsetof(struct __sk_buff, len)),
6 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
7 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8 offsetof(struct __sk_buff, mark)),
9 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
10 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11 offsetof(struct __sk_buff, pkt_type)),
12 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
13 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14 offsetof(struct __sk_buff, queue_mapping)),
15 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
16 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
17 offsetof(struct __sk_buff, protocol)),
18 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
19 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
20 offsetof(struct __sk_buff, vlan_present)),
21 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
22 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
23 offsetof(struct __sk_buff, vlan_tci)),
24 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
25 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
26 offsetof(struct __sk_buff, napi_id)),
27 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
28 BPF_EXIT_INSN(),
30 .result = ACCEPT,
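/*
 * bad1 probes a negative context offset, which is rejected outright.
 * bad2-bad4 arrange for R1 to hold either the context pointer or a map
 * value returned by bpf_map_lookup_elem() when the ctx load executes,
 * so the verifier reports "different pointers".
 */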
33 "access skb fields bad1",
34 .insns = {
35 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
36 BPF_EXIT_INSN(),
38 .errstr = "invalid bpf_context access",
39 .result = REJECT,
42 "access skb fields bad2",
43 .insns = {
44 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
45 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
46 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
47 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
48 BPF_LD_MAP_FD(BPF_REG_1, 0),
49 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
50 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
51 BPF_EXIT_INSN(),
52 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
53 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
54 offsetof(struct __sk_buff, pkt_type)),
55 BPF_EXIT_INSN(),
57 .fixup_map_hash_8b = { 4 },
58 .errstr = "different pointers",
59 .errstr_unpriv = "R1 pointer comparison",
60 .result = REJECT,
63 "access skb fields bad3",
64 .insns = {
65 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
66 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
67 offsetof(struct __sk_buff, pkt_type)),
68 BPF_EXIT_INSN(),
69 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
70 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
71 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
72 BPF_LD_MAP_FD(BPF_REG_1, 0),
73 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
74 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
75 BPF_EXIT_INSN(),
76 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
77 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
79 .fixup_map_hash_8b = { 6 },
80 .errstr = "different pointers",
81 .errstr_unpriv = "R1 pointer comparison",
82 .result = REJECT,
85 "access skb fields bad4",
86 .insns = {
87 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
88 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
89 offsetof(struct __sk_buff, len)),
90 BPF_MOV64_IMM(BPF_REG_0, 0),
91 BPF_EXIT_INSN(),
92 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
93 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
94 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
95 BPF_LD_MAP_FD(BPF_REG_1, 0),
96 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
97 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
98 BPF_EXIT_INSN(),
99 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
100 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
102 .fixup_map_hash_8b = { 7 },
103 .errstr = "different pointers",
104 .errstr_unpriv = "R1 pointer comparison",
105 .result = REJECT,
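/*
 * family, remote/local address and port fields are only exposed to
 * socket-aware program types such as BPF_PROG_TYPE_SK_SKB; with the
 * default program type every access below must be rejected.
 */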
108 "invalid access __sk_buff family",
109 .insns = {
110 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
111 offsetof(struct __sk_buff, family)),
112 BPF_EXIT_INSN(),
114 .errstr = "invalid bpf_context access",
115 .result = REJECT,
118 "invalid access __sk_buff remote_ip4",
119 .insns = {
120 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
121 offsetof(struct __sk_buff, remote_ip4)),
122 BPF_EXIT_INSN(),
124 .errstr = "invalid bpf_context access",
125 .result = REJECT,
128 "invalid access __sk_buff local_ip4",
129 .insns = {
130 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
131 offsetof(struct __sk_buff, local_ip4)),
132 BPF_EXIT_INSN(),
134 .errstr = "invalid bpf_context access",
135 .result = REJECT,
138 "invalid access __sk_buff remote_ip6",
139 .insns = {
140 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
141 offsetof(struct __sk_buff, remote_ip6)),
142 BPF_EXIT_INSN(),
144 .errstr = "invalid bpf_context access",
145 .result = REJECT,
148 "invalid access __sk_buff local_ip6",
149 .insns = {
150 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
151 offsetof(struct __sk_buff, local_ip6)),
152 BPF_EXIT_INSN(),
154 .errstr = "invalid bpf_context access",
155 .result = REJECT,
158 "invalid access __sk_buff remote_port",
159 .insns = {
160 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
161 offsetof(struct __sk_buff, remote_port)),
162 BPF_EXIT_INSN(),
164 .errstr = "invalid bpf_context access",
165 .result = REJECT,
168 "invalid access __sk_buff remote_port",
169 .insns = {
170 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
171 offsetof(struct __sk_buff, local_port)),
172 BPF_EXIT_INSN(),
174 .errstr = "invalid bpf_context access",
175 .result = REJECT,
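/*
 * The same fields become readable once the program type is
 * BPF_PROG_TYPE_SK_SKB.
 */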
178 "valid access __sk_buff family",
179 .insns = {
180 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
181 offsetof(struct __sk_buff, family)),
182 BPF_EXIT_INSN(),
184 .result = ACCEPT,
185 .prog_type = BPF_PROG_TYPE_SK_SKB,
188 "valid access __sk_buff remote_ip4",
189 .insns = {
190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
191 offsetof(struct __sk_buff, remote_ip4)),
192 BPF_EXIT_INSN(),
194 .result = ACCEPT,
195 .prog_type = BPF_PROG_TYPE_SK_SKB,
198 "valid access __sk_buff local_ip4",
199 .insns = {
200 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
201 offsetof(struct __sk_buff, local_ip4)),
202 BPF_EXIT_INSN(),
204 .result = ACCEPT,
205 .prog_type = BPF_PROG_TYPE_SK_SKB,
208 "valid access __sk_buff remote_ip6",
209 .insns = {
210 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
211 offsetof(struct __sk_buff, remote_ip6[0])),
212 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
213 offsetof(struct __sk_buff, remote_ip6[1])),
214 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
215 offsetof(struct __sk_buff, remote_ip6[2])),
216 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
217 offsetof(struct __sk_buff, remote_ip6[3])),
218 BPF_EXIT_INSN(),
220 .result = ACCEPT,
221 .prog_type = BPF_PROG_TYPE_SK_SKB,
224 "valid access __sk_buff local_ip6",
225 .insns = {
226 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
227 offsetof(struct __sk_buff, local_ip6[0])),
228 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
229 offsetof(struct __sk_buff, local_ip6[1])),
230 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
231 offsetof(struct __sk_buff, local_ip6[2])),
232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
233 offsetof(struct __sk_buff, local_ip6[3])),
234 BPF_EXIT_INSN(),
236 .result = ACCEPT,
237 .prog_type = BPF_PROG_TYPE_SK_SKB,
240 "valid access __sk_buff remote_port",
241 .insns = {
242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
243 offsetof(struct __sk_buff, remote_port)),
244 BPF_EXIT_INSN(),
246 .result = ACCEPT,
247 .prog_type = BPF_PROG_TYPE_SK_SKB,
250 "valid access __sk_buff remote_port",
251 .insns = {
252 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
253 offsetof(struct __sk_buff, local_port)),
254 BPF_EXIT_INSN(),
256 .result = ACCEPT,
257 .prog_type = BPF_PROG_TYPE_SK_SKB,
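/*
 * Conversely, tc_classid and mark are not part of the SK_SKB view of
 * __sk_buff at all, while tc_index and priority remain writable for
 * this program type.
 */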
260 "invalid access of tc_classid for SK_SKB",
261 .insns = {
262 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
263 offsetof(struct __sk_buff, tc_classid)),
264 BPF_EXIT_INSN(),
266 .result = REJECT,
267 .prog_type = BPF_PROG_TYPE_SK_SKB,
268 .errstr = "invalid bpf_context access",
271 "invalid access of skb->mark for SK_SKB",
272 .insns = {
273 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
274 offsetof(struct __sk_buff, mark)),
275 BPF_EXIT_INSN(),
277 .result = REJECT,
278 .prog_type = BPF_PROG_TYPE_SK_SKB,
279 .errstr = "invalid bpf_context access",
282 "check skb->mark is not writeable by SK_SKB",
283 .insns = {
284 BPF_MOV64_IMM(BPF_REG_0, 0),
285 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
286 offsetof(struct __sk_buff, mark)),
287 BPF_EXIT_INSN(),
289 .result = REJECT,
290 .prog_type = BPF_PROG_TYPE_SK_SKB,
291 .errstr = "invalid bpf_context access",
294 "check skb->tc_index is writeable by SK_SKB",
295 .insns = {
296 BPF_MOV64_IMM(BPF_REG_0, 0),
297 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
298 offsetof(struct __sk_buff, tc_index)),
299 BPF_EXIT_INSN(),
301 .result = ACCEPT,
302 .prog_type = BPF_PROG_TYPE_SK_SKB,
305 "check skb->priority is writeable by SK_SKB",
306 .insns = {
307 BPF_MOV64_IMM(BPF_REG_0, 0),
308 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
309 offsetof(struct __sk_buff, priority)),
310 BPF_EXIT_INSN(),
312 .result = ACCEPT,
313 .prog_type = BPF_PROG_TYPE_SK_SKB,
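/*
 * Direct packet access for SK_SKB: data and data_end come from the
 * context, and every packet load or store must be dominated by a
 * bounds check against data_end.
 */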
316 "direct packet read for SK_SKB",
317 .insns = {
318 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
319 offsetof(struct __sk_buff, data)),
320 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
321 offsetof(struct __sk_buff, data_end)),
322 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
323 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
324 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
325 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
326 BPF_MOV64_IMM(BPF_REG_0, 0),
327 BPF_EXIT_INSN(),
329 .result = ACCEPT,
330 .prog_type = BPF_PROG_TYPE_SK_SKB,
333 "direct packet write for SK_SKB",
334 .insns = {
335 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
336 offsetof(struct __sk_buff, data)),
337 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
338 offsetof(struct __sk_buff, data_end)),
339 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
341 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
342 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
343 BPF_MOV64_IMM(BPF_REG_0, 0),
344 BPF_EXIT_INSN(),
346 .result = ACCEPT,
347 .prog_type = BPF_PROG_TYPE_SK_SKB,
350 "overlapping checks for direct packet access SK_SKB",
351 .insns = {
352 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
353 offsetof(struct __sk_buff, data)),
354 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
355 offsetof(struct __sk_buff, data_end)),
356 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
358 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
359 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
361 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
362 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
363 BPF_MOV64_IMM(BPF_REG_0, 0),
364 BPF_EXIT_INSN(),
366 .result = ACCEPT,
367 .prog_type = BPF_PROG_TYPE_SK_SKB,
370 "check skb->mark is not writeable by sockets",
371 .insns = {
372 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
373 offsetof(struct __sk_buff, mark)),
374 BPF_EXIT_INSN(),
376 .errstr = "invalid bpf_context access",
377 .errstr_unpriv = "R1 leaks addr",
378 .result = REJECT,
381 "check skb->tc_index is not writeable by sockets",
382 .insns = {
383 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
384 offsetof(struct __sk_buff, tc_index)),
385 BPF_EXIT_INSN(),
387 .errstr = "invalid bpf_context access",
388 .errstr_unpriv = "R1 leaks addr",
389 .result = REJECT,
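/*
 * cb[0]..cb[4] form 20 bytes of per-packet scratch space that socket
 * filters may read and write at byte, half, word and double-word
 * granularity, provided the access is naturally aligned (enforced in
 * the negative tests via F_LOAD_WITH_STRICT_ALIGNMENT) and stays
 * inside the array.
 */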
392 "check cb access: byte",
393 .insns = {
394 BPF_MOV64_IMM(BPF_REG_0, 0),
395 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
396 offsetof(struct __sk_buff, cb[0])),
397 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
398 offsetof(struct __sk_buff, cb[0]) + 1),
399 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
400 offsetof(struct __sk_buff, cb[0]) + 2),
401 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
402 offsetof(struct __sk_buff, cb[0]) + 3),
403 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
404 offsetof(struct __sk_buff, cb[1])),
405 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
406 offsetof(struct __sk_buff, cb[1]) + 1),
407 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
408 offsetof(struct __sk_buff, cb[1]) + 2),
409 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
410 offsetof(struct __sk_buff, cb[1]) + 3),
411 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
412 offsetof(struct __sk_buff, cb[2])),
413 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
414 offsetof(struct __sk_buff, cb[2]) + 1),
415 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
416 offsetof(struct __sk_buff, cb[2]) + 2),
417 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
418 offsetof(struct __sk_buff, cb[2]) + 3),
419 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
420 offsetof(struct __sk_buff, cb[3])),
421 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
422 offsetof(struct __sk_buff, cb[3]) + 1),
423 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
424 offsetof(struct __sk_buff, cb[3]) + 2),
425 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
426 offsetof(struct __sk_buff, cb[3]) + 3),
427 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
428 offsetof(struct __sk_buff, cb[4])),
429 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
430 offsetof(struct __sk_buff, cb[4]) + 1),
431 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
432 offsetof(struct __sk_buff, cb[4]) + 2),
433 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
434 offsetof(struct __sk_buff, cb[4]) + 3),
435 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
436 offsetof(struct __sk_buff, cb[0])),
437 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
438 offsetof(struct __sk_buff, cb[0]) + 1),
439 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
440 offsetof(struct __sk_buff, cb[0]) + 2),
441 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
442 offsetof(struct __sk_buff, cb[0]) + 3),
443 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
444 offsetof(struct __sk_buff, cb[1])),
445 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
446 offsetof(struct __sk_buff, cb[1]) + 1),
447 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
448 offsetof(struct __sk_buff, cb[1]) + 2),
449 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
450 offsetof(struct __sk_buff, cb[1]) + 3),
451 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
452 offsetof(struct __sk_buff, cb[2])),
453 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
454 offsetof(struct __sk_buff, cb[2]) + 1),
455 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
456 offsetof(struct __sk_buff, cb[2]) + 2),
457 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
458 offsetof(struct __sk_buff, cb[2]) + 3),
459 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
460 offsetof(struct __sk_buff, cb[3])),
461 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
462 offsetof(struct __sk_buff, cb[3]) + 1),
463 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
464 offsetof(struct __sk_buff, cb[3]) + 2),
465 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
466 offsetof(struct __sk_buff, cb[3]) + 3),
467 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
468 offsetof(struct __sk_buff, cb[4])),
469 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
470 offsetof(struct __sk_buff, cb[4]) + 1),
471 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
472 offsetof(struct __sk_buff, cb[4]) + 2),
473 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
474 offsetof(struct __sk_buff, cb[4]) + 3),
475 BPF_EXIT_INSN(),
477 .result = ACCEPT,
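/*
 * skb->hash is read-only: partial stores are rejected, while byte and
 * half loads succeed at any aligned offset inside the field.  The
 * preprocessor conditionals pick offsets that address the same bytes
 * on little- and big-endian hosts.
 */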
480 "__sk_buff->hash, offset 0, byte store not permitted",
481 .insns = {
482 BPF_MOV64_IMM(BPF_REG_0, 0),
483 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
484 offsetof(struct __sk_buff, hash)),
485 BPF_EXIT_INSN(),
487 .errstr = "invalid bpf_context access",
488 .result = REJECT,
491 "__sk_buff->tc_index, offset 3, byte store not permitted",
492 .insns = {
493 BPF_MOV64_IMM(BPF_REG_0, 0),
494 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
495 offsetof(struct __sk_buff, tc_index) + 3),
496 BPF_EXIT_INSN(),
498 .errstr = "invalid bpf_context access",
499 .result = REJECT,
502 "check skb->hash byte load permitted",
503 .insns = {
504 BPF_MOV64_IMM(BPF_REG_0, 0),
505 #if __BYTE_ORDER == __LITTLE_ENDIAN
506 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
507 offsetof(struct __sk_buff, hash)),
508 #else
509 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
510 offsetof(struct __sk_buff, hash) + 3),
511 #endif
512 BPF_EXIT_INSN(),
514 .result = ACCEPT,
517 "check skb->hash byte load permitted 1",
518 .insns = {
519 BPF_MOV64_IMM(BPF_REG_0, 0),
520 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
521 offsetof(struct __sk_buff, hash) + 1),
522 BPF_EXIT_INSN(),
524 .result = ACCEPT,
527 "check skb->hash byte load permitted 2",
528 .insns = {
529 BPF_MOV64_IMM(BPF_REG_0, 0),
530 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
531 offsetof(struct __sk_buff, hash) + 2),
532 BPF_EXIT_INSN(),
534 .result = ACCEPT,
537 "check skb->hash byte load permitted 3",
538 .insns = {
539 BPF_MOV64_IMM(BPF_REG_0, 0),
540 #if __BYTE_ORDER == __LITTLE_ENDIAN
541 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
542 offsetof(struct __sk_buff, hash) + 3),
543 #else
544 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
545 offsetof(struct __sk_buff, hash)),
546 #endif
547 BPF_EXIT_INSN(),
549 .result = ACCEPT,
552 "check cb access: byte, wrong type",
553 .insns = {
554 BPF_MOV64_IMM(BPF_REG_0, 0),
555 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
556 offsetof(struct __sk_buff, cb[0])),
557 BPF_EXIT_INSN(),
559 .errstr = "invalid bpf_context access",
560 .result = REJECT,
561 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
564 "check cb access: half",
565 .insns = {
566 BPF_MOV64_IMM(BPF_REG_0, 0),
567 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
568 offsetof(struct __sk_buff, cb[0])),
569 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
570 offsetof(struct __sk_buff, cb[0]) + 2),
571 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
572 offsetof(struct __sk_buff, cb[1])),
573 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
574 offsetof(struct __sk_buff, cb[1]) + 2),
575 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
576 offsetof(struct __sk_buff, cb[2])),
577 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
578 offsetof(struct __sk_buff, cb[2]) + 2),
579 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
580 offsetof(struct __sk_buff, cb[3])),
581 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
582 offsetof(struct __sk_buff, cb[3]) + 2),
583 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
584 offsetof(struct __sk_buff, cb[4])),
585 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
586 offsetof(struct __sk_buff, cb[4]) + 2),
587 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
588 offsetof(struct __sk_buff, cb[0])),
589 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
590 offsetof(struct __sk_buff, cb[0]) + 2),
591 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
592 offsetof(struct __sk_buff, cb[1])),
593 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
594 offsetof(struct __sk_buff, cb[1]) + 2),
595 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
596 offsetof(struct __sk_buff, cb[2])),
597 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
598 offsetof(struct __sk_buff, cb[2]) + 2),
599 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
600 offsetof(struct __sk_buff, cb[3])),
601 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
602 offsetof(struct __sk_buff, cb[3]) + 2),
603 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
604 offsetof(struct __sk_buff, cb[4])),
605 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
606 offsetof(struct __sk_buff, cb[4]) + 2),
607 BPF_EXIT_INSN(),
609 .result = ACCEPT,
612 "check cb access: half, unaligned",
613 .insns = {
614 BPF_MOV64_IMM(BPF_REG_0, 0),
615 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
616 offsetof(struct __sk_buff, cb[0]) + 1),
617 BPF_EXIT_INSN(),
619 .errstr = "misaligned context access",
620 .result = REJECT,
621 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
624 "check __sk_buff->hash, offset 0, half store not permitted",
625 .insns = {
626 BPF_MOV64_IMM(BPF_REG_0, 0),
627 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
628 offsetof(struct __sk_buff, hash)),
629 BPF_EXIT_INSN(),
631 .errstr = "invalid bpf_context access",
632 .result = REJECT,
635 "check __sk_buff->tc_index, offset 2, half store not permitted",
636 .insns = {
637 BPF_MOV64_IMM(BPF_REG_0, 0),
638 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
639 offsetof(struct __sk_buff, tc_index) + 2),
640 BPF_EXIT_INSN(),
642 .errstr = "invalid bpf_context access",
643 .result = REJECT,
646 "check skb->hash half load permitted",
647 .insns = {
648 BPF_MOV64_IMM(BPF_REG_0, 0),
649 #if __BYTE_ORDER == __LITTLE_ENDIAN
650 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
651 offsetof(struct __sk_buff, hash)),
652 #else
653 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
654 offsetof(struct __sk_buff, hash) + 2),
655 #endif
656 BPF_EXIT_INSN(),
658 .result = ACCEPT,
661 "check skb->hash half load permitted 2",
662 .insns = {
663 BPF_MOV64_IMM(BPF_REG_0, 0),
664 #if __BYTE_ORDER == __LITTLE_ENDIAN
665 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
666 offsetof(struct __sk_buff, hash) + 2),
667 #else
668 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
669 offsetof(struct __sk_buff, hash)),
670 #endif
671 BPF_EXIT_INSN(),
673 .result = ACCEPT,
676 "check skb->hash half load not permitted, unaligned 1",
677 .insns = {
678 BPF_MOV64_IMM(BPF_REG_0, 0),
679 #if __BYTE_ORDER == __LITTLE_ENDIAN
680 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
681 offsetof(struct __sk_buff, hash) + 1),
682 #else
683 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
684 offsetof(struct __sk_buff, hash) + 3),
685 #endif
686 BPF_EXIT_INSN(),
688 .errstr = "invalid bpf_context access",
689 .result = REJECT,
690 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
693 "check skb->hash half load not permitted, unaligned 3",
694 .insns = {
695 BPF_MOV64_IMM(BPF_REG_0, 0),
696 #if __BYTE_ORDER == __LITTLE_ENDIAN
697 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
698 offsetof(struct __sk_buff, hash) + 3),
699 #else
700 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
701 offsetof(struct __sk_buff, hash) + 1),
702 #endif
703 BPF_EXIT_INSN(),
705 .errstr = "invalid bpf_context access",
706 .result = REJECT,
707 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
710 "check cb access: half, wrong type",
711 .insns = {
712 BPF_MOV64_IMM(BPF_REG_0, 0),
713 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
714 offsetof(struct __sk_buff, cb[0])),
715 BPF_EXIT_INSN(),
717 .errstr = "invalid bpf_context access",
718 .result = REJECT,
719 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
722 "check cb access: word",
723 .insns = {
724 BPF_MOV64_IMM(BPF_REG_0, 0),
725 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
726 offsetof(struct __sk_buff, cb[0])),
727 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
728 offsetof(struct __sk_buff, cb[1])),
729 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
730 offsetof(struct __sk_buff, cb[2])),
731 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
732 offsetof(struct __sk_buff, cb[3])),
733 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
734 offsetof(struct __sk_buff, cb[4])),
735 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
736 offsetof(struct __sk_buff, cb[0])),
737 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
738 offsetof(struct __sk_buff, cb[1])),
739 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
740 offsetof(struct __sk_buff, cb[2])),
741 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
742 offsetof(struct __sk_buff, cb[3])),
743 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
744 offsetof(struct __sk_buff, cb[4])),
745 BPF_EXIT_INSN(),
747 .result = ACCEPT,
750 "check cb access: word, unaligned 1",
751 .insns = {
752 BPF_MOV64_IMM(BPF_REG_0, 0),
753 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
754 offsetof(struct __sk_buff, cb[0]) + 2),
755 BPF_EXIT_INSN(),
757 .errstr = "misaligned context access",
758 .result = REJECT,
759 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
762 "check cb access: word, unaligned 2",
763 .insns = {
764 BPF_MOV64_IMM(BPF_REG_0, 0),
765 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
766 offsetof(struct __sk_buff, cb[4]) + 1),
767 BPF_EXIT_INSN(),
769 .errstr = "misaligned context access",
770 .result = REJECT,
771 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
774 "check cb access: word, unaligned 3",
775 .insns = {
776 BPF_MOV64_IMM(BPF_REG_0, 0),
777 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
778 offsetof(struct __sk_buff, cb[4]) + 2),
779 BPF_EXIT_INSN(),
781 .errstr = "misaligned context access",
782 .result = REJECT,
783 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
786 "check cb access: word, unaligned 4",
787 .insns = {
788 BPF_MOV64_IMM(BPF_REG_0, 0),
789 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
790 offsetof(struct __sk_buff, cb[4]) + 3),
791 BPF_EXIT_INSN(),
793 .errstr = "misaligned context access",
794 .result = REJECT,
795 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
798 "check cb access: double",
799 .insns = {
800 BPF_MOV64_IMM(BPF_REG_0, 0),
801 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
802 offsetof(struct __sk_buff, cb[0])),
803 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
804 offsetof(struct __sk_buff, cb[2])),
805 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
806 offsetof(struct __sk_buff, cb[0])),
807 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
808 offsetof(struct __sk_buff, cb[2])),
809 BPF_EXIT_INSN(),
811 .result = ACCEPT,
814 "check cb access: double, unaligned 1",
815 .insns = {
816 BPF_MOV64_IMM(BPF_REG_0, 0),
817 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
818 offsetof(struct __sk_buff, cb[1])),
819 BPF_EXIT_INSN(),
821 .errstr = "misaligned context access",
822 .result = REJECT,
823 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
826 "check cb access: double, unaligned 2",
827 .insns = {
828 BPF_MOV64_IMM(BPF_REG_0, 0),
829 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
830 offsetof(struct __sk_buff, cb[3])),
831 BPF_EXIT_INSN(),
833 .errstr = "misaligned context access",
834 .result = REJECT,
835 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
838 "check cb access: double, oob 1",
839 .insns = {
840 BPF_MOV64_IMM(BPF_REG_0, 0),
841 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
842 offsetof(struct __sk_buff, cb[4])),
843 BPF_EXIT_INSN(),
845 .errstr = "invalid bpf_context access",
846 .result = REJECT,
849 "check cb access: double, oob 2",
850 .insns = {
851 BPF_MOV64_IMM(BPF_REG_0, 0),
852 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
853 offsetof(struct __sk_buff, cb[4])),
854 BPF_EXIT_INSN(),
856 .errstr = "invalid bpf_context access",
857 .result = REJECT,
860 "check __sk_buff->ifindex dw store not permitted",
861 .insns = {
862 BPF_MOV64_IMM(BPF_REG_0, 0),
863 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
864 offsetof(struct __sk_buff, ifindex)),
865 BPF_EXIT_INSN(),
867 .errstr = "invalid bpf_context access",
868 .result = REJECT,
871 "check __sk_buff->ifindex dw load not permitted",
872 .insns = {
873 BPF_MOV64_IMM(BPF_REG_0, 0),
874 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
875 offsetof(struct __sk_buff, ifindex)),
876 BPF_EXIT_INSN(),
878 .errstr = "invalid bpf_context access",
879 .result = REJECT,
882 "check cb access: double, wrong type",
883 .insns = {
884 BPF_MOV64_IMM(BPF_REG_0, 0),
885 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
886 offsetof(struct __sk_buff, cb[0])),
887 BPF_EXIT_INSN(),
889 .errstr = "invalid bpf_context access",
890 .result = REJECT,
891 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
894 "check out of range skb->cb access",
895 .insns = {
896 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
897 offsetof(struct __sk_buff, cb[0]) + 256),
898 BPF_EXIT_INSN(),
900 .errstr = "invalid bpf_context access",
901 .errstr_unpriv = "",
902 .result = REJECT,
903 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
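/*
 * Writability depends on the program type: a plain socket filter may
 * only store to cb[] (and unprivileged loading is rejected below
 * because storing R1, a pointer, into cb[] would leak a kernel
 * address), while SCHED_CLS programs may also write mark, tc_index
 * and tstamp.
 */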
906 "write skb fields from socket prog",
907 .insns = {
908 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
909 offsetof(struct __sk_buff, cb[4])),
910 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
911 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
912 offsetof(struct __sk_buff, mark)),
913 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
914 offsetof(struct __sk_buff, tc_index)),
915 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
916 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
917 offsetof(struct __sk_buff, cb[0])),
918 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
919 offsetof(struct __sk_buff, cb[2])),
920 BPF_EXIT_INSN(),
922 .result = ACCEPT,
923 .errstr_unpriv = "R1 leaks addr",
924 .result_unpriv = REJECT,
927 "write skb fields from tc_cls_act prog",
928 .insns = {
929 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
930 offsetof(struct __sk_buff, cb[0])),
931 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
932 offsetof(struct __sk_buff, mark)),
933 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
934 offsetof(struct __sk_buff, tc_index)),
935 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
936 offsetof(struct __sk_buff, tc_index)),
937 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
938 offsetof(struct __sk_buff, cb[3])),
939 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
940 offsetof(struct __sk_buff, tstamp)),
941 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
942 offsetof(struct __sk_buff, tstamp)),
943 BPF_EXIT_INSN(),
945 .errstr_unpriv = "",
946 .result_unpriv = REJECT,
947 .result = ACCEPT,
948 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
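/*
 * data/data_end context loads get rewritten into pointer loads by the
 * verifier, so they may only be accessed as full 32-bit words; a half
 * load is rejected regardless of byte order.
 */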
951 "check skb->data half load not permitted",
952 .insns = {
953 BPF_MOV64_IMM(BPF_REG_0, 0),
954 #if __BYTE_ORDER == __LITTLE_ENDIAN
955 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
956 offsetof(struct __sk_buff, data)),
957 #else
958 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
959 offsetof(struct __sk_buff, data) + 2),
960 #endif
961 BPF_EXIT_INSN(),
963 .result = REJECT,
964 .errstr = "invalid bpf_context access",
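/*
 * gso_segs and gso_size are readable from CGROUP_SKB and SCHED_CLS
 * programs but never writable; the expected errstr spells out each
 * field's offset within struct __sk_buff at the time these tests were
 * written.
 */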
967 "read gso_segs from CGROUP_SKB",
968 .insns = {
969 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
970 offsetof(struct __sk_buff, gso_segs)),
971 BPF_MOV64_IMM(BPF_REG_0, 0),
972 BPF_EXIT_INSN(),
974 .result = ACCEPT,
975 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
978 "read gso_segs from CGROUP_SKB",
979 .insns = {
980 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
981 offsetof(struct __sk_buff, gso_segs)),
982 BPF_MOV64_IMM(BPF_REG_0, 0),
983 BPF_EXIT_INSN(),
985 .result = ACCEPT,
986 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
989 "write gso_segs from CGROUP_SKB",
990 .insns = {
991 BPF_MOV64_IMM(BPF_REG_0, 0),
992 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
993 offsetof(struct __sk_buff, gso_segs)),
994 BPF_MOV64_IMM(BPF_REG_0, 0),
995 BPF_EXIT_INSN(),
997 .result = REJECT,
998 .result_unpriv = REJECT,
999 .errstr = "invalid bpf_context access off=164 size=4",
1000 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
1003 "read gso_segs from CLS",
1004 .insns = {
1005 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1006 offsetof(struct __sk_buff, gso_segs)),
1007 BPF_MOV64_IMM(BPF_REG_0, 0),
1008 BPF_EXIT_INSN(),
1010 .result = ACCEPT,
1011 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1014 "read gso_size from CGROUP_SKB",
1015 .insns = {
1016 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1017 offsetof(struct __sk_buff, gso_size)),
1018 BPF_MOV64_IMM(BPF_REG_0, 0),
1019 BPF_EXIT_INSN(),
1021 .result = ACCEPT,
1022 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
1025 "read gso_size from CGROUP_SKB",
1026 .insns = {
1027 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1028 offsetof(struct __sk_buff, gso_size)),
1029 BPF_MOV64_IMM(BPF_REG_0, 0),
1030 BPF_EXIT_INSN(),
1032 .result = ACCEPT,
1033 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
1036 "write gso_size from CGROUP_SKB",
1037 .insns = {
1038 BPF_MOV64_IMM(BPF_REG_0, 0),
1039 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1040 offsetof(struct __sk_buff, gso_size)),
1041 BPF_MOV64_IMM(BPF_REG_0, 0),
1042 BPF_EXIT_INSN(),
1044 .result = REJECT,
1045 .result_unpriv = REJECT,
1046 .errstr = "invalid bpf_context access off=176 size=4",
1047 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
1050 "read gso_size from CLS",
1051 .insns = {
1052 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1053 offsetof(struct __sk_buff, gso_size)),
1054 BPF_MOV64_IMM(BPF_REG_0, 0),
1055 BPF_EXIT_INSN(),
1057 .result = ACCEPT,
1058 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
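/*
 * wire_len is a tc-level field: readable by SCHED_CLS, invisible to
 * socket filters, and not writable from the context.
 */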
1061 "check wire_len is not readable by sockets",
1062 .insns = {
1063 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1064 offsetof(struct __sk_buff, wire_len)),
1065 BPF_EXIT_INSN(),
1067 .errstr = "invalid bpf_context access",
1068 .result = REJECT,
1071 "check wire_len is readable by tc classifier",
1072 .insns = {
1073 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1074 offsetof(struct __sk_buff, wire_len)),
1075 BPF_EXIT_INSN(),
1077 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1078 .result = ACCEPT,
1081 "check wire_len is not writable by tc classifier",
1082 .insns = {
1083 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1084 offsetof(struct __sk_buff, wire_len)),
1085 BPF_EXIT_INSN(),
1087 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1088 .errstr = "invalid bpf_context access",
1089 .errstr_unpriv = "R1 leaks addr",
1090 .result = REJECT,
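/*
 * The "taken check" tests repeat the packet bounds comparison: if the
 * branch at insn 5 is taken, R1 stays 0, and the verifier must see
 * that the identical comparison at insn 8 is then also taken, so the
 * load at insn 9 only executes with R1 = data + 14 and 42 bytes of
 * packet already proven.
 */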
1093 "pkt > pkt_end taken check",
1094 .insns = {
1095 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, // 0. r2 = *(u32 *)(r1 + data_end)
1096 offsetof(struct __sk_buff, data_end)),
1097 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, // 1. r4 = *(u32 *)(r1 + data)
1098 offsetof(struct __sk_buff, data)),
1099 BPF_MOV64_REG(BPF_REG_3, BPF_REG_4), // 2. r3 = r4
1100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42), // 3. r3 += 42
1101 BPF_MOV64_IMM(BPF_REG_1, 0), // 4. r1 = 0
1102 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2), // 5. if r3 > r2 goto 8
1103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14), // 6. r4 += 14
1104 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), // 7. r1 = r4
1105 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1), // 8. if r3 > r2 goto 10
1106 BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9), // 9. r2 = *(u8 *)(r1 + 9)
1107 BPF_MOV64_IMM(BPF_REG_0, 0), // 10. r0 = 0
1108 BPF_EXIT_INSN(), // 11. exit
1110 .result = ACCEPT,
1111 .prog_type = BPF_PROG_TYPE_SK_SKB,
1114 "pkt_end < pkt taken check",
1115 .insns = {
1116 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, // 0. r2 = *(u32 *)(r1 + data_end)
1117 offsetof(struct __sk_buff, data_end)),
1118 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, // 1. r4 = *(u32 *)(r1 + data)
1119 offsetof(struct __sk_buff, data)),
1120 BPF_MOV64_REG(BPF_REG_3, BPF_REG_4), // 2. r3 = r4
1121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42), // 3. r3 += 42
1122 BPF_MOV64_IMM(BPF_REG_1, 0), // 4. r1 = 0
1123 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2), // 5. if r3 > r2 goto 8
1124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14), // 6. r4 += 14
1125 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), // 7. r1 = r4
1126 BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1), // 8. if r2 < r3 goto 10
1127 BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9), // 9. r2 = *(u8 *)(r1 + 9)
1128 BPF_MOV64_IMM(BPF_REG_0, 0), // 10. r0 = 0
1129 BPF_EXIT_INSN(), // 11. exit
1131 .result = ACCEPT,
1132 .prog_type = BPF_PROG_TYPE_SK_SKB,