2 "unpriv: return pointer",
4 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
8 .result_unpriv
= REJECT
,
9 .errstr_unpriv
= "R0 leaks addr",
10 .retval
= POINTER_VALUE
,
13 "unpriv: add const to pointer",
15 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
16 BPF_MOV64_IMM(BPF_REG_0
, 0),
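/* Adding one pointer to another (ctx in R1 plus fp in R10) is rejected
 * outright: "R1 pointer += pointer".
 */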
22 "unpriv: add pointer to pointer",
24 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
25 BPF_MOV64_IMM(BPF_REG_0
, 0),
29 .errstr
= "R1 pointer += pointer",
32 "unpriv: neg pointer",
34 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_1
, 0),
35 BPF_MOV64_IMM(BPF_REG_0
, 0),
39 .result_unpriv
= REJECT
,
40 .errstr_unpriv
= "R1 pointer arithmetic",
43 "unpriv: cmp pointer with const",
45 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
46 BPF_MOV64_IMM(BPF_REG_0
, 0),
50 .result_unpriv
= REJECT
,
51 .errstr_unpriv
= "R1 pointer comparison",
54 "unpriv: cmp pointer with pointer",
56 BPF_JMP_REG(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
57 BPF_MOV64_IMM(BPF_REG_0
, 0),
61 .result_unpriv
= REJECT
,
62 .errstr_unpriv
= "R10 pointer comparison",
65 "unpriv: check that printk is disallowed",
67 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
68 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
69 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
70 BPF_MOV64_IMM(BPF_REG_2
, 8),
71 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
72 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_trace_printk
),
73 BPF_MOV64_IMM(BPF_REG_0
, 0),
76 .errstr_unpriv
= "unknown func bpf_trace_printk#6",
77 .result_unpriv
= REJECT
,
79 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
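/* Passing a stack pointer to bpf_map_update_elem() in R4 leaks a kernel
 * address under unpriv ("R4 leaks addr").
 */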
82 "unpriv: pass pointer to helper function",
84 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
85 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
86 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
87 BPF_LD_MAP_FD(BPF_REG_1
, 0),
88 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
89 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
90 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_update_elem
),
91 BPF_MOV64_IMM(BPF_REG_0
, 0),
94 .fixup_map_hash_8b
= { 3 },
95 .errstr_unpriv
= "R4 leaks addr",
96 .result_unpriv
= REJECT
,
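/* The lookup key at fp-8 holds a spilled pointer, so the helper's indirect
 * stack read is rejected under unpriv.
 */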
100 "unpriv: indirectly pass pointer on stack to helper function",
102 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
103 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
104 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
105 BPF_LD_MAP_FD(BPF_REG_1
, 0),
106 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
107 BPF_MOV64_IMM(BPF_REG_0
, 0),
110 .fixup_map_hash_8b
= { 3 },
111 .errstr_unpriv
= "invalid indirect read from stack off -8+0 size 8",
112 .result_unpriv
= REJECT
,
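/* Overwriting part of a spilled pointer with a narrower store is an
 * attempt to corrupt the spill (unpriv).
 */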
116 "unpriv: mangle pointer on stack 1",
118 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
119 BPF_ST_MEM(BPF_W
, BPF_REG_10
, -8, 0),
120 BPF_MOV64_IMM(BPF_REG_0
, 0),
123 .errstr_unpriv
= "attempt to corrupt spilled",
124 .result_unpriv
= REJECT
,
128 "unpriv: mangle pointer on stack 2",
130 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
131 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -1, 0),
132 BPF_MOV64_IMM(BPF_REG_0
, 0),
135 .errstr_unpriv
= "attempt to corrupt spilled",
136 .result_unpriv
= REJECT
,
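/* Filling a spilled pointer back with a 4-byte load is rejected with
 * "invalid size".
 */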
140 "unpriv: read pointer from stack in small chunks",
142 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
143 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_10
, -8),
144 BPF_MOV64_IMM(BPF_REG_0
, 0),
147 .errstr
= "invalid size",
151 "unpriv: write pointer into ctx",
153 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
, 0),
154 BPF_MOV64_IMM(BPF_REG_0
, 0),
157 .errstr_unpriv
= "R1 leaks addr",
158 .result_unpriv
= REJECT
,
159 .errstr
= "invalid bpf_context access",
163 "unpriv: spill/fill of ctx",
165 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
166 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
167 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
168 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
169 BPF_MOV64_IMM(BPF_REG_0
, 0),
175 "unpriv: spill/fill of ctx 2",
177 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
178 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
179 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
180 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
181 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_get_hash_recalc
),
182 BPF_MOV64_IMM(BPF_REG_0
, 0),
186 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
189 "unpriv: spill/fill of ctx 3",
191 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
192 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
193 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
194 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
195 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
196 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_get_hash_recalc
),
200 .errstr
= "R1 type=fp expected=ctx",
201 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
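/* The XADD on fp-8 clobbers the spilled ctx pointer, so the filled R1 is a
 * scalar (type=inv) where the helper expects ctx.
 */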
204 "unpriv: spill/fill of ctx 4",
206 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
207 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
208 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
209 BPF_MOV64_IMM(BPF_REG_0
, 1),
210 BPF_RAW_INSN(BPF_STX
| BPF_XADD
| BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8, 0),
211 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
212 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_get_hash_recalc
),
216 .errstr
= "R1 type=inv expected=ctx",
217 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
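/* Depending on the branch, the slot at fp-8 holds either a stack-derived
 * pointer or the ctx pointer, so the final store insn is reached with
 * different pointer types.
 */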
220 "unpriv: spill/fill of different pointers stx",
222 BPF_MOV64_IMM(BPF_REG_3
, 42),
223 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
224 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
225 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
226 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
227 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
228 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
229 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
230 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
231 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
232 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
233 offsetof(struct __sk_buff
, mark
)),
234 BPF_MOV64_IMM(BPF_REG_0
, 0),
238 .errstr
= "same insn cannot be used with different pointers",
239 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
242 "unpriv: spill/fill of different pointers stx - ctx and sock",
244 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
245 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
246 BPF_SK_LOOKUP(sk_lookup_tcp
),
247 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
249 /* void *target = &foo; */
250 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
251 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
252 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
253 /* if (skb == NULL) *target = sock; */
254 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
255 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
256 /* else *target = skb; */
257 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
258 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
259 /* struct __sk_buff *skb = *target; */
260 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
261 /* skb->mark = 42; */
262 BPF_MOV64_IMM(BPF_REG_3
, 42),
263 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
264 offsetof(struct __sk_buff
, mark
)),
265 /* if (sk) bpf_sk_release(sk) */
266 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
267 BPF_EMIT_CALL(BPF_FUNC_sk_release
),
268 BPF_MOV64_IMM(BPF_REG_0
, 0),
272 .errstr
= "type=ctx expected=sock",
273 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
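/* The socket acquired via sk_lookup is never released on this path, so the
 * verifier reports an unreleased reference.
 */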
276 "unpriv: spill/fill of different pointers stx - leak sock",
278 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
279 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
280 BPF_SK_LOOKUP(sk_lookup_tcp
),
281 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
283 /* void *target = &foo; */
284 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
285 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
286 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
287 /* if (skb == NULL) *target = sock; */
288 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
289 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
290 /* else *target = skb; */
291 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
292 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
293 /* struct __sk_buff *skb = *target; */
294 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
295 /* skb->mark = 42; */
296 BPF_MOV64_IMM(BPF_REG_3
, 42),
297 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
298 offsetof(struct __sk_buff
, mark
)),
302 //.errstr = "same insn cannot be used with different pointers",
303 .errstr
= "Unreleased reference",
304 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
307 "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
309 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
310 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
311 BPF_SK_LOOKUP(sk_lookup_tcp
),
312 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
314 /* void *target = &foo; */
315 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
316 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
317 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
318 /* if (skb) *target = skb */
319 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
320 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
321 /* else *target = sock */
322 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
323 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
324 /* struct bpf_sock *sk = *target; */
325 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
326 /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
327 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 2),
328 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
329 offsetof(struct bpf_sock
, mark
)),
330 BPF_EMIT_CALL(BPF_FUNC_sk_release
),
331 BPF_MOV64_IMM(BPF_REG_0
, 0),
335 .errstr
= "same insn cannot be used with different pointers",
336 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
339 "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
341 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
342 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
343 BPF_SK_LOOKUP(sk_lookup_tcp
),
344 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
346 /* void *target = &foo; */
347 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
348 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
349 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
350 /* if (skb) *target = skb */
351 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
352 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
353 /* else *target = sock */
354 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
355 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
356 /* struct bpf_sock *sk = *target; */
357 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
358 /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
359 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
360 BPF_MOV64_IMM(BPF_REG_3
, 42),
361 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
362 offsetof(struct bpf_sock
, mark
)),
363 BPF_EMIT_CALL(BPF_FUNC_sk_release
),
364 BPF_MOV64_IMM(BPF_REG_0
, 0),
368 //.errstr = "same insn cannot be used with different pointers",
369 .errstr
= "cannot write into sock",
370 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
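/* ldx variant: the sample_period load dereferences either a stack pointer
 * or the perf_event ctx, so the same insn is used with different pointers.
 */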
373 "unpriv: spill/fill of different pointers ldx",
375 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
376 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
377 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
378 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
379 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
,
380 -(__s32
)offsetof(struct bpf_perf_event_data
,
382 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
383 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
384 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
385 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
386 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
,
387 offsetof(struct bpf_perf_event_data
, sample_period
)),
388 BPF_MOV64_IMM(BPF_REG_0
, 0),
392 .errstr
= "same insn cannot be used with different pointers",
393 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
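/* Writing the map value pointer returned by the lookup back into the map
 * element leaks a kernel address under unpriv.
 */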
396 "unpriv: write pointer into map elem value",
398 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
399 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
400 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
401 BPF_LD_MAP_FD(BPF_REG_1
, 0),
402 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
403 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
404 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
407 .fixup_map_hash_8b
= { 3 },
408 .errstr_unpriv
= "R0 leaks addr",
409 .result_unpriv
= REJECT
,
413 "alu32: mov u32 const",
415 BPF_MOV32_IMM(BPF_REG_7
, 0),
416 BPF_ALU32_IMM(BPF_AND
, BPF_REG_7
, 1),
417 BPF_MOV32_REG(BPF_REG_0
, BPF_REG_7
),
418 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
419 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_7
, 0),
426 "unpriv: partial copy of pointer",
428 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_10
),
429 BPF_MOV64_IMM(BPF_REG_0
, 0),
432 .errstr_unpriv
= "R10 partial copy",
433 .result_unpriv
= REJECT
,
437 "unpriv: pass pointer to tail_call",
439 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
440 BPF_LD_MAP_FD(BPF_REG_2
, 0),
441 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
442 BPF_MOV64_IMM(BPF_REG_0
, 0),
445 .fixup_prog1
= { 1 },
446 .errstr_unpriv
= "R3 leaks addr into helper",
447 .result_unpriv
= REJECT
,
451 "unpriv: cmp map pointer with zero",
453 BPF_MOV64_IMM(BPF_REG_1
, 0),
454 BPF_LD_MAP_FD(BPF_REG_1
, 0),
455 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
456 BPF_MOV64_IMM(BPF_REG_0
, 0),
459 .fixup_map_hash_8b
= { 1 },
460 .errstr_unpriv
= "R1 pointer comparison",
461 .result_unpriv
= REJECT
,
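/* R10 is the read-only frame pointer; using it as a destination register
 * is rejected.
 */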
465 "unpriv: write into frame pointer",
467 BPF_MOV64_REG(BPF_REG_10
, BPF_REG_1
),
468 BPF_MOV64_IMM(BPF_REG_0
, 0),
471 .errstr
= "frame pointer is read only",
475 "unpriv: spill/fill frame pointer",
477 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
478 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
479 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
480 BPF_LDX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, 0),
481 BPF_MOV64_IMM(BPF_REG_0
, 0),
484 .errstr
= "frame pointer is read only",
488 "unpriv: cmp of frame pointer",
490 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_10
, 0, 0),
491 BPF_MOV64_IMM(BPF_REG_0
, 0),
494 .errstr_unpriv
= "R10 pointer comparison",
495 .result_unpriv
= REJECT
,
499 "unpriv: adding of fp",
501 BPF_MOV64_IMM(BPF_REG_0
, 0),
502 BPF_MOV64_IMM(BPF_REG_1
, 0),
503 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
504 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, -8),
507 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
508 .result_unpriv
= REJECT
,
512 "unpriv: cmp of stack pointer",
514 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
515 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
516 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_2
, 0, 0),
517 BPF_MOV64_IMM(BPF_REG_0
, 0),
520 .errstr_unpriv
= "R2 pointer comparison",
521 .result_unpriv
= REJECT
,