/*
 * Test: bpf_tail_call() with an in-bounds index (key 0) into a prog-array
 * map; the tail-called program is expected to run once.  Sequence:
 * R3 = index, R2 = map fd (0 is a placeholder the harness patches via a
 * fixup entry — not visible in this chunk), call bpf_tail_call; the final
 * R0 = 1 executes only if the tail call falls through.
 * NOTE(review): extraction-damaged fragment — the enclosing test struct
 * (.insns/.result/.retval, exit insn) is missing from this view, and the
 * leading integers on each line are fused source line numbers, not code.
 */
2 "runtime/jit: tail_call within bounds, prog once",
/* R3 = tail-call index 0 */
4 BPF_MOV64_IMM(BPF_REG_3
, 0),
/* R2 = prog-array map fd (placeholder, patched by test harness) */
5 BPF_LD_MAP_FD(BPF_REG_2
, 0),
/* call bpf_tail_call(ctx, map, index) */
6 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
/* fallthrough path: tail call did not transfer control */
7 BPF_MOV64_IMM(BPF_REG_0
, 1),
/*
 * Test: bpf_tail_call() with in-bounds key 1; per the name, slot 1
 * presumably holds a program that tail-calls again (loop) — the looping
 * program itself is set up by harness code outside this chunk.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
15 "runtime/jit: tail_call within bounds, prog loop",
/* R3 = tail-call index 1 */
17 BPF_MOV64_IMM(BPF_REG_3
, 1),
/* R2 = prog-array map fd (placeholder, patched via fixup) */
18 BPF_LD_MAP_FD(BPF_REG_2
, 0),
19 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
/* fallthrough: tail call failed / returned */
20 BPF_MOV64_IMM(BPF_REG_0
, 1),
/*
 * Test: bpf_tail_call() with in-bounds key 3 where (per the name) no
 * program is installed in that slot, so the call fails and execution
 * falls through to R0 = 1.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
28 "runtime/jit: tail_call within bounds, no prog",
/* R3 = tail-call index 3 (empty slot) */
30 BPF_MOV64_IMM(BPF_REG_3
, 3),
31 BPF_LD_MAP_FD(BPF_REG_2
, 0),
32 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
/* reached because the empty slot makes the tail call a no-op */
33 BPF_MOV64_IMM(BPF_REG_0
, 1),
/*
 * Test: bpf_tail_call() with in-bounds key 2; same shape as the key-0
 * test above, only the index differs.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
41 "runtime/jit: tail_call within bounds, key 2",
/* R3 = tail-call index 2 */
43 BPF_MOV64_IMM(BPF_REG_3
, 2),
44 BPF_LD_MAP_FD(BPF_REG_2
, 0),
45 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
46 BPF_MOV64_IMM(BPF_REG_0
, 1),
/*
 * Test: two-branch tail_call where BOTH branches use key 2 and the same
 * map.  A marker byte (13) is stored to skb->cb[0] and reloaded; the JEQ
 * on 13 selects the first branch.  .fixup_prog1 = { 5, 9 } patches the
 * map fd at insn indices 5 and 9 — presumably the two BPF_LD_MAP_FD
 * placeholders below; verify against the harness.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
54 "runtime/jit: tail_call within bounds, key 2 / key 2, first branch",
/* store marker 13 into skb->cb[0] */
56 BPF_MOV64_IMM(BPF_REG_0
, 13),
57 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
58 offsetof(struct __sk_buff
, cb
[0])),
/* reload the marker so the verifier sees a runtime value */
59 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
60 offsetof(struct __sk_buff
, cb
[0])),
/* marker == 13 -> skip 4 insns into the second branch setup */
61 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 13, 4),
/* first branch: key 2, map fd placeholder */
62 BPF_MOV64_IMM(BPF_REG_3
, 2),
63 BPF_LD_MAP_FD(BPF_REG_2
, 0),
/* jump over the other branch to the shared tail_call */
64 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
/* second branch: also key 2, same map */
65 BPF_MOV64_IMM(BPF_REG_3
, 2),
66 BPF_LD_MAP_FD(BPF_REG_2
, 0),
67 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
68 BPF_MOV64_IMM(BPF_REG_0
, 1),
/* patch the map fd at insn indices 5 and 9 (both LD_MAP_FD slots) */
71 .fixup_prog1
= { 5, 9 },
/*
 * Test: same shape as the "first branch" case above, but the marker is 14
 * so the JEQ-on-13 is false and the fallthrough (other) branch supplies
 * key/map — still key 2 and the same map in both branches.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
76 "runtime/jit: tail_call within bounds, key 2 / key 2, second branch",
/* store marker 14 into skb->cb[0] (fails the ==13 test below) */
78 BPF_MOV64_IMM(BPF_REG_0
, 14),
79 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
80 offsetof(struct __sk_buff
, cb
[0])),
81 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
82 offsetof(struct __sk_buff
, cb
[0])),
83 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 13, 4),
/* branch taken when marker != 13: key 2 */
84 BPF_MOV64_IMM(BPF_REG_3
, 2),
85 BPF_LD_MAP_FD(BPF_REG_2
, 0),
86 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
/* other branch: also key 2 */
87 BPF_MOV64_IMM(BPF_REG_3
, 2),
88 BPF_LD_MAP_FD(BPF_REG_2
, 0),
89 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
90 BPF_MOV64_IMM(BPF_REG_0
, 1),
/* patch map fd at insn indices 5 and 9 */
93 .fixup_prog1
= { 5, 9 },
/*
 * Test: two-branch tail_call with DIFFERENT keys — key 0 on the
 * fallthrough branch, key 2 on the jump-target branch — same map in both.
 * Marker 13 makes the JEQ true, selecting the key-2 branch ("first
 * branch" per the test name).
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
98 "runtime/jit: tail_call within bounds, key 0 / key 2, first branch",
/* store marker 13 into skb->cb[0] */
100 BPF_MOV64_IMM(BPF_REG_0
, 13),
101 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
102 offsetof(struct __sk_buff
, cb
[0])),
103 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
104 offsetof(struct __sk_buff
, cb
[0])),
105 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 13, 4),
/* fallthrough branch (marker != 13): key 0 */
106 BPF_MOV64_IMM(BPF_REG_3
, 0),
107 BPF_LD_MAP_FD(BPF_REG_2
, 0),
108 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
/* taken branch (marker == 13): key 2 */
109 BPF_MOV64_IMM(BPF_REG_3
, 2),
110 BPF_LD_MAP_FD(BPF_REG_2
, 0),
111 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
112 BPF_MOV64_IMM(BPF_REG_0
, 1),
/* patch map fd at insn indices 5 and 9 */
115 .fixup_prog1
= { 5, 9 },
/*
 * Test: same key-0/key-2 two-branch shape as above, but marker 14 makes
 * the JEQ-on-13 false, so the key-0 fallthrough branch ("second branch")
 * feeds the tail_call instead.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
120 "runtime/jit: tail_call within bounds, key 0 / key 2, second branch",
/* store marker 14 (fails the ==13 test) into skb->cb[0] */
122 BPF_MOV64_IMM(BPF_REG_0
, 14),
123 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
124 offsetof(struct __sk_buff
, cb
[0])),
125 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
126 offsetof(struct __sk_buff
, cb
[0])),
127 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 13, 4),
/* branch taken at runtime (marker != 13): key 0 */
128 BPF_MOV64_IMM(BPF_REG_3
, 0),
129 BPF_LD_MAP_FD(BPF_REG_2
, 0),
130 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
/* other branch: key 2 */
131 BPF_MOV64_IMM(BPF_REG_3
, 2),
132 BPF_LD_MAP_FD(BPF_REG_2
, 0),
133 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
134 BPF_MOV64_IMM(BPF_REG_0
, 1),
/* patch map fd at insn indices 5 and 9 */
137 .fixup_prog1
= { 5, 9 },
/*
 * Test: two-branch tail_call where each branch loads a DIFFERENT
 * prog-array map (fixup_prog1 patches insn 5, fixup_prog2 patches insn 9)
 * with key 0 on both sides; marker 13 selects the second LD_MAP_FD.
 * For an unprivileged loader the verifier must REJECT this with
 * "tail_call abusing map_ptr", because the map pointer reaching the
 * helper is not a single constant map.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
142 "runtime/jit: tail_call within bounds, different maps, first branch",
/* store marker 13 into skb->cb[0] */
144 BPF_MOV64_IMM(BPF_REG_0
, 13),
145 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
146 offsetof(struct __sk_buff
, cb
[0])),
147 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
148 offsetof(struct __sk_buff
, cb
[0])),
149 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 13, 4),
/* fallthrough branch: key 0, map #1 (patched at insn 5) */
150 BPF_MOV64_IMM(BPF_REG_3
, 0),
151 BPF_LD_MAP_FD(BPF_REG_2
, 0),
152 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
/* taken branch: key 0, map #2 (patched at insn 9) */
153 BPF_MOV64_IMM(BPF_REG_3
, 0),
154 BPF_LD_MAP_FD(BPF_REG_2
, 0),
155 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
156 BPF_MOV64_IMM(BPF_REG_0
, 1),
/* two distinct maps: one fd patched per branch */
159 .fixup_prog1
= { 5 },
160 .fixup_prog2
= { 9 },
/* unprivileged loaders must be refused: map_ptr is not constant */
161 .result_unpriv
= REJECT
,
162 .errstr_unpriv
= "tail_call abusing map_ptr",
/*
 * Test: mirror of the previous different-maps case — marker 14 makes the
 * JEQ-on-13 false, so the first LD_MAP_FD (map #1, insn 5) feeds the
 * tail_call at runtime.  Still rejected for unprivileged loaders with
 * "tail_call abusing map_ptr".
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
167 "runtime/jit: tail_call within bounds, different maps, second branch",
/* store marker 14 (fails the ==13 test) into skb->cb[0] */
169 BPF_MOV64_IMM(BPF_REG_0
, 14),
170 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
171 offsetof(struct __sk_buff
, cb
[0])),
172 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
173 offsetof(struct __sk_buff
, cb
[0])),
174 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 13, 4),
/* branch taken at runtime: key 0, map #1 (patched at insn 5) */
175 BPF_MOV64_IMM(BPF_REG_3
, 0),
176 BPF_LD_MAP_FD(BPF_REG_2
, 0),
177 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
/* other branch: key 0, map #2 (patched at insn 9) */
178 BPF_MOV64_IMM(BPF_REG_3
, 0),
179 BPF_LD_MAP_FD(BPF_REG_2
, 0),
180 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
181 BPF_MOV64_IMM(BPF_REG_0
, 1),
/* two distinct maps: one fd patched per branch */
184 .fixup_prog1
= { 5 },
185 .fixup_prog2
= { 9 },
186 .result_unpriv
= REJECT
,
187 .errstr_unpriv
= "tail_call abusing map_ptr",
/*
 * Test: bpf_tail_call() with an out-of-bounds index (256); the helper
 * must fail at runtime and fall through to R0 = 2.  fixup_prog1 = { 1 }
 * patches the map fd at insn index 1 (the LD_MAP_FD below).
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
192 "runtime/jit: tail_call out of bounds",
/* R3 = index 256, beyond the prog array's capacity */
194 BPF_MOV64_IMM(BPF_REG_3
, 256),
195 BPF_LD_MAP_FD(BPF_REG_2
, 0),
196 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
/* reached because the out-of-bounds tail call is a no-op */
197 BPF_MOV64_IMM(BPF_REG_0
, 2),
/* patch map fd at insn index 1 */
200 .fixup_prog1
= { 1 },
/*
 * Test: bpf_tail_call() with a negative index (-1).  The index argument
 * is treated as unsigned by the helper, so -1 is far out of bounds: the
 * call must fail and fall through to R0 = 2.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
205 "runtime/jit: pass negative index to tail_call",
/* R3 = -1 (wraps to a huge unsigned index) */
207 BPF_MOV64_IMM(BPF_REG_3
, -1),
208 BPF_LD_MAP_FD(BPF_REG_2
, 0),
209 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
/* reached because the invalid index makes the tail call a no-op */
210 BPF_MOV64_IMM(BPF_REG_0
, 2),
/* patch map fd at insn index 1 */
213 .fixup_prog1
= { 1 },
/*
 * Test: bpf_tail_call() with an index that needs more than 32 bits
 * (0x100000000).  The helper only considers the 32-bit index space, so
 * this must fail and fall through to R0 = 2.  The fixup index is 2, not
 * 1, because BPF_LD_IMM64 occupies two instruction slots.
 * NOTE(review): extraction-damaged fragment; leading integers are fused
 * source line numbers, not code.
 */
218 "runtime/jit: pass > 32bit index to tail_call",
/* R3 = 2^32 — requires the 16-byte LD_IMM64 encoding */
220 BPF_LD_IMM64(BPF_REG_3
, 0x100000000ULL
),
221 BPF_LD_MAP_FD(BPF_REG_2
, 0),
222 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_tail_call
),
/* reached because the oversized index makes the tail call a no-op */
223 BPF_MOV64_IMM(BPF_REG_0
, 2),
/* patch map fd at insn index 2 (LD_IMM64 above spans insns 0-1) */
226 .fixup_prog1
= { 2 },
229 /* Verifier rewrite for unpriv skips tail call here. */