// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include "bpf_jit.h"

#define REG_TCC		LOONGARCH_GPR_A6
#define TCC_SAVED	LOONGARCH_GPR_S5

#define SAVE_RA		BIT(0)
#define SAVE_TCC	BIT(1)
static const int regmap[] = {
	/* return value from in-kernel function, and exit value for eBPF program */
	[BPF_REG_0] = LOONGARCH_GPR_A5,
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = LOONGARCH_GPR_A0,
	[BPF_REG_2] = LOONGARCH_GPR_A1,
	[BPF_REG_3] = LOONGARCH_GPR_A2,
	[BPF_REG_4] = LOONGARCH_GPR_A3,
	[BPF_REG_5] = LOONGARCH_GPR_A4,
	/* callee-saved registers that in-kernel function will preserve */
	[BPF_REG_6] = LOONGARCH_GPR_S0,
	[BPF_REG_7] = LOONGARCH_GPR_S1,
	[BPF_REG_8] = LOONGARCH_GPR_S2,
	[BPF_REG_9] = LOONGARCH_GPR_S3,
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = LOONGARCH_GPR_S4,
	/* temporary register for blinding constants */
	[BPF_REG_AX] = LOONGARCH_GPR_T0,
};
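/*
 * Illustrative note: the argument mapping above lines up with the native
 * LoongArch calling convention (BPF R1-R5 live in $a0-$a4), so a call into
 * a kernel helper needs no argument shuffling; only the return value has to
 * be copied between $a0 and $a5 (the home of BPF R0) around calls and at
 * program exit, as done in the call and exit paths further below.
 */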
static void mark_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_RA;
}

static void mark_tail_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_TCC;
}

static bool seen_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_RA);
}

static bool seen_tail_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_TCC);
}

static u8 tail_call_reg(struct jit_ctx *ctx)
{
	if (seen_call(ctx))
		return TCC_SAVED;

	return REG_TCC;
}
/*
 * eBPF prog stack layout:
 *
 *                                        high
 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
 *                            |           $ra           |
 *                            +-------------------------+
 *                            |           $fp           |
 *                            +-------------------------+
 *                            |           $s0           |
 *                            +-------------------------+
 *                            |           $s1           |
 *                            +-------------------------+
 *                            |           $s2           |
 *                            +-------------------------+
 *                            |           $s3           |
 *                            +-------------------------+
 *                            |           $s4           |
 *                            +-------------------------+
 *                            |           $s5           |
 *                            +-------------------------+ <--BPF_REG_FP
 *                            |  prog->aux->stack_depth |
 *                            |        (optional)       |
 * current $sp -------------> +-------------------------+
 *                                        low
 */
static void build_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);

	/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
	stack_adjust += sizeof(long) * 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in REG_TCC from the caller.
	 */
	emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);

	store_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);

	if (bpf_stack_adjust)
		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);

	/*
	 * Program contains calls and tail calls, so REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		move_reg(ctx, TCC_SAVED, REG_TCC);

	ctx->stack_size = stack_adjust;
}
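/*
 * Worked example (illustrative): for prog->aux->stack_depth == 24,
 * bpf_stack_adjust is rounded up to 32; the eight saved registers take
 * 8 * 8 = 64 bytes, which is already 16-byte aligned, so stack_adjust ends
 * up as 64 + 32 = 96 and the prologue opens the frame with
 * "addi.d $sp, $sp, -96".
 */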
static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
{
	int stack_adjust = ctx->stack_size;
	int load_offset;

	load_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);

	if (!is_tail_call) {
		/* Set return value */
		emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
		/* Return to the caller */
		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
	} else {
		/*
		 * Call the next bpf prog and skip the first instruction
		 * of TCC initialization.
		 */
		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1);
	}
}

static void build_epilogue(struct jit_ctx *ctx)
{
	__build_epilogue(ctx, false);
}
bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

bool bpf_jit_supports_far_kfunc_call(void)
{
	return true;
}
/* initialized on the first pass of build_body() */
static int out_offset = -1;
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	int off;
	u8 tcc = tail_call_reg(ctx);
	u8 a1 = LOONGARCH_GPR_A1;
	u8 a2 = LOONGARCH_GPR_A2;
	u8 t1 = LOONGARCH_GPR_T1;
	u8 t2 = LOONGARCH_GPR_T2;
	u8 t3 = LOONGARCH_GPR_T3;
	const int idx0 = ctx->idx;

#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
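	/*
	 * Illustrative note: out_offset is learned on the first build_body()
	 * pass, when the "out" point below is reached, and is then reused on
	 * later passes so that all three guard branches can jump forward to
	 * the same place. If a later pass arrives at a different offset, this
	 * function bails out, since the generated code would no longer match
	 * the recorded label.
	 */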
	/*
	 * a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	 goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_insn(ctx, ldwu, t1, a1, off);
	/* bgeu $a2, $t1, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
		goto toofar;

	/*
	 * if (--TCC < 0)
	 *	 goto out;
	 */
	emit_insn(ctx, addid, REG_TCC, tcc, -1);
	if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/*
	 * prog = array->ptrs[index];
	 * if (!prog)
	 *	 goto out;
	 */
	emit_insn(ctx, alsld, t2, a2, a1, 2);
	off = offsetof(struct bpf_array, ptrs);
	emit_insn(ctx, ldd, t2, t2, off);
	/* beq $t2, $zero, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_insn(ctx, ldd, t3, t2, off);
	__build_epilogue(ctx, true);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}

	return 0;

toofar:
	pr_info_once("tail_call: jump too far\n");
	return -1;
#undef cur_offset
#undef jmp_offset
}
static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 t3 = LOONGARCH_GPR_T3;
	const u8 r0 = regmap[BPF_REG_0];
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool isdw = BPF_SIZE(insn->code) == BPF_DW;

	move_imm(ctx, t1, off, false);
	emit_insn(ctx, addd, t1, dst, t1);
	move_reg(ctx, t3, src);

	switch (imm) {
	/* lock *(size *)(dst + off) <op>= src */
	case BPF_ADD:
		if (isdw)
			emit_insn(ctx, amaddd, t2, t1, src);
		else
			emit_insn(ctx, amaddw, t2, t1, src);
		break;
	case BPF_AND:
		if (isdw)
			emit_insn(ctx, amandd, t2, t1, src);
		else
			emit_insn(ctx, amandw, t2, t1, src);
		break;
	case BPF_OR:
		if (isdw)
			emit_insn(ctx, amord, t2, t1, src);
		else
			emit_insn(ctx, amorw, t2, t1, src);
		break;
	case BPF_XOR:
		if (isdw)
			emit_insn(ctx, amxord, t2, t1, src);
		else
			emit_insn(ctx, amxorw, t2, t1, src);
		break;
	/* src = atomic_fetch_<op>(dst + off, src) */
	case BPF_ADD | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amaddd, src, t1, t3);
		} else {
			emit_insn(ctx, amaddw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_AND | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amandd, src, t1, t3);
		} else {
			emit_insn(ctx, amandw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_OR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amord, src, t1, t3);
		} else {
			emit_insn(ctx, amorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_XOR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amxord, src, t1, t3);
		} else {
			emit_insn(ctx, amxorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* src = atomic_xchg(dst + off, src); */
	case BPF_XCHG:
		if (isdw) {
			emit_insn(ctx, amswapd, src, t1, t3);
		} else {
			emit_insn(ctx, amswapw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
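	/*
	 * Illustrative note: BPF_CMPXCHG below has no single AMO instruction
	 * on LoongArch, so it is open-coded as an LL/SC loop, roughly:
	 *
	 *	retry:	ll.[wd]	r0, (t1)
	 *		bne	t2, r0, exit	# old value != expected
	 *		move	t3, src
	 *		sc.[wd]	t3, (t1)
	 *		beq	t3, zero, retry	# store-conditional failed
	 *	exit:
	 *
	 * with the branch targets expressed as small forward/backward
	 * instruction offsets in the emitted code.
	 */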
	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
	case BPF_CMPXCHG:
		move_reg(ctx, t2, r0);
		if (isdw) {
			emit_insn(ctx, lld, r0, t1, 0);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scd, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
		} else {
			emit_insn(ctx, llw, r0, t1, 0);
			emit_zext_32(ctx, t2, true);
			emit_zext_32(ctx, r0, true);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scw, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
			emit_zext_32(ctx, r0, true);
		}
		break;
	}
}
static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
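/*
 * Illustrative note on the encoding: ex->fixup packs two fields. Bits [31:27]
 * hold the destination register to zero when the probed load faults, and bits
 * [26:0] hold the distance from the instruction following the faulting load
 * to the fixup word itself. ex_handler_bpf() below recovers the resume
 * address as &ex->fixup - offset, i.e. the instruction right after the load.
 */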
bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->csr_era = (unsigned long)&ex->fixup - offset;

	return true;
}
/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	unsigned long pc;
	off_t offset;
	struct exception_table_entry *ex;

	if (!ctx->image || !ctx->prog->aux->extable)
		return 0;

	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
		return 0;

	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->num_exentries];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->type = EX_TYPE_BPF;
	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->num_exentries++;

	return 0;
}
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
{
	u8 tm = -1;
	u64 func_addr;
	bool func_addr_fixed, sign_extend;
	int i = insn - ctx->prog->insnsi;
	int ret, jmp_offset;
	const u8 code = insn->code;
	const u8 cond = BPF_OP(code);
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		switch (off) {
		case 0:
			move_reg(ctx, dst, src);
			emit_zext_32(ctx, dst, is32);
			break;
		case 8:
			move_reg(ctx, t1, src);
			emit_insn(ctx, extwb, dst, t1);
			emit_zext_32(ctx, dst, is32);
			break;
		case 16:
			move_reg(ctx, t1, src);
			emit_insn(ctx, extwh, dst, t1);
			emit_zext_32(ctx, dst, is32);
			break;
		case 32:
			emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
			break;
		}
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		move_imm(ctx, dst, imm, is32);
		break;
	/* dst = dst + src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_insn(ctx, addd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst + imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_signed_imm12(imm)) {
			emit_insn(ctx, addid, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, addd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - src */
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_insn(ctx, subd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - imm */
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_signed_imm12(-imm)) {
			emit_insn(ctx, addid, dst, dst, -imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, subd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;
	/* dst = dst * src */
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_insn(ctx, muld, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * imm */
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, muld, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;
	/* dst = dst / src */
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		if (!off) {
			emit_zext_32(ctx, dst, is32);
			move_reg(ctx, t1, src);
			emit_zext_32(ctx, t1, is32);
			emit_insn(ctx, divdu, dst, dst, t1);
			emit_zext_32(ctx, dst, is32);
		} else {
			emit_sext_32(ctx, dst, is32);
			move_reg(ctx, t1, src);
			emit_sext_32(ctx, t1, is32);
			emit_insn(ctx, divd, dst, dst, t1);
			emit_sext_32(ctx, dst, is32);
		}
		break;

	/* dst = dst / imm */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		if (!off) {
			move_imm(ctx, t1, imm, is32);
			emit_zext_32(ctx, dst, is32);
			emit_insn(ctx, divdu, dst, dst, t1);
			emit_zext_32(ctx, dst, is32);
		} else {
			move_imm(ctx, t1, imm, false);
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, dst, is32);
			emit_insn(ctx, divd, dst, dst, t1);
			emit_sext_32(ctx, dst, is32);
		}
		break;

	/* dst = dst % src */
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		if (!off) {
			emit_zext_32(ctx, dst, is32);
			move_reg(ctx, t1, src);
			emit_zext_32(ctx, t1, is32);
			emit_insn(ctx, moddu, dst, dst, t1);
			emit_zext_32(ctx, dst, is32);
		} else {
			emit_sext_32(ctx, dst, is32);
			move_reg(ctx, t1, src);
			emit_sext_32(ctx, t1, is32);
			emit_insn(ctx, modd, dst, dst, t1);
			emit_sext_32(ctx, dst, is32);
		}
		break;

	/* dst = dst % imm */
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		if (!off) {
			move_imm(ctx, t1, imm, is32);
			emit_zext_32(ctx, dst, is32);
			emit_insn(ctx, moddu, dst, dst, t1);
			emit_zext_32(ctx, dst, is32);
		} else {
			move_imm(ctx, t1, imm, false);
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, dst, is32);
			emit_insn(ctx, modd, dst, dst, t1);
			emit_sext_32(ctx, dst, is32);
		}
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
		emit_zext_32(ctx, dst, is32);
		break;
	/* dst = dst & src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_insn(ctx, and, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & imm */
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, andi, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, and, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | src */
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_insn(ctx, or, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, ori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, or, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ src */
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_insn(ctx, xor, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ imm */
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, xori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, xor, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;
	/* dst = dst << src (logical) */
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_insn(ctx, sllw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_insn(ctx, slld, dst, dst, src);
		break;

	/* dst = dst << imm (logical) */
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_insn(ctx, slliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_insn(ctx, sllid, dst, dst, imm);
		break;

	/* dst = dst >> src (logical) */
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_insn(ctx, srlw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_insn(ctx, srld, dst, dst, src);
		break;

	/* dst = dst >> imm (logical) */
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_insn(ctx, srliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_insn(ctx, srlid, dst, dst, imm);
		break;

	/* dst = dst >> src (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_insn(ctx, sraw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_insn(ctx, srad, dst, dst, src);
		break;

	/* dst = dst >> imm (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraiw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraid, dst, dst, imm);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			/* do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			emit_insn(ctx, revb2h, dst, dst);
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			emit_insn(ctx, revb2w, dst, dst);
			/* clear the upper 32 bits */
			emit_zext_32(ctx, dst, true);
			break;
		case 64:
			emit_insn(ctx, revbd, dst, dst);
			break;
		}
		break;
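	/*
	 * Illustrative note for the conditional jumps handled below: the
	 * operands are first copied into $t1/$t2 and explicitly sign- or
	 * zero-extended, because BPF_JMP32 only compares the low 32 bits
	 * while the LoongArch compare-and-branch instructions always look
	 * at the full 64-bit registers.
	 */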
	/* PC += off if dst cond src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_reg(ctx, t1, dst);
		move_reg(ctx, t2, src);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, t1, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
			goto toofar;
		break;
	/* PC += off if dst cond imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (imm) {
			move_imm(ctx, t1, imm, false);
			tm = t1;
		} else {
			/* If imm is 0, simply use the zero register. */
			tm = LOONGARCH_GPR_ZERO;
		}
		move_reg(ctx, t2, dst);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, tm, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, tm, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
			goto toofar;
		break;
	/* PC += off if dst & src */
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		emit_insn(ctx, and, t1, dst, src);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, and, t1, dst, t1);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;
	/* PC += off */
	case BPF_JMP | BPF_JA:
	case BPF_JMP32 | BPF_JA:
		if (BPF_CLASS(code) == BPF_JMP)
			jmp_offset = bpf2la_offset(i, off, ctx);
		else
			jmp_offset = bpf2la_offset(i, imm, ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;
	/* function call */
	case BPF_JMP | BPF_CALL:
		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		move_addr(ctx, t1, func_addr);
		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
		break;
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		mark_tail_call(ctx);
		if (emit_bpf_tail_call(ctx) < 0)
			return -EINVAL;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;

		jmp_offset = epilogue_offset(ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;

		move_imm(ctx, dst, imm64, is32);
		return 1;
	}
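	/*
	 * Illustrative note: a 64-bit immediate load occupies two eBPF
	 * instruction slots (the high 32 bits live in the following insn),
	 * so build_insn() returns 1 for it and build_body() skips over the
	 * second slot.
	 */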
	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
	/* dst_reg = (s64)*(signed size *)(src_reg + off) */
	case BPF_LDX | BPF_MEMSX | BPF_B:
	case BPF_LDX | BPF_MEMSX | BPF_H:
	case BPF_LDX | BPF_MEMSX | BPF_W:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
		sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
			      BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldb, dst, src, off);
				else
					emit_insn(ctx, ldbu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxb, dst, src, t1);
				else
					emit_insn(ctx, ldxbu, dst, src, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldh, dst, src, off);
				else
					emit_insn(ctx, ldhu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxh, dst, src, t1);
				else
					emit_insn(ctx, ldxhu, dst, src, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldw, dst, src, off);
				else
					emit_insn(ctx, ldwu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxw, dst, src, t1);
				else
					emit_insn(ctx, ldxwu, dst, src, t1);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, off, is32);
			emit_insn(ctx, ldxd, dst, src, t1);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret < 0)
			return ret;
		break;
	/* *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxb, t1, dst, t2);
			}
			break;
		case BPF_H:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxh, t1, dst, t2);
			}
			break;
		case BPF_W:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxw, t1, dst, t2);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxd, t1, dst, t2);
			}
			break;
		}
		break;
	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxb, src, dst, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxh, src, dst, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxw, src, dst, t1);
			}
			break;
		case BPF_DW:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxd, src, dst, t1);
			}
			break;
		}
		break;
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(insn, ctx);
		break;

	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

	default:
		pr_err("bpf_jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;

toofar:
	pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
	return -E2BIG;
}
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	int i;
	const struct bpf_prog *prog = ctx->prog;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}

	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}
/* Fill space with break instructions */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = INSN_BREAK;
}
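/*
 * Illustrative note: jit_fill_hole() pre-fills the allocated image with
 * break instructions, and validate_code() below checks that none survive
 * the final pass, i.e. that every slot in the image was actually
 * overwritten with a real instruction.
 */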
static int validate_code(struct jit_ctx *ctx)
{
	int i;
	union loongarch_instruction insn;

	for (i = 0; i < ctx->idx; i++) {
		insn = ctx->image[i];
		/* Check INSN_BREAK */
		if (insn.word == INSN_BREAK)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	bool tmp_blinded = false, extra_pass = false;
	u8 *image_ptr;
	int image_size, prog_size, extable_size;
	struct jit_ctx ctx;
	struct jit_data *jit_data;
	struct bpf_binary_header *header;
	struct bpf_prog *tmp, *orig_prog = prog;

	/*
	 * If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	if (IS_ERR(tmp))
		return orig_prog;

	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_offset;
	}
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);

	/* Now we know the actual image size.
	 * As each LoongArch instruction is of length 32bit,
	 * we are translating the number of JITed instructions into
	 * the size required to store the JITed code.
	 */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 2. Now, the actual pass to generate final JIT code */
	ctx.image = (union loongarch_instruction *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;

skip_init_ctx:
	ctx.idx = 0;
	ctx.num_exentries = 0;

	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}
	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}

	/* And we're done */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	/* Update the icache */
	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));

	if (!prog->is_func || extra_pass) {
		int err;

		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			goto out_free;
		}
		err = bpf_jit_binary_lock_ro(header);
		if (err) {
			pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
				    err);
			goto out_free;
		}
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->jited = 1;
	prog->jited_len = prog_size;
	prog->bpf_func = (void *)ctx.image;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= LOONGARCH_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);

out_offset:
		kvfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}

out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);

	out_offset = -1;

	return prog;

out_free:
	bpf_jit_binary_free(header);
	prog->bpf_func = NULL;
	prog->jited = 0;
	prog->jited_len = 0;
	goto out_offset;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}