// SPDX-License-Identifier: GPL-2.0
/*
 * BPF JIT compiler for PA-RISC (64-bit)
 *
 * Copyright(c) 2023 Helge Deller <deller@gmx.de>
 *
 * The code is based on the BPF JIT compiler for RV64 by Björn Töpel.
 *
 * TODO:
 * - check if bpf_jit_needs_zext() is needed (currently enabled)
 * - implement arch_prepare_bpf_trampoline(), poke(), ...
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/libgcc.h>

#include "bpf_jit.h"
static const int regmap[] = {
        [BPF_REG_0]  = HPPA_REG_RET0,
        [BPF_REG_1]  = HPPA_R(5),
        [BPF_REG_2]  = HPPA_R(6),
        [BPF_REG_3]  = HPPA_R(7),
        [BPF_REG_4]  = HPPA_R(8),
        [BPF_REG_5]  = HPPA_R(9),
        [BPF_REG_6]  = HPPA_R(10),
        [BPF_REG_7]  = HPPA_R(11),
        [BPF_REG_8]  = HPPA_R(12),
        [BPF_REG_9]  = HPPA_R(13),
        [BPF_REG_FP] = HPPA_R(14),
        [BPF_REG_AX] = HPPA_R(15),
};
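
/*
 * BPF registers R1..R9, FP and AX are mapped onto the HPPA callee-saved
 * registers r5..r15, so their contents survive the helper calls emitted by
 * emit_call() below.  R0 lives in the HPPA return-value register and is
 * copied back explicitly after each call.
 */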
/*
 * Stack layout during BPF program execution (note: stack grows up):
 *
 *   HPPA64 sp =>  +----------+ <= HPPA64 fp
 *                 |   ...    | HPPA64 callee-saved registers
 *                 +----------+ <= (BPF FP)
 *                 |   ...    | BPF program stack
 *                 |   ...    | Function call stack
 *                 +----------+
 */

/* Offset from fp for BPF registers stored on stack. */
#define STACK_ALIGN     FRAME_SIZE

#define EXIT_PTR_LOAD(reg)      hppa64_ldd_im16(-FRAME_SIZE, HPPA_REG_SP, reg)
#define EXIT_PTR_STORE(reg)     hppa64_std_im16(reg, -FRAME_SIZE, HPPA_REG_SP)
#define EXIT_PTR_JUMP(reg, nop) hppa_bv(HPPA_REG_ZERO, reg, nop)
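
/*
 * The prologue stores a pointer to this program's epilogue at sp - FRAME_SIZE
 * (EXIT_PTR_STORE).  A failing tail call loads it back with EXIT_PTR_LOAD and
 * leaves through EXIT_PTR_JUMP, so the exit always goes through the epilogue
 * of the outermost program in a tail-call chain.
 */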
static u8 bpf_to_hppa_reg(int bpf_reg, struct hppa_jit_context *ctx)
{
        u8 reg = regmap[bpf_reg];

        REG_SET_SEEN(ctx, reg);
        return reg;
}
static void emit_hppa_copy(const s8 rs, const s8 rd, struct hppa_jit_context *ctx)
{
        REG_SET_SEEN(ctx, rd);
        if (OPTIMIZE_HPPA && (rs == rd))
                return;
        REG_SET_SEEN(ctx, rs);
        emit(hppa_copy(rs, rd), ctx);
}
static void emit_hppa64_depd(u8 src, u8 pos, u8 len, u8 target, bool no_zero,
                             struct hppa_jit_context *ctx)
{
        u8 c;

        pos &= (BITS_PER_LONG - 1);

        c  = (len < 32) ? 0x4 : 0;
        c |= (pos >= 32) ? 0x2 : 0;
        c |= (no_zero) ? 0x1 : 0;
        emit(hppa_t10_insn(0x3c, target, src, 0, c, pos & 0x1f, len & 0x1f), ctx);
}
static void emit_hppa64_shld(u8 src, int num, u8 target, struct hppa_jit_context *ctx)
{
        emit_hppa64_depd(src, 63 - num, 64 - num, target, 0, ctx);
}
static void emit_hppa64_extrd(u8 src, u8 pos, u8 len, u8 target, bool signed_op,
                              struct hppa_jit_context *ctx)
{
        u8 c;

        pos &= (BITS_PER_LONG - 1);

        c  = (len < 32) ? 0x4 : 0;
        c |= (pos >= 32) ? 0x2 : 0;
        c |= signed_op ? 0x1 : 0;
        emit(hppa_t10_insn(0x36, src, target, 0, c, pos & 0x1f, len & 0x1f), ctx);
}
static void emit_hppa64_extrw(u8 src, u8 pos, u8 len, u8 target, bool signed_op,
                              struct hppa_jit_context *ctx)
{
        u8 c;

        c = 0x06 | (signed_op ? 1 : 0);
        emit(hppa_t10_insn(0x34, src, target, 0, c, pos, len), ctx);
}
#define emit_hppa64_zext32(r, target, ctx) \
        emit_hppa64_extrd(r, 63, 32, target, false, ctx)
#define emit_hppa64_sext32(r, target, ctx) \
        emit_hppa64_extrd(r, 63, 32, target, true, ctx)
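
/*
 * emit_hppa64_zext32() and emit_hppa64_sext32() copy the lower 32 bits of r
 * into target, either zero- or sign-extended.  They provide the 32-bit
 * semantics required by BPF_ALU and BPF_JMP32 instructions on the 64-bit
 * HPPA registers.
 */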
static void emit_hppa64_shrd(u8 src, int num, u8 target, bool signed_op,
                             struct hppa_jit_context *ctx)
{
        emit_hppa64_extrd(src, 63 - num, 64 - num, target, signed_op, ctx);
}
static void emit_hppa64_shrw(u8 src, int num, u8 target, bool signed_op,
                             struct hppa_jit_context *ctx)
{
        emit_hppa64_extrw(src, 31 - num, 32 - num, target, signed_op, ctx);
}
/* Emit variable-length instructions for 32-bit imm */
static void emit_imm32(u8 rd, s32 imm, struct hppa_jit_context *ctx)
{
        u32 lower = im11(imm);

        REG_SET_SEEN(ctx, rd);
        if (OPTIMIZE_HPPA && relative_bits_ok(imm, 14)) {
                emit(hppa_ldi(imm, rd), ctx);
                return;
        }
        if (OPTIMIZE_HPPA && lower == imm) {
                emit(hppa_ldo(lower, HPPA_REG_ZERO, rd), ctx);
                return;
        }
        emit(hppa_ldil(imm, rd), ctx);
        if (OPTIMIZE_HPPA && (lower == 0))
                return;
        emit(hppa_ldo(lower, rd, rd), ctx);
}
static bool is_32b_int(s64 val)
{
        return val == (s32) val;
}
/* Emit variable-length instructions for 64-bit imm */
static void emit_imm(u8 rd, s64 imm, u8 tmpreg, struct hppa_jit_context *ctx)
{
        u32 upper32 = imm >> 32;

        /* get lower 32-bits into rd, sign extended */
        emit_imm32(rd, imm, ctx);

        /* do we have upper 32-bits too ? */
        if (OPTIMIZE_HPPA && is_32b_int(imm))
                return;

        /* load upper 32-bits into lower tmpreg and deposit into rd */
        if (upper32 || !OPTIMIZE_HPPA) {
                emit_imm32(tmpreg, upper32, ctx);
                emit_hppa64_depd(tmpreg, 31, 32, rd, 1, ctx);
        } else
                emit_hppa64_depd(HPPA_REG_ZERO, 31, 32, rd, 1, ctx);
}
static int emit_jump(signed long paoff, bool force_far,
                     struct hppa_jit_context *ctx)
{
        unsigned long pc, addr;

        /* Note: Use 2 instructions for jumps if force_far is set. */
        if (relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 22)) {
                /* use BL,long branch followed by nop() */
                emit(hppa64_bl_long(paoff - HPPA_BRANCH_DISPLACEMENT), ctx);
                if (force_far)
                        emit(hppa_nop(), ctx);
                return 0;
        }

        pc = (uintptr_t) &ctx->insns[ctx->ninsns];
        addr = pc + (paoff * HPPA_INSN_SIZE);
        /* even the 64-bit kernel runs in memory below 4GB */
        if (WARN_ON_ONCE(addr >> 32))
                return -E2BIG;
        emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
        emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx);
        return 0;
}
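
/*
 * Emit the program epilogue.  For a regular exit the callee-saved registers
 * and the stack frame are restored and control returns to the caller; for a
 * tail call, control is transferred to the target program while skipping its
 * first (TCC-initializing) instruction.
 */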
static void __build_epilogue(bool is_tail_call, struct hppa_jit_context *ctx)
{
        int i;

        if (is_tail_call) {
                /*
                 * Skips first instruction of prologue which initializes tail
                 * call counter. Assumes t0 contains address of target program,
                 * see emit_bpf_tail_call.
                 */
                emit(hppa_ldo(1 * HPPA_INSN_SIZE, HPPA_REG_T0, HPPA_REG_T0), ctx);
                emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_T0, EXEC_NEXT_INSTR), ctx);
                /* in delay slot: */
                emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_IN_INIT), ctx);

                return;
        }

        /* load epilogue function pointer and jump to it. */
        /* exit point is either at next instruction, or the outest TCC exit function */
        emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
        emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);

        /* NOTE: we are 64-bit and big-endian, so return lower sign-extended 32-bit value */
        emit_hppa64_sext32(regmap[BPF_REG_0], HPPA_REG_RET0, ctx);

        /* Restore callee-saved registers. */
        for (i = 3; i <= 15; i++) {
                if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
                        continue;
                emit(hppa64_ldd_im16(-REG_SIZE * i, HPPA_REG_SP, HPPA_R(i)), ctx);
        }

        /* load original return pointer (stored by outest TCC function) */
        emit(hppa64_ldd_im16(-2*REG_SIZE, HPPA_REG_SP, HPPA_REG_RP), ctx);
        emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_RP, EXEC_NEXT_INSTR), ctx);
        /* in delay slot: */
        emit(hppa64_ldd_im5(-REG_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);

        emit(hppa_nop(), ctx); /* XXX why one too few ?? */
}
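
/*
 * Emit a conditional branch for the BPF condition 'op' on rd and rs.  When
 * the target does not fit the 12-bit displacement of a conditional branch,
 * the condition is inverted and the short branch jumps over a far jump
 * produced by emit_jump().
 */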
static int emit_branch(u8 op, u8 rd, u8 rs, signed long paoff,
                       struct hppa_jit_context *ctx)
{
        int off, ret;
        bool far = false;

        if (op == BPF_JSET) {
                /*
                 * BPF_JSET is a special case: it has no inverse so translate
                 * to and() function and compare against zero
                 */
                emit(hppa_and(rd, rs, HPPA_REG_T0), ctx);
                paoff -= 1; /* reduce offset due to hppa_and() above */
                rd = HPPA_REG_T0;
                rs = HPPA_REG_ZERO;
                op = BPF_JNE;
        }

        /* set start after BPF_JSET */

        if (!relative_branch_ok(paoff - HPPA_BRANCH_DISPLACEMENT + 1, 12)) {
                op = invert_bpf_cond(op);
                far = true;
        }

        /*
         * For a far branch, the condition is negated and we jump over the
         * branch itself, and the two instructions from emit_jump.
         * For a near branch, just use paoff.
         */
        off = far ? (2 - HPPA_BRANCH_DISPLACEMENT) : paoff - HPPA_BRANCH_DISPLACEMENT;

        /* IF (dst COND src) JUMP off */
        switch (op) {
        case BPF_JEQ:
                emit(hppa_beq(rd, rs, off), ctx);
                break;
        case BPF_JGT:
                emit(hppa_bgtu(rd, rs, off), ctx);
                break;
        case BPF_JLT:
                emit(hppa_bltu(rd, rs, off), ctx);
                break;
        case BPF_JGE:
                emit(hppa_bgeu(rd, rs, off), ctx);
                break;
        case BPF_JLE:
                emit(hppa_bleu(rd, rs, off), ctx);
                break;
        case BPF_JNE:
                emit(hppa_bne(rd, rs, off), ctx);
                break;
        case BPF_JSGT:
                emit(hppa_bgt(rd, rs, off), ctx);
                break;
        case BPF_JSLT:
                emit(hppa_blt(rd, rs, off), ctx);
                break;
        case BPF_JSGE:
                emit(hppa_bge(rd, rs, off), ctx);
                break;
        case BPF_JSLE:
                emit(hppa_ble(rd, rs, off), ctx);
                break;
        }

        if (far) {
                /* Adjust for extra insns. */
                ret = emit_jump(paoff, true, ctx);
                if (ret)
                        return ret;
        } else {
                /*
                 * always allocate 2 nops instead of the far branch to
                 * reduce translation loops
                 */
                emit(hppa_nop(), ctx);
                emit(hppa_nop(), ctx);
        }

        return 0;
}
static void emit_zext_32(u8 reg, struct hppa_jit_context *ctx)
{
        emit_hppa64_zext32(reg, reg, ctx);
}
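
/*
 * Emulate a BPF tail call.  The generated code bails out through the stored
 * exit pointer if the index is out of bounds, the tail-call counter is
 * exhausted or the selected program slot is NULL; otherwise it branches into
 * the target program just behind its first (TCC-initializing) instruction.
 */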
static void emit_bpf_tail_call(int insn, struct hppa_jit_context *ctx)
{
        const s8 arr_reg = regmap[BPF_REG_2];
        const s8 idx_reg = regmap[BPF_REG_3];
        struct bpf_array bpfa;
        struct bpf_prog bpfp;
        int off;

        /* if there is any tail call, we need to save & restore all registers */
        REG_SET_SEEN_ALL(ctx);

        /* get address of TCC main exit function for error case into rp */
        emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);

        /* max_entries = array->map.max_entries; */
        off = offsetof(struct bpf_array, map.max_entries);
        BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4);
        emit(hppa_ldw(off, arr_reg, HPPA_REG_T1), ctx);

        /*
         * if (index >= max_entries)
         *      goto out;
         */
        emit(hppa_bltu(idx_reg, HPPA_REG_T1, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
        emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);

        /*
         * if (--TCC < 0)
         *      goto out;
         */
        REG_FORCE_SEEN(ctx, HPPA_REG_TCC);
        emit(hppa_ldo(-1, HPPA_REG_TCC, HPPA_REG_TCC), ctx);
        emit(hppa_bge(HPPA_REG_TCC, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
        emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);

        /*
         * prog = array->ptrs[index];
         * if (!prog)
         *      goto out;
         */
        BUILD_BUG_ON(sizeof(bpfa.ptrs[0]) != 8);
        emit(hppa64_shladd(idx_reg, 3, arr_reg, HPPA_REG_T0), ctx);
        off = offsetof(struct bpf_array, ptrs);
        BUILD_BUG_ON(off < 16);
        emit(hppa64_ldd_im16(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
        emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
        emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);

        /*
         * goto *(prog->bpf_func + 4);
         */
        off = offsetof(struct bpf_prog, bpf_func);
        BUILD_BUG_ON(off < 16);
        BUILD_BUG_ON(sizeof(bpfp.bpf_func) != 8);
        emit(hppa64_ldd_im16(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
        /* Epilogue jumps to *(t0 + 4). */
        __build_epilogue(true, ctx);
}
static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
                      struct hppa_jit_context *ctx)
{
        u8 code = insn->code;

        switch (code) {
        case BPF_JMP | BPF_JA:
        case BPF_JMP | BPF_CALL:
        case BPF_JMP | BPF_EXIT:
        case BPF_JMP | BPF_TAIL_CALL:
                break;
        default:
                *rd = bpf_to_hppa_reg(insn->dst_reg, ctx);
        }

        if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
            code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
            code & BPF_LDX || code & BPF_STX)
                *rs = bpf_to_hppa_reg(insn->src_reg, ctx);
}
static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct hppa_jit_context *ctx)
{
        emit_hppa64_zext32(*rd, HPPA_REG_T2, ctx);
        *rd = HPPA_REG_T2;
        emit_hppa64_zext32(*rs, HPPA_REG_T1, ctx);
        *rs = HPPA_REG_T1;
}
static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct hppa_jit_context *ctx)
{
        emit_hppa64_sext32(*rd, HPPA_REG_T2, ctx);
        *rd = HPPA_REG_T2;
        emit_hppa64_sext32(*rs, HPPA_REG_T1, ctx);
        *rs = HPPA_REG_T1;
}
static void emit_zext_32_rd_t1(u8 *rd, struct hppa_jit_context *ctx)
{
        emit_hppa64_zext32(*rd, HPPA_REG_T2, ctx);
        *rd = HPPA_REG_T2;
        emit_zext_32(HPPA_REG_T1, ctx);
}
static void emit_sext_32_rd(u8 *rd, struct hppa_jit_context *ctx)
{
        emit_hppa64_sext32(*rd, HPPA_REG_T2, ctx);
        *rd = HPPA_REG_T2;
}
static bool is_signed_bpf_cond(u8 cond)
{
        return cond == BPF_JSGT || cond == BPF_JSLT ||
               cond == BPF_JSGE || cond == BPF_JSLE;
}
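
/*
 * Emit a call to a BPF helper or kernel function.  The BPF argument
 * registers are copied into the HPPA argument registers, the callee is
 * reached through its Elf64_Fdesc function descriptor (address plus gp),
 * and the return value is copied back into the register backing BPF_REG_0.
 */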
static void emit_call(u64 addr, bool fixed, struct hppa_jit_context *ctx)
{
        const int offset_sp = 2*FRAME_SIZE;

        emit(hppa_ldo(offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);

        emit_hppa_copy(regmap[BPF_REG_1], HPPA_REG_ARG0, ctx);
        emit_hppa_copy(regmap[BPF_REG_2], HPPA_REG_ARG1, ctx);
        emit_hppa_copy(regmap[BPF_REG_3], HPPA_REG_ARG2, ctx);
        emit_hppa_copy(regmap[BPF_REG_4], HPPA_REG_ARG3, ctx);
        emit_hppa_copy(regmap[BPF_REG_5], HPPA_REG_ARG4, ctx);

        /* Backup TCC across the call. */
        REG_FORCE_SEEN(ctx, HPPA_REG_TCC_SAVED);
        if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
                emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_SAVED), ctx);

        /*
         * Use ldil() to load absolute address. Don't use emit_imm as the
         * number of emitted instructions should not depend on the value of
         * addr.
         */
        /* load function address and gp from Elf64_Fdesc descriptor */
        emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
        emit(hppa_ldo(im11(addr), HPPA_REG_R31, HPPA_REG_R31), ctx);
        emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, addr),
                             HPPA_REG_R31, HPPA_REG_RP), ctx);
        emit(hppa64_bve_l_rp(HPPA_REG_RP), ctx);
        emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, gp),
                             HPPA_REG_R31, HPPA_REG_GP), ctx);

        /* Restore TCC. */
        if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
                emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_TCC), ctx);

        emit(hppa_ldo(-offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);

        /* Set return value. */
        emit_hppa_copy(HPPA_REG_RET0, regmap[BPF_REG_0], ctx);
}
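
/*
 * Emit a call to a libgcc helper taking two 64-bit arguments (used for the
 * multiply, divide and modulo operations).  For divide and modulo a zero
 * divisor is tested up front and the call is skipped, so the result is 0
 * for BPF_DIV and the unmodified dividend for BPF_MOD instead of a trap.
 */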
static void emit_call_libgcc_ll(void *func, const s8 arg0,
                const s8 arg1, u8 opcode, struct hppa_jit_context *ctx)
{
        u64 func_addr;

        if (BPF_CLASS(opcode) == BPF_ALU) {
                emit_hppa64_zext32(arg0, HPPA_REG_ARG0, ctx);
                emit_hppa64_zext32(arg1, HPPA_REG_ARG1, ctx);
        } else {
                emit_hppa_copy(arg0, HPPA_REG_ARG0, ctx);
                emit_hppa_copy(arg1, HPPA_REG_ARG1, ctx);
        }

        /* libgcc overwrites HPPA_REG_RET0, so keep copy in HPPA_REG_TCC_SAVED */
        if (arg0 != HPPA_REG_RET0) {
                REG_SET_SEEN(ctx, HPPA_REG_TCC_SAVED);
                emit(hppa_copy(HPPA_REG_RET0, HPPA_REG_TCC_SAVED), ctx);
        }

        emit(hppa_ldo(FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);

        func_addr = (uintptr_t) func;
        /* load function address and gp from Elf64_Fdesc descriptor */
        emit_imm(HPPA_REG_R31, func_addr, arg0, ctx);
        emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, addr),
                             HPPA_REG_R31, HPPA_REG_RP), ctx);
        /* skip the following bve_l instruction if divisor is 0. */
        if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) {
                if (BPF_OP(opcode) == BPF_DIV)
                        emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET0, ctx);
                else
                        emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET0, ctx);
                emit(hppa_beq(HPPA_REG_ARG1, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
        }
        emit(hppa64_bve_l_rp(HPPA_REG_RP), ctx);
        emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, gp),
                             HPPA_REG_R31, HPPA_REG_GP), ctx);

        emit(hppa_ldo(-FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);

        emit_hppa_copy(HPPA_REG_RET0, arg0, ctx);

        /* restore HPPA_REG_RET0 */
        if (arg0 != HPPA_REG_RET0)
                emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_RET0), ctx);
}
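
/*
 * Emit a store of rs to rd + off.  Offsets that do not fit into the 14-bit
 * displacement of the store instructions are formed via addil into r1 first;
 * 64-bit stores additionally pick between the short (im5) and long (im16)
 * forms of std depending on the offset.
 */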
static void emit_store(const s8 rd, const s8 rs, s16 off,
                       struct hppa_jit_context *ctx, const u8 size,
                       const u8 mode)
{
        u8 dstreg;

        /* need to calculate address since offset does not fit in 14 bits? */
        if (relative_bits_ok(off, 14))
                dstreg = rd;
        else {
                /* need to use R1 here, since addil puts result into R1 */
                dstreg = HPPA_REG_R1;
                emit(hppa_addil(off, rd), ctx);
                off = im11(off);
        }

        switch (size) {
        case BPF_B:
                emit(hppa_stb(rs, off, dstreg), ctx);
                break;
        case BPF_H:
                emit(hppa_sth(rs, off, dstreg), ctx);
                break;
        case BPF_W:
                emit(hppa_stw(rs, off, dstreg), ctx);
                break;
        case BPF_DW:
                if (off & 7) {
                        emit(hppa_ldo(off, dstreg, HPPA_REG_R1), ctx);
                        emit(hppa64_std_im5(rs, 0, HPPA_REG_R1), ctx);
                } else if (off >= -16 && off <= 15)
                        emit(hppa64_std_im5(rs, off, dstreg), ctx);
                else
                        emit(hppa64_std_im16(rs, off, dstreg), ctx);
                break;
        }
}
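
/*
 * Translate a single eBPF instruction into HPPA64 machine code.  Returns a
 * negative errno on failure, and a positive count when an extra eBPF
 * instruction (the upper half of a 64-bit immediate load or a known
 * zero-extension) has been consumed as well.
 */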
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx,
                      bool extra_pass)
{
        bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
                    BPF_CLASS(insn->code) == BPF_JMP;
        int s, e, ret, i = insn - ctx->prog->insnsi;
        signed long paoff;
        struct bpf_prog_aux *aux = ctx->prog->aux;
        u8 rd = -1, rs = -1, code = insn->code;
        s16 off = insn->off;
        s32 imm = insn->imm;

        init_regs(&rd, &rs, insn, ctx);

        switch (code) {
        case BPF_ALU | BPF_MOV | BPF_X:
        case BPF_ALU64 | BPF_MOV | BPF_X:
                if (imm == 1) {
                        /* Special mov32 for zext */
                        emit_zext_32(rd, ctx);
                        break;
                }
                if (!is64 && !aux->verifier_zext)
                        emit_hppa64_zext32(rs, rd, ctx);
                else
                        emit_hppa_copy(rs, rd, ctx);
                break;
        /* dst = dst OP src */
        case BPF_ALU | BPF_ADD | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_X:
                emit(hppa_add(rd, rs, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_X:
        case BPF_ALU64 | BPF_SUB | BPF_X:
                emit(hppa_sub(rd, rs, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_X:
        case BPF_ALU64 | BPF_AND | BPF_X:
                emit(hppa_and(rd, rs, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_OR | BPF_X:
        case BPF_ALU64 | BPF_OR | BPF_X:
                emit(hppa_or(rd, rs, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_XOR | BPF_X:
        case BPF_ALU64 | BPF_XOR | BPF_X:
                emit(hppa_xor(rd, rs, rd), ctx);
                if (!is64 && !aux->verifier_zext && rs != rd)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_MUL | BPF_K:
        case BPF_ALU64 | BPF_MUL | BPF_K:
                emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
                rs = HPPA_REG_T1;
                fallthrough;
        case BPF_ALU | BPF_MUL | BPF_X:
        case BPF_ALU64 | BPF_MUL | BPF_X:
                emit_call_libgcc_ll(__muldi3, rd, rs, code, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_DIV | BPF_K:
                emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
                rs = HPPA_REG_T1;
                fallthrough;
        case BPF_ALU | BPF_DIV | BPF_X:
        case BPF_ALU64 | BPF_DIV | BPF_X:
                emit_call_libgcc_ll(&hppa_div64, rd, rs, code, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_MOD | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_K:
                emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
                rs = HPPA_REG_T1;
                fallthrough;
        case BPF_ALU | BPF_MOD | BPF_X:
        case BPF_ALU64 | BPF_MOD | BPF_X:
                emit_call_libgcc_ll(&hppa_div64_rem, rd, rs, code, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_LSH | BPF_X:
        case BPF_ALU64 | BPF_LSH | BPF_X:
                emit_hppa64_sext32(rs, HPPA_REG_T0, ctx);
                emit(hppa64_mtsarcm(HPPA_REG_T0), ctx);
                if (is64)
                        emit(hppa64_depdz_sar(rd, rd), ctx);
                else
                        emit(hppa_depwz_sar(rd, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_X:
        case BPF_ALU64 | BPF_RSH | BPF_X:
                emit(hppa_mtsar(rs), ctx);
                if (is64)
                        emit(hppa64_shrpd_sar(rd, rd), ctx);
                else
                        emit(hppa_shrpw_sar(rd, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_X:
        case BPF_ALU64 | BPF_ARSH | BPF_X:
                emit_hppa64_sext32(rs, HPPA_REG_T0, ctx);
                emit(hppa64_mtsarcm(HPPA_REG_T0), ctx);
                if (is64)
                        emit(hppa_extrd_sar(rd, rd, 1), ctx);
                else
                        emit(hppa_extrws_sar(rd, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_NEG:
        case BPF_ALU64 | BPF_NEG:
                emit(hppa_sub(HPPA_REG_ZERO, rd, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        /* dst = BSWAP##imm(dst) */
        case BPF_ALU | BPF_END | BPF_FROM_BE:
                switch (imm) {
                case 16:
                        /* zero-extend 16 bits into 64 bits */
                        emit_hppa64_depd(HPPA_REG_ZERO, 63-16, 64-16, rd, 1, ctx);
                        break;
                case 32:
                        if (!aux->verifier_zext)
                                emit_zext_32(rd, ctx);
                        break;
                case 64:
                        /* Do nothing */
                        break;
                }
                break;

        case BPF_ALU | BPF_END | BPF_FROM_LE:
                switch (imm) {
                case 16:
                        emit(hppa_extru(rd, 31 - 8, 8, HPPA_REG_T1), ctx);
                        emit(hppa_depwz(rd, 23, 8, HPPA_REG_T1), ctx);
                        emit(hppa_extru(HPPA_REG_T1, 31, 16, rd), ctx);
                        emit_hppa64_extrd(HPPA_REG_T1, 63, 16, rd, 0, ctx);
                        break;
                case 32:
                        emit(hppa_shrpw(rd, rd, 16, HPPA_REG_T1), ctx);
                        emit_hppa64_depd(HPPA_REG_T1, 63-16, 8, HPPA_REG_T1, 1, ctx);
                        emit(hppa_shrpw(rd, HPPA_REG_T1, 8, HPPA_REG_T1), ctx);
                        emit_hppa64_extrd(HPPA_REG_T1, 63, 32, rd, 0, ctx);
                        break;
                case 64:
                        emit(hppa64_permh_3210(rd, HPPA_REG_T1), ctx);
                        emit(hppa64_hshl(HPPA_REG_T1, 8, HPPA_REG_T2), ctx);
                        emit(hppa64_hshr_u(HPPA_REG_T1, 8, HPPA_REG_T1), ctx);
                        emit(hppa_or(HPPA_REG_T2, HPPA_REG_T1, rd), ctx);
                        break;
                default:
                        pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
                        return -1;
                }
                break;
        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_MOV | BPF_K:
                emit_imm(rd, imm, HPPA_REG_T2, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;

        /* dst = dst OP imm */
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_ADD | BPF_K:
                if (relative_bits_ok(imm, 14)) {
                        emit(hppa_ldo(imm, rd, rd), ctx);
                } else {
                        emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
                        emit(hppa_add(rd, HPPA_REG_T1, rd), ctx);
                }
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
                if (relative_bits_ok(-imm, 14)) {
                        emit(hppa_ldo(-imm, rd, rd), ctx);
                } else {
                        emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
                        emit(hppa_sub(rd, HPPA_REG_T1, rd), ctx);
                }
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
                emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
                emit(hppa_and(rd, HPPA_REG_T1, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_OR | BPF_K:
        case BPF_ALU64 | BPF_OR | BPF_K:
                emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
                emit(hppa_or(rd, HPPA_REG_T1, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_XOR | BPF_K:
        case BPF_ALU64 | BPF_XOR | BPF_K:
                emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
                emit(hppa_xor(rd, HPPA_REG_T1, rd), ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_LSH | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_K:
                if (imm != 0)
                        emit_hppa64_shld(rd, imm, rd, ctx);

                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_K:
        case BPF_ALU64 | BPF_RSH | BPF_K:
                if (imm != 0) {
                        if (is64)
                                emit_hppa64_shrd(rd, imm, rd, false, ctx);
                        else
                                emit_hppa64_shrw(rd, imm, rd, false, ctx);
                }

                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K:
        case BPF_ALU64 | BPF_ARSH | BPF_K:
                if (imm != 0) {
                        if (is64)
                                emit_hppa64_shrd(rd, imm, rd, true, ctx);
                        else
                                emit_hppa64_shrw(rd, imm, rd, true, ctx);
                }

                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_JMP | BPF_JA:
                paoff = hppa_offset(i, off, ctx);
                ret = emit_jump(paoff, false, ctx);
                if (ret)
                        return ret;
                break;
        /* IF (dst COND src) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_X:
        case BPF_JMP32 | BPF_JEQ | BPF_X:
        case BPF_JMP | BPF_JGT | BPF_X:
        case BPF_JMP32 | BPF_JGT | BPF_X:
        case BPF_JMP | BPF_JLT | BPF_X:
        case BPF_JMP32 | BPF_JLT | BPF_X:
        case BPF_JMP | BPF_JGE | BPF_X:
        case BPF_JMP32 | BPF_JGE | BPF_X:
        case BPF_JMP | BPF_JLE | BPF_X:
        case BPF_JMP32 | BPF_JLE | BPF_X:
        case BPF_JMP | BPF_JNE | BPF_X:
        case BPF_JMP32 | BPF_JNE | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_X:
        case BPF_JMP32 | BPF_JSGT | BPF_X:
        case BPF_JMP | BPF_JSLT | BPF_X:
        case BPF_JMP32 | BPF_JSLT | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_X:
        case BPF_JMP32 | BPF_JSGE | BPF_X:
        case BPF_JMP | BPF_JSLE | BPF_X:
        case BPF_JMP32 | BPF_JSLE | BPF_X:
        case BPF_JMP | BPF_JSET | BPF_X:
        case BPF_JMP32 | BPF_JSET | BPF_X:
                paoff = hppa_offset(i, off, ctx);
                if (!is64) {
                        s = ctx->ninsns;
                        if (is_signed_bpf_cond(BPF_OP(code)))
                                emit_sext_32_rd_rs(&rd, &rs, ctx);
                        else
                                emit_zext_32_rd_rs(&rd, &rs, ctx);
                        e = ctx->ninsns;

                        /* Adjust for extra insns */
                        paoff -= (e - s);
                }

                if (BPF_OP(code) == BPF_JSET) {
                        /* Adjust for the and() below */
                        paoff -= 1;
                        emit(hppa_and(rs, rd, HPPA_REG_T1), ctx);
                        emit_branch(BPF_JNE, HPPA_REG_T1, HPPA_REG_ZERO, paoff,
                                    ctx);
                } else {
                        emit_branch(BPF_OP(code), rd, rs, paoff, ctx);
                }
                break;
        /* IF (dst COND imm) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP32 | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_K:
        case BPF_JMP32 | BPF_JGT | BPF_K:
        case BPF_JMP | BPF_JLT | BPF_K:
        case BPF_JMP32 | BPF_JLT | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_K:
        case BPF_JMP32 | BPF_JGE | BPF_K:
        case BPF_JMP | BPF_JLE | BPF_K:
        case BPF_JMP32 | BPF_JLE | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP32 | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP32 | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP32 | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSLE | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
                paoff = hppa_offset(i, off, ctx);

                s = ctx->ninsns;
                emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
                rs = HPPA_REG_T1;
                if (!is64) {
                        if (is_signed_bpf_cond(BPF_OP(code)))
                                emit_sext_32_rd(&rd, ctx);
                        else
                                emit_zext_32_rd_t1(&rd, ctx);
                }
                e = ctx->ninsns;

                /* Adjust for extra insns */
                paoff -= (e - s);
                emit_branch(BPF_OP(code), rd, rs, paoff, ctx);
                break;
        case BPF_JMP | BPF_JSET | BPF_K:
        case BPF_JMP32 | BPF_JSET | BPF_K:
                paoff = hppa_offset(i, off, ctx);
                s = ctx->ninsns;
                emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
                emit(hppa_and(HPPA_REG_T1, rd, HPPA_REG_T1), ctx);
                /* For jset32, we should clear the upper 32 bits of t1, but
                 * sign-extension is sufficient here and saves one instruction,
                 * as t1 is used only in comparison against zero.
                 */
                if (!is64 && imm < 0)
                        emit_hppa64_sext32(HPPA_REG_T1, HPPA_REG_T1, ctx);
                e = ctx->ninsns;

                /* Adjust for extra insns */
                paoff -= (e - s);
                emit_branch(BPF_JNE, HPPA_REG_T1, HPPA_REG_ZERO, paoff, ctx);
                break;
        case BPF_JMP | BPF_CALL:
        {
                bool fixed_addr;
                u64 addr;

                ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
                                            &addr, &fixed_addr);
                if (ret < 0)
                        return ret;

                REG_SET_SEEN_ALL(ctx);
                emit_call(addr, fixed_addr, ctx);
                break;
        }

        case BPF_JMP | BPF_TAIL_CALL:
                emit_bpf_tail_call(i, ctx);
                break;

        /* function return */
        case BPF_JMP | BPF_EXIT:
                if (i == ctx->prog->len - 1)
                        break;

                paoff = epilogue_offset(ctx);
                ret = emit_jump(paoff, false, ctx);
                if (ret)
                        return ret;
                break;
        case BPF_LD | BPF_IMM | BPF_DW:
        {
                struct bpf_insn insn1 = insn[1];
                u64 imm64 = (u64)insn1.imm << 32 | (u32)imm;

                if (bpf_pseudo_func(insn))
                        imm64 = (uintptr_t)dereference_function_descriptor((void *)imm64);
                emit_imm(rd, imm64, HPPA_REG_T2, ctx);

                return 1;
        }
        /* LDX: dst = *(size *)(src + off) */
        case BPF_LDX | BPF_MEM | BPF_B:
        case BPF_LDX | BPF_MEM | BPF_H:
        case BPF_LDX | BPF_MEM | BPF_W:
        case BPF_LDX | BPF_MEM | BPF_DW:
        case BPF_LDX | BPF_PROBE_MEM | BPF_B:
        case BPF_LDX | BPF_PROBE_MEM | BPF_H:
        case BPF_LDX | BPF_PROBE_MEM | BPF_W:
        case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
        {
                u8 srcreg;

                /* need to calculate address since offset does not fit in 14 bits? */
                if (relative_bits_ok(off, 14))
                        srcreg = rs;
                else {
                        /* need to use R1 here, since addil puts result into R1 */
                        srcreg = HPPA_REG_R1;
                        BUG_ON(rs == HPPA_REG_R1);
                        BUG_ON(rd == HPPA_REG_R1);
                        emit(hppa_addil(off, rs), ctx);
                        off = im11(off);
                }

                switch (BPF_SIZE(code)) {
                case BPF_B:
                        emit(hppa_ldb(off, srcreg, rd), ctx);
                        if (insn_is_zext(&insn[1]))
                                return 1;
                        break;
                case BPF_H:
                        emit(hppa_ldh(off, srcreg, rd), ctx);
                        if (insn_is_zext(&insn[1]))
                                return 1;
                        break;
                case BPF_W:
                        emit(hppa_ldw(off, srcreg, rd), ctx);
                        if (insn_is_zext(&insn[1]))
                                return 1;
                        break;
                case BPF_DW:
                        if (off & 7) {
                                emit(hppa_ldo(off, srcreg, HPPA_REG_R1), ctx);
                                emit(hppa64_ldd_reg(HPPA_REG_ZERO, HPPA_REG_R1, rd), ctx);
                        } else if (off >= -16 && off <= 15)
                                emit(hppa64_ldd_im5(off, srcreg, rd), ctx);
                        else
                                emit(hppa64_ldd_im16(off, srcreg, rd), ctx);
                        break;
                }
                break;
        }
        /* speculation barrier */
        case BPF_ST | BPF_NOSPEC:
                break;

        /* ST: *(size *)(dst + off) = imm */
        /* STX: *(size *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_DW:

        case BPF_STX | BPF_MEM | BPF_B:
        case BPF_STX | BPF_MEM | BPF_H:
        case BPF_STX | BPF_MEM | BPF_W:
        case BPF_STX | BPF_MEM | BPF_DW:
                if (BPF_CLASS(code) == BPF_ST) {
                        emit_imm(HPPA_REG_T2, imm, HPPA_REG_T1, ctx);
                        rs = HPPA_REG_T2;
                }

                emit_store(rd, rs, off, ctx, BPF_SIZE(code), BPF_MODE(code));
                break;

        case BPF_STX | BPF_ATOMIC | BPF_W:
        case BPF_STX | BPF_ATOMIC | BPF_DW:
                pr_info_once(
                        "bpf-jit: not supported: atomic operation %02x ***\n",
                        insn->imm);
                return -EFAULT;

        default:
                pr_err("bpf-jit: unknown opcode %02x\n", code);
                return -EINVAL;
        }

        return 0;
}
void bpf_jit_build_prologue(struct hppa_jit_context *ctx)
{
        int bpf_stack_adjust, stack_adjust, i;
        unsigned long addr;
        s8 reg;

        /*
         * stack on hppa grows up, so if tail calls are used we need to
         * allocate the maximum stack size
         */
        if (REG_ALL_SEEN(ctx))
                bpf_stack_adjust = MAX_BPF_STACK;
        else
                bpf_stack_adjust = ctx->prog->aux->stack_depth;
        bpf_stack_adjust = round_up(bpf_stack_adjust, STACK_ALIGN);

        stack_adjust = FRAME_SIZE + bpf_stack_adjust;
        stack_adjust = round_up(stack_adjust, STACK_ALIGN);

        /*
         * NOTE: We construct an Elf64_Fdesc descriptor here.
         * The first 4 words initialize the TCC and compare it.
         * Then follows the virtual address of the eBPF function,
         * and the gp for this function.
         *
         * The first instruction sets the tail-call-counter (TCC) register.
         * This instruction is skipped by tail calls.
         * Use a temporary register instead of a caller-saved register initially.
         */
        REG_FORCE_SEEN(ctx, HPPA_REG_TCC_IN_INIT);
        emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC_IN_INIT), ctx);

        /*
         * Skip all initializations when called as BPF TAIL call.
         */
        emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_R1), ctx);
        emit(hppa_beq(HPPA_REG_TCC_IN_INIT, HPPA_REG_R1, 6 - HPPA_BRANCH_DISPLACEMENT), ctx);
        emit(hppa64_bl_long(ctx->prologue_len - 3 - HPPA_BRANCH_DISPLACEMENT), ctx);

        /* store entry address of this eBPF function */
        addr = (uintptr_t) &ctx->insns[0];
        emit(addr >> 32, ctx);
        emit(addr & 0xffffffff, ctx);

        /* store gp of this eBPF function */
        asm("copy %%r27,%0" : "=r" (addr) );
        emit(addr >> 32, ctx);
        emit(addr & 0xffffffff, ctx);

        /* Set up hppa stack frame. */
        emit_hppa_copy(HPPA_REG_SP, HPPA_REG_R1, ctx);
        emit(hppa_ldo(stack_adjust, HPPA_REG_SP, HPPA_REG_SP), ctx);
        emit(hppa64_std_im5(HPPA_REG_R1, -REG_SIZE, HPPA_REG_SP), ctx);
        emit(hppa64_std_im16(HPPA_REG_RP, -2*REG_SIZE, HPPA_REG_SP), ctx);

        /* Save callee-save registers. */
        for (i = 3; i <= 15; i++) {
                if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
                        continue;
                emit(hppa64_std_im16(HPPA_R(i), -REG_SIZE * i, HPPA_REG_SP), ctx);
        }

        /* load function parameters; load all if we use tail functions */
#define LOAD_PARAM(arg, dst) \
        if (REG_WAS_SEEN(ctx, regmap[dst]) || \
            REG_WAS_SEEN(ctx, HPPA_REG_TCC)) \
                emit_hppa_copy(arg, regmap[dst], ctx)
        LOAD_PARAM(HPPA_REG_ARG0, BPF_REG_1);
        LOAD_PARAM(HPPA_REG_ARG1, BPF_REG_2);
        LOAD_PARAM(HPPA_REG_ARG2, BPF_REG_3);
        LOAD_PARAM(HPPA_REG_ARG3, BPF_REG_4);
        LOAD_PARAM(HPPA_REG_ARG4, BPF_REG_5);
#undef LOAD_PARAM

        REG_FORCE_SEEN(ctx, HPPA_REG_T0);
        REG_FORCE_SEEN(ctx, HPPA_REG_T1);
        REG_FORCE_SEEN(ctx, HPPA_REG_T2);

        /*
         * Now really set the tail call counter (TCC) register.
         */
        if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
                emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC), ctx);

        /*
         * Save epilogue function pointer for outer TCC call chain.
         * The main TCC call stores the final RP on stack.
         */
        addr = (uintptr_t) &ctx->insns[ctx->epilogue_offset];
        /* skip first two instructions which jump to exit */
        addr += 2 * HPPA_INSN_SIZE;
        emit_imm(HPPA_REG_T2, addr, HPPA_REG_T1, ctx);
        emit(EXIT_PTR_STORE(HPPA_REG_T2), ctx);

        /* Set up BPF frame pointer. */
        reg = regmap[BPF_REG_FP];       /* -> HPPA_REG_FP */
        if (REG_WAS_SEEN(ctx, reg)) {
                emit(hppa_ldo(-FRAME_SIZE, HPPA_REG_SP, reg), ctx);
        }
}
void bpf_jit_build_epilogue(struct hppa_jit_context *ctx)
{
        __build_epilogue(false, ctx);
}

bool bpf_jit_supports_kfunc_call(void)