// SPDX-License-Identifier: GPL-2.0
#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/cache.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

#include "bpf_jit_64.h"
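/* The is_simmN() helpers below test whether a value fits in a signed
 * N-bit immediate field.  They rely on unsigned wraparound: for
 * example, value + 0x1000 < 0x2000 holds exactly when the signed
 * interpretation of value lies in [-4096, 4095], the simm13 range.
 */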
static inline bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}

static inline bool is_simm10(unsigned int value)
{
	return value + 0x200 < 0x400;
}

static inline bool is_simm5(unsigned int value)
{
	return value + 0x10 < 0x20;
}

static inline bool is_sethi(unsigned int value)
{
	return (value & ~0x3fffff) == 0;
}
static void bpf_flush_icache(void *start_, void *end_)
{
	/* Cheetah's I-cache is fully coherent. */
	if (tlb_type == spitfire) {
		unsigned long start = (unsigned long) start_;
		unsigned long end = (unsigned long) end_;

		start &= ~7UL;
		end = (end + 7UL) & ~7UL;
		while (start < end) {
			flushi(start);
			start += 32;
		}
	}
}
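/* The macros below assemble 32-bit SPARC instruction words field by
 * field: OP/OP2/OP3 select the instruction format, RD/RS1/RS2 place
 * register numbers in their slots, and IMMED sets bit 13 to select
 * the signed 13-bit immediate (S13) form instead of the rs2 register
 * form.
 */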
#define S13(X)		((X) & 0x1fff)
#define S5(X)		((X) & 0x1f)
#define IMMED		0x00002000
#define RD(X)		((X) << 25)
#define RS1(X)		((X) << 14)
#define RS2(X)		((X) << 0)

#define OP(X)		((X) << 30)
#define OP2(X)		((X) << 22)
#define OP3(X)		((X) << 19)
#define COND(X)		(((X) & 0xf) << 25)
#define CBCOND(X)	(((X) & 0x1f) << 25)

#define F1(X)		OP(X)
#define F2(X, Y)	(OP(X) | OP2(Y))
#define F3(X, Y)	(OP(X) | OP3(Y))
#define ASI(X)		(((X) & 0xff) << 5)
#define CONDN		COND(0x0)
#define CONDE		COND(0x1)
#define CONDLE		COND(0x2)
#define CONDL		COND(0x3)
#define CONDLEU		COND(0x4)
#define CONDCS		COND(0x5)
#define CONDNEG		COND(0x6)
#define CONDVC		COND(0x7)
#define CONDA		COND(0x8)
#define CONDNE		COND(0x9)
#define CONDG		COND(0xa)
#define CONDGE		COND(0xb)
#define CONDGU		COND(0xc)
#define CONDCC		COND(0xd)
#define CONDPOS		COND(0xe)
#define CONDVS		COND(0xf)

#define CONDLU		CONDCS
#define CONDGEU		CONDCC
#define WDISP22(X)	(((X) >> 2) & 0x3fffff)
#define WDISP19(X)	(((X) >> 2) & 0x7ffff)
/* The 10-bit branch displacement for CBCOND is split into two fields */
static u32 WDISP10(u32 off)
{
	u32 ret = ((off >> 2) & 0xff) << 5;

	ret |= ((off >> (2 + 8)) & 0x03) << 19;

	return ret;
}
#define CBCONDE		CBCOND(0x09)
#define CBCONDLE	CBCOND(0x0a)
#define CBCONDL		CBCOND(0x0b)
#define CBCONDLEU	CBCOND(0x0c)
#define CBCONDCS	CBCOND(0x0d)
#define CBCONDN		CBCOND(0x0e)
#define CBCONDVS	CBCOND(0x0f)
#define CBCONDNE	CBCOND(0x19)
#define CBCONDG		CBCOND(0x1a)
#define CBCONDGE	CBCOND(0x1b)
#define CBCONDGU	CBCOND(0x1c)
#define CBCONDCC	CBCOND(0x1d)
#define CBCONDPOS	CBCOND(0x1e)
#define CBCONDVC	CBCOND(0x1f)

#define CBCONDGEU	CBCONDCC
#define CBCONDLU	CBCONDCS
#define ANNUL		(1 << 29)
#define XCC		(1 << 21)

#define CALL		F1(1)

#define BRANCH		(F2(0, 1) | XCC)
#define CBCOND_OP	(F2(0, 3) | XCC)
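/* BRANCH is the classic conditional branch on the 64-bit condition
 * codes (%xcc), reaching a 19-bit word displacement.  CBCOND_OP is
 * the compare-and-branch instruction of later sparc64 chips: it fuses
 * the comparison and the branch into one instruction but only reaches
 * a 10-bit displacement, so its use is guarded by the AV_SPARC_CBCOND
 * hwcap check further below.
 */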
#define BA		(BRANCH | CONDA)
#define BG		(BRANCH | CONDG)
#define BL		(BRANCH | CONDL)
#define BLE		(BRANCH | CONDLE)
#define BGU		(BRANCH | CONDGU)
#define BLEU		(BRANCH | CONDLEU)
#define BGE		(BRANCH | CONDGE)
#define BGEU		(BRANCH | CONDGEU)
#define BLU		(BRANCH | CONDLU)
#define BE		(BRANCH | CONDE)
#define BNE		(BRANCH | CONDNE)
#define SETHI(K, REG)	\
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG)	\
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
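/* These implement the canonical two-instruction sequence for a
 * 32-bit constant:
 *
 *	sethi	%hi(K), reg	! top 22 bits, low 10 bits cleared
 *	or	reg, %lo(K), reg	! fill in the low 10 bits
 *
 * splitting K the same way the assembler's %hi()/%lo() operators do.
 */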
#define ADD		F3(2, 0x00)
#define AND		F3(2, 0x01)
#define ANDCC		F3(2, 0x11)
#define OR		F3(2, 0x02)
#define XOR		F3(2, 0x03)
#define SUB		F3(2, 0x04)
#define SUBCC		F3(2, 0x14)
#define MUL		F3(2, 0x0a)
#define MULX		F3(2, 0x09)
#define UDIVX		F3(2, 0x0d)
#define DIV		F3(2, 0x0e)
#define SLL		F3(2, 0x25)
#define SLLX		(F3(2, 0x25)|(1<<12))
#define SRA		F3(2, 0x27)
#define SRAX		(F3(2, 0x27)|(1<<12))
#define SRL		F3(2, 0x26)
#define SRLX		(F3(2, 0x26)|(1<<12))
#define JMPL		F3(2, 0x38)
#define SAVE		F3(2, 0x3c)
#define RESTORE		F3(2, 0x3d)

#define BR		F2(0, 0x01)
#define RD_Y		F3(2, 0x28)
#define WR_Y		F3(2, 0x30)
#define LD32		F3(3, 0x00)
#define LD8		F3(3, 0x01)
#define LD16		F3(3, 0x02)
#define LD64		F3(3, 0x0b)
#define LD64A		F3(3, 0x1b)
#define ST8		F3(3, 0x05)
#define ST16		F3(3, 0x06)
#define ST32		F3(3, 0x04)
#define ST64		F3(3, 0x0e)

#define CAS		F3(3, 0x3c)
#define CASX		F3(3, 0x3e)

#define LDPTR		LD64
#define BASE_STACKFRAME	176

#define LD32I		(LD32 | IMMED)
#define LD8I		(LD8 | IMMED)
#define LD16I		(LD16 | IMMED)
#define LD64I		(LD64 | IMMED)
#define LDPTRI		(LDPTR | IMMED)
#define ST32I		(ST32 | IMMED)
struct jit_ctx {
	struct bpf_prog		*prog;
	unsigned int		*offset;
	int			idx;
	int			epilogue_offset;
	bool			tmp_1_used;
	bool			tmp_2_used;
	bool			tmp_3_used;
	bool			saw_frame_pointer;
	bool			saw_call;
	bool			saw_tail_call;
	u32			*image;
};
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
#define TMP_REG_3	(MAX_BPF_JIT_REG + 2)
/* Map BPF registers to SPARC registers */
static const int bpf2sparc[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = O5,

	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = O0,
	[BPF_REG_2] = O1,
	[BPF_REG_3] = O2,
	[BPF_REG_4] = O3,
	[BPF_REG_5] = O4,

	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = L0,
	[BPF_REG_7] = L1,
	[BPF_REG_8] = L2,
	[BPF_REG_9] = L3,

	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = L6,

	[BPF_REG_AX] = G7,

	/* temporary register for internal BPF JIT */
	[TMP_REG_1] = G1,
	[TMP_REG_2] = G2,
	[TMP_REG_3] = G3,
};
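/* emit() is the single funnel for code generation: during the sizing
 * passes ctx->image is NULL, so only ctx->idx advances; in the final
 * pass the same calls write real instruction words.  This is what
 * lets build_prologue()/build_body()/build_epilogue() be reused both
 * for measuring and for emitting the program.
 */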
static void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = insn;

	ctx->idx++;
}
static void emit_call(u32 *func, struct jit_ctx *ctx)
{
	if (ctx->image != NULL) {
		void *here = &ctx->image[ctx->idx];
		unsigned int off;

		off = (void *)func - here;
		ctx->image[ctx->idx] = CALL | ((off >> 2) & 0x3fffffff);
	}
	ctx->idx++;
}
static void emit_nop(struct jit_ctx *ctx)
{
	emit(SETHI(0, G0), ctx);
}
static void emit_reg_move(u32 from, u32 to, struct jit_ctx *ctx)
{
	emit(OR | RS1(G0) | RS2(from) | RD(to), ctx);
}
/* Emit 32-bit constant, zero extended. */
static void emit_set_const(s32 K, u32 reg, struct jit_ctx *ctx)
{
	emit(SETHI(K, reg), ctx);
	emit(OR_LO(K, reg), ctx);
}
/* Emit 32-bit constant, sign extended. */
static void emit_set_const_sext(s32 K, u32 reg, struct jit_ctx *ctx)
{
	if (K >= 0) {
		emit(SETHI(K, reg), ctx);
		emit(OR_LO(K, reg), ctx);
	} else {
		u32 hbits = ~(u32) K;
		u32 lbits = -0x400 | (u32) K;

		emit(SETHI(hbits, reg), ctx);
		emit(XOR | IMMED | RS1(reg) | S13(lbits) | RD(reg), ctx);
	}
}
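/* For a negative K the sequence above works in two steps: sethi loads
 * the complemented high bits (leaving bits 63:32 clear), and the xor
 * with a negative simm13 immediate flips them back while the
 * immediate's sign extension sets all upper 32 bits, yielding a
 * properly sign-extended 64-bit value.
 */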
static void emit_alu(u32 opcode, u32 src, u32 dst, struct jit_ctx *ctx)
{
	emit(opcode | RS1(dst) | RS2(src) | RD(dst), ctx);
}
static void emit_alu3(u32 opcode, u32 a, u32 b, u32 c, struct jit_ctx *ctx)
{
	emit(opcode | RS1(a) | RS2(b) | RD(c), ctx);
}
static void emit_alu_K(unsigned int opcode, unsigned int dst, unsigned int imm,
		       struct jit_ctx *ctx)
{
	bool small_immed = is_simm13(imm);
	unsigned int insn = opcode;

	insn |= RS1(dst) | RD(dst);
	if (small_immed) {
		emit(insn | IMMED | S13(imm), ctx);
	} else {
		unsigned int tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_set_const_sext(imm, tmp, ctx);
		emit(insn | RS2(tmp), ctx);
	}
}
static void emit_alu3_K(unsigned int opcode, unsigned int src, unsigned int imm,
			unsigned int dst, struct jit_ctx *ctx)
{
	bool small_immed = is_simm13(imm);
	unsigned int insn = opcode;

	insn |= RS1(src) | RD(dst);
	if (small_immed) {
		emit(insn | IMMED | S13(imm), ctx);
	} else {
		unsigned int tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_set_const_sext(imm, tmp, ctx);
		emit(insn | RS2(tmp), ctx);
	}
}
static void emit_loadimm32(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (K >= 0 && is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const(K, dest, ctx);
	}
}
static void emit_loadimm(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const(K, dest, ctx);
	}
}
static void emit_loadimm_sext(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const_sext(K, dest, ctx);
	}
}
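/* Everything below implements 64-bit constant formation.  The
 * analysis finds the lowest and highest set bits and whether every
 * bit in between is set; those three facts decide whether the
 * constant can be built as a short shifted mask, a shifted sethi, a
 * negated pattern fixed up with xor, or only by a full
 * four-instruction decomposition.
 */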
static void analyze_64bit_constant(u32 high_bits, u32 low_bits,
				   int *hbsp, int *lbsp, int *abbasp)
{
	int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
	int i;

	lowest_bit_set = highest_bit_set = -1;
	i = 0;
	do {
		if ((lowest_bit_set == -1) && ((low_bits >> i) & 1))
			lowest_bit_set = i;
		if ((highest_bit_set == -1) && ((high_bits >> (32 - i - 1)) & 1))
			highest_bit_set = (64 - i - 1);
	} while (++i < 32 && (highest_bit_set == -1 ||
			      lowest_bit_set == -1));
	if (i == 32) {
		i = 0;
		do {
			if (lowest_bit_set == -1 && ((high_bits >> i) & 1))
				lowest_bit_set = i + 32;
			if (highest_bit_set == -1 &&
			    ((low_bits >> (32 - i - 1)) & 1))
				highest_bit_set = 32 - i - 1;
		} while (++i < 32 && (highest_bit_set == -1 ||
				      lowest_bit_set == -1));
	}

	all_bits_between_are_set = 1;
	for (i = lowest_bit_set; i <= highest_bit_set; i++) {
		if (i < 32) {
			if ((low_bits & (1 << i)) != 0)
				continue;
		} else {
			if ((high_bits & (1 << (i - 32))) != 0)
				continue;
		}
		all_bits_between_are_set = 0;
		break;
	}
	*hbsp = highest_bit_set;
	*lbsp = lowest_bit_set;
	*abbasp = all_bits_between_are_set;
}
static unsigned long create_simple_focus_bits(unsigned long high_bits,
					      unsigned long low_bits,
					      int lowest_bit_set, int shift)
{
	long hi, lo;

	if (lowest_bit_set < 32) {
		lo = (low_bits >> lowest_bit_set) << shift;
		hi = ((high_bits << (32 - lowest_bit_set)) << shift);
	} else {
		lo = 0;
		hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
	}
	return hi | lo;
}
static bool const64_is_2insns(unsigned long high_bits,
			      unsigned long low_bits)
{
	int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

	if (high_bits == 0 || high_bits == 0xffffffff)
		return true;

	analyze_64bit_constant(high_bits, low_bits,
			       &highest_bit_set, &lowest_bit_set,
			       &all_bits_between_are_set);

	if ((highest_bit_set == 63 || lowest_bit_set == 0) &&
	    all_bits_between_are_set != 0)
		return true;

	if (highest_bit_set - lowest_bit_set < 21)
		return true;

	return false;
}
static void sparc_emit_set_const64_quick2(unsigned long high_bits,
					  unsigned long low_imm,
					  unsigned int dest,
					  int shift_count, struct jit_ctx *ctx)
{
	emit_loadimm32(high_bits, dest, ctx);

	/* Now shift it up into place. */
	emit_alu_K(SLLX, dest, shift_count, ctx);

	/* If there is a low immediate part piece, finish up by
	 * putting that in as well.
	 */
	if (low_imm != 0)
		emit(OR | IMMED | RS1(dest) | S13(low_imm) | RD(dest), ctx);
}
static void emit_loadimm64(u64 K, unsigned int dest, struct jit_ctx *ctx)
{
	int all_bits_between_are_set, lowest_bit_set, highest_bit_set;
	unsigned int tmp = bpf2sparc[TMP_REG_1];
	u32 low_bits = (K & 0xffffffff);
	u32 high_bits = (K >> 32);

	/* These two tests also take care of all of the one
	 * instruction cases.
	 */
	if (high_bits == 0xffffffff && (low_bits & 0x80000000))
		return emit_loadimm_sext(K, dest, ctx);
	if (high_bits == 0x00000000)
		return emit_loadimm32(K, dest, ctx);

	analyze_64bit_constant(high_bits, low_bits, &highest_bit_set,
			       &lowest_bit_set, &all_bits_between_are_set);
	/* 1) mov	-1, %reg
	 *    sllx	%reg, shift, %reg
	 * 2) mov	-1, %reg
	 *    srlx	%reg, shift, %reg
	 * 3) mov	some_small_const, %reg
	 *    sllx	%reg, shift, %reg
	 */
	if (((highest_bit_set == 63 || lowest_bit_set == 0) &&
	     all_bits_between_are_set != 0) ||
	    ((highest_bit_set - lowest_bit_set) < 12)) {
		int shift = lowest_bit_set;
		long the_const = -1;

		if ((highest_bit_set != 63 && lowest_bit_set != 0) ||
		    all_bits_between_are_set == 0) {
			the_const =
				create_simple_focus_bits(high_bits, low_bits,
							 lowest_bit_set, 0);
		} else if (lowest_bit_set == 0)
			shift = -(63 - highest_bit_set);

		emit(OR | IMMED | RS1(G0) | S13(the_const) | RD(dest), ctx);
		if (shift > 0)
			emit_alu_K(SLLX, dest, shift, ctx);
		else if (shift < 0)
			emit_alu_K(SRLX, dest, -shift, ctx);

		return;
	}
	/* Now a range of 22 or less bits set somewhere.
	 * 1) sethi	%hi(focus_bits), %reg
	 *    sllx	%reg, shift, %reg
	 * 2) sethi	%hi(focus_bits), %reg
	 *    srlx	%reg, shift, %reg
	 */
	if ((highest_bit_set - lowest_bit_set) < 21) {
		unsigned long focus_bits =
			create_simple_focus_bits(high_bits, low_bits,
						 lowest_bit_set, 10);

		emit(SETHI(focus_bits, dest), ctx);

		/* If lowest_bit_set == 10 then a sethi alone could
		 * have done it.
		 */
		if (lowest_bit_set < 10)
			emit_alu_K(SRLX, dest, 10 - lowest_bit_set, ctx);
		else if (lowest_bit_set > 10)
			emit_alu_K(SLLX, dest, lowest_bit_set - 10, ctx);
		return;
	}
	/* Ok, now 3 instruction sequences. */
	if (low_bits == 0) {
		emit_loadimm32(high_bits, dest, ctx);
		emit_alu_K(SLLX, dest, 32, ctx);
		return;
	}
	/* We may be able to do something quick
	 * when the constant is negated, so try that.
	 */
	if (const64_is_2insns((~high_bits) & 0xffffffff,
			      (~low_bits) & 0xfffffc00)) {
		/* NOTE: The trailing bits get XOR'd so we need the
		 * non-negated bits, not the negated ones.
		 */
		unsigned long trailing_bits = low_bits & 0x3ff;

		if ((((~high_bits) & 0xffffffff) == 0 &&
		     ((~low_bits) & 0x80000000) == 0) ||
		    (((~high_bits) & 0xffffffff) == 0xffffffff &&
		     ((~low_bits) & 0x80000000) != 0)) {
			unsigned long fast_int = (~low_bits & 0xffffffff);

			if ((is_sethi(fast_int) &&
			     (~high_bits & 0xffffffff) == 0)) {
				emit(SETHI(fast_int, dest), ctx);
			} else if (is_simm13(fast_int)) {
				emit(OR | IMMED | RS1(G0) | S13(fast_int) | RD(dest), ctx);
			} else {
				emit_loadimm64(fast_int, dest, ctx);
			}
		} else {
			u64 n = ((~low_bits) & 0xfffffc00) |
				(((unsigned long)((~high_bits) & 0xffffffff))<<32);
			emit_loadimm64(n, dest, ctx);
		}

		low_bits = -0x400 | trailing_bits;

		emit(XOR | IMMED | RS1(dest) | S13(low_bits) | RD(dest), ctx);
		return;
	}
	/* 1) sethi	%hi(xxx), %reg
	 *    or	%reg, %lo(xxx), %reg
	 *    sllx	%reg, yyy, %reg
	 */
	if ((highest_bit_set - lowest_bit_set) < 32) {
		unsigned long focus_bits =
			create_simple_focus_bits(high_bits, low_bits,
						 lowest_bit_set, 0);

		/* So what we know is that the set bits straddle the
		 * middle of the 64-bit word.
		 */
		sparc_emit_set_const64_quick2(focus_bits, 0, dest,
					      lowest_bit_set, ctx);
		return;
	}
	/* 1) sethi	%hi(high_bits), %reg
	 *    or	%reg, %lo(high_bits), %reg
	 *    sllx	%reg, 32, %reg
	 *    or	%reg, low_bits, %reg
	 */
	if (is_simm13(low_bits) && ((int)low_bits > 0)) {
		sparc_emit_set_const64_quick2(high_bits, low_bits,
					      dest, 32, ctx);
		return;
	}
	/* Oh well, we tried... Do a full 64-bit decomposition. */
	ctx->tmp_1_used = true;

	emit_loadimm32(high_bits, tmp, ctx);
	emit_loadimm32(low_bits, dest, ctx);
	emit_alu_K(SLLX, tmp, 32, ctx);
	emit(OR | RS1(dest) | RS2(tmp) | RD(dest), ctx);
}
static void emit_branch(unsigned int br_opc, unsigned int from_idx, unsigned int to_idx,
			struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	if (br_opc & XCC)
		emit(br_opc | WDISP19(off << 2), ctx);
	else
		emit(br_opc | WDISP22(off << 2), ctx);
}
static void emit_cbcond(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx,
			const u8 dst, const u8 src, struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	emit(cb_opc | WDISP10(off << 2) | RS1(dst) | RS2(src), ctx);
}
static void emit_cbcondi(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx,
			 const u8 dst, s32 imm, struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	emit(cb_opc | IMMED | WDISP10(off << 2) | RS1(dst) | S5(imm), ctx);
}
#define emit_read_y(REG, CTX)	emit(RD_Y | RD(REG), CTX)
#define emit_write_y(REG, CTX)	emit(WR_Y | IMMED | RS1(REG) | S13(0), CTX)

#define emit_cmp(R1, R2, CTX)				\
	emit(SUBCC | RS1(R1) | RS2(R2) | RD(G0), CTX)

#define emit_cmpi(R1, IMM, CTX)				\
	emit(SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX)

#define emit_btst(R1, R2, CTX)				\
	emit(ANDCC | RS1(R1) | RS2(R2) | RD(G0), CTX)

#define emit_btsti(R1, IMM, CTX)			\
	emit(ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX)
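/* cmp and tst are synthetic instructions on SPARC: a subtraction or
 * an and whose result is discarded into %g0, executed only for the
 * condition-code side effects that the following branch tests.
 */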
static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
				   const s32 imm, bool is_imm, int branch_dst,
				   struct jit_ctx *ctx)
{
	bool use_cbcond = (sparc64_elf_hwcap & AV_SPARC_CBCOND) != 0;
	const u8 tmp = bpf2sparc[TMP_REG_1];

	branch_dst = ctx->offset[branch_dst];

	if (!is_simm10(branch_dst - ctx->idx) ||
	    BPF_OP(code) == BPF_JSET)
		use_cbcond = false;

	if (is_imm) {
		bool fits = true;

		if (use_cbcond) {
			if (!is_simm5(imm))
				fits = false;
		} else if (!is_simm13(imm)) {
			fits = false;
		}
		if (!fits) {
			ctx->tmp_1_used = true;
			emit_loadimm_sext(imm, tmp, ctx);
			src = tmp;
			is_imm = false;
		}
	}
	if (!use_cbcond) {
		u32 br_opcode;

		if (BPF_OP(code) == BPF_JSET) {
			if (is_imm)
				emit_btsti(dst, imm, ctx);
			else
				emit_btst(dst, src, ctx);
		} else {
			if (is_imm)
				emit_cmpi(dst, imm, ctx);
			else
				emit_cmp(dst, src, ctx);
		}
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			br_opcode = BE;
			break;
		case BPF_JGT:
			br_opcode = BGU;
			break;
		case BPF_JLT:
			br_opcode = BLU;
			break;
		case BPF_JGE:
			br_opcode = BGEU;
			break;
		case BPF_JLE:
			br_opcode = BLEU;
			break;
		case BPF_JNE:
			br_opcode = BNE;
			break;
		case BPF_JSGT:
			br_opcode = BG;
			break;
		case BPF_JSLT:
			br_opcode = BL;
			break;
		case BPF_JSGE:
			br_opcode = BGE;
			break;
		case BPF_JSLE:
			br_opcode = BLE;
			break;
		case BPF_JSET:
			br_opcode = BNE;
			break;
		default:
			/* Make sure we don't leak kernel information to the
			 * user.
			 */
			return -EFAULT;
		}
		emit_branch(br_opcode, ctx->idx, branch_dst, ctx);
		emit_nop(ctx);
	} else {
		u32 cbcond_opcode;
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			cbcond_opcode = CBCONDE;
			break;
		case BPF_JGT:
			cbcond_opcode = CBCONDGU;
			break;
		case BPF_JLT:
			cbcond_opcode = CBCONDLU;
			break;
		case BPF_JGE:
			cbcond_opcode = CBCONDGEU;
			break;
		case BPF_JLE:
			cbcond_opcode = CBCONDLEU;
			break;
		case BPF_JNE:
			cbcond_opcode = CBCONDNE;
			break;
		case BPF_JSGT:
			cbcond_opcode = CBCONDG;
			break;
		case BPF_JSLT:
			cbcond_opcode = CBCONDL;
			break;
		case BPF_JSGE:
			cbcond_opcode = CBCONDGE;
			break;
		case BPF_JSLE:
			cbcond_opcode = CBCONDLE;
			break;
		default:
			/* Make sure we don't leak kernel information to the
			 * user.
			 */
			return -EFAULT;
		}
		cbcond_opcode |= CBCOND_OP;
		if (is_imm)
			emit_cbcondi(cbcond_opcode, ctx->idx, branch_dst,
				     dst, imm, ctx);
		else
			emit_cbcond(cbcond_opcode, ctx->idx, branch_dst,
				    dst, src, ctx);
	}
	return 0;
}
/* Just skip the save instruction and the ctx register move.  */
#define BPF_TAILCALL_PROLOGUE_SKIP	32
#define BPF_TAILCALL_CNT_SP_OFF		(STACK_BIAS + 128)
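/* The skip is in bytes: a tail call enters the target program past
 * its prologue, so it keeps running in the caller's register window
 * and stack frame with the BPF argument registers already live.
 */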
static void build_prologue(struct jit_ctx *ctx)
{
	s32 stack_needed = BASE_STACKFRAME;

	if (ctx->saw_frame_pointer || ctx->saw_tail_call) {
		struct bpf_prog *prog = ctx->prog;
		u32 stack_depth;

		stack_depth = prog->aux->stack_depth;
		stack_needed += round_up(stack_depth, 16);
	}

	if (ctx->saw_tail_call)
		stack_needed += 8;

	/* save %sp, -176, %sp */
	emit(SAVE | IMMED | RS1(SP) | S13(-stack_needed) | RD(SP), ctx);

	/* tail_call_cnt = 0 */
	if (ctx->saw_tail_call) {
		u32 off = BPF_TAILCALL_CNT_SP_OFF;

		emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(G0), ctx);
	} else {
		emit_nop(ctx);
	}
	if (ctx->saw_frame_pointer) {
		const u8 vfp = bpf2sparc[BPF_REG_FP];

		emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx);
	} else {
		emit_nop(ctx);
	}

	emit_reg_move(I0, O0, ctx);
	emit_reg_move(I1, O1, ctx);
	emit_reg_move(I2, O2, ctx);
	emit_reg_move(I3, O3, ctx);
	emit_reg_move(I4, O4, ctx);
	/* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */
}
static void build_epilogue(struct jit_ctx *ctx)
{
	ctx->epilogue_offset = ctx->idx;

	/* ret (jmpl %i7 + 8, %g0) */
	emit(JMPL | IMMED | RS1(I7) | S13(8) | RD(G0), ctx);

	/* restore %o5, %g0, %o0 */
	emit(RESTORE | RS1(bpf2sparc[BPF_REG_0]) | RS2(G0) | RD(O0), ctx);
}
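/* The restore sits in the delay slot of the jmpl above: it executes
 * as part of the return, popping the register window and copying the
 * BPF return value into the caller's %o0 in a single instruction.
 */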
static void emit_tail_call(struct jit_ctx *ctx)
{
	const u8 bpf_array = bpf2sparc[BPF_REG_2];
	const u8 bpf_index = bpf2sparc[BPF_REG_3];
	const u8 tmp = bpf2sparc[TMP_REG_1];
	u32 off;

	ctx->saw_tail_call = true;

	off = offsetof(struct bpf_array, map.max_entries);
	emit(LD32 | IMMED | RS1(bpf_array) | S13(off) | RD(tmp), ctx);
	emit_cmp(bpf_index, tmp, ctx);
#define OFFSET1 17
	emit_branch(BGEU, ctx->idx, ctx->idx + OFFSET1, ctx);
	emit_nop(ctx);

	off = BPF_TAILCALL_CNT_SP_OFF;
	emit(LD32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx);
	emit_cmpi(tmp, MAX_TAIL_CALL_CNT, ctx);
#define OFFSET2 13
	emit_branch(BGU, ctx->idx, ctx->idx + OFFSET2, ctx);
	emit_nop(ctx);

	emit_alu_K(ADD, tmp, 1, ctx);
	off = BPF_TAILCALL_CNT_SP_OFF;
	emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx);

	emit_alu3_K(SLL, bpf_index, 3, tmp, ctx);
	emit_alu(ADD, bpf_array, tmp, ctx);
	off = offsetof(struct bpf_array, ptrs);
	emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx);

	emit_cmpi(tmp, 0, ctx);
#define OFFSET3 5
	emit_branch(BE, ctx->idx, ctx->idx + OFFSET3, ctx);
	emit_nop(ctx);

	off = offsetof(struct bpf_prog, bpf_func);
	emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx);

	off = BPF_TAILCALL_PROLOGUE_SKIP;
	emit(JMPL | IMMED | RS1(tmp) | S13(off) | RD(G0), ctx);
	emit_nop(ctx);
}
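/* build_insn() returns 0 on success, 1 when it also consumed the
 * following BPF instruction (64-bit immediate loads and the
 * verifier's explicit zero-extension pseudo-instructions), and a
 * negative errno on failure.
 */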
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2sparc[insn->dst_reg];
	const u8 src = bpf2sparc[insn->src_reg];
	const int i = insn - ctx->prog->insnsi;
	const s16 off = insn->off;
	const s32 imm = insn->imm;

	if (insn->src_reg == BPF_REG_FP)
		ctx->saw_frame_pointer = true;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
		emit_alu3_K(SRL, src, 0, dst, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit_reg_move(src, dst, ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_alu(ADD, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_alu(SUB, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_alu(AND, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_alu(OR, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_alu(XOR, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_MUL | BPF_X:
		emit_alu(MUL, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_alu(MULX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
		emit_write_y(G0, ctx);
		emit_alu(DIV, src, dst, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_alu(UDIVX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X: {
		const u8 tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_write_y(G0, ctx);
		emit_alu3(DIV, dst, src, tmp, ctx);
		emit_alu3(MULX, tmp, src, tmp, ctx);
		emit_alu3(SUB, dst, tmp, dst, ctx);
		goto do_alu32_trunc;
	}
	case BPF_ALU64 | BPF_MOD | BPF_X: {
		const u8 tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_alu3(UDIVX, dst, src, tmp, ctx);
		emit_alu3(MULX, tmp, src, tmp, ctx);
		emit_alu3(SUB, dst, tmp, dst, ctx);
		break;
	}
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_alu(SLL, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_alu(SLLX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_alu(SRL, src, dst, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_alu(SRLX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_alu(SRA, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_alu(SRAX, src, dst, ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(SUB | RS1(0) | RS2(dst) | RD(dst), ctx);
		goto do_alu32_trunc;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		switch (imm) {
		case 16:
			emit_alu_K(SLL, dst, 16, ctx);
			emit_alu_K(SRL, dst, 16, ctx);
			if (insn_is_zext(&insn[1]))
				return 1;
			break;
		case 32:
			if (!ctx->prog->aux->verifier_zext)
				emit_alu_K(SRL, dst, 0, ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];

		ctx->tmp_1_used = true;
		switch (imm) {
		case 16:
			emit_alu3_K(AND, dst, 0xff, tmp, ctx);
			emit_alu3_K(SRL, dst, 8, dst, ctx);
			emit_alu3_K(AND, dst, 0xff, dst, ctx);
			emit_alu3_K(SLL, tmp, 8, tmp, ctx);
			emit_alu(OR, tmp, dst, ctx);
			if (insn_is_zext(&insn[1]))
				return 1;
			break;

		case 32:
			ctx->tmp_2_used = true;
			emit_alu3_K(SRL, dst, 24, tmp, ctx);	/* tmp  = dst >> 24 */
			emit_alu3_K(SRL, dst, 16, tmp2, ctx);	/* tmp2 = dst >> 16 */
			emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */
			emit_alu3_K(SLL, tmp2, 8, tmp2, ctx);	/* tmp2 = tmp2 << 8 */
			emit_alu(OR, tmp2, tmp, ctx);		/* tmp  = tmp | tmp2 */
			emit_alu3_K(SRL, dst, 8, tmp2, ctx);	/* tmp2 = dst >> 8 */
			emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */
			emit_alu3_K(SLL, tmp2, 16, tmp2, ctx);	/* tmp2 = tmp2 << 16 */
			emit_alu(OR, tmp2, tmp, ctx);		/* tmp  = tmp | tmp2 */
			emit_alu3_K(AND, dst, 0xff, dst, ctx);	/* dst  = dst & 0xff */
			emit_alu3_K(SLL, dst, 24, dst, ctx);	/* dst  = dst << 24 */
			emit_alu(OR, tmp, dst, ctx);		/* dst  = dst | tmp */
			if (insn_is_zext(&insn[1]))
				return 1;
			break;

		case 64:
			emit_alu3_K(ADD, SP, STACK_BIAS + 128, tmp, ctx);
			emit(ST64 | RS1(tmp) | RS2(G0) | RD(dst), ctx);
			emit(LD64A | ASI(ASI_PL) | RS1(tmp) | RS2(G0) | RD(dst), ctx);
			break;
		}
		break;
	}
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
		emit_loadimm32(imm, dst, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_loadimm_sext(imm, dst, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_alu_K(ADD, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_alu_K(SUB, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_alu_K(AND, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_alu_K(OR, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_alu_K(XOR, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_MUL | BPF_K:
		emit_alu_K(MUL, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_alu_K(MULX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
		if (imm == 0)
			return -EINVAL;

		emit_write_y(G0, ctx);
		emit_alu_K(DIV, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_DIV | BPF_K:
		if (imm == 0)
			return -EINVAL;

		emit_alu_K(UDIVX, dst, imm, ctx);
		break;
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K: {
		const u8 tmp = bpf2sparc[TMP_REG_2];
		unsigned int div;

		if (imm == 0)
			return -EINVAL;

		div = (BPF_CLASS(code) == BPF_ALU64) ? UDIVX : DIV;

		ctx->tmp_2_used = true;

		if (BPF_CLASS(code) != BPF_ALU64)
			emit_write_y(G0, ctx);
		if (is_simm13(imm)) {
			emit(div | IMMED | RS1(dst) | S13(imm) | RD(tmp), ctx);
			emit(MULX | IMMED | RS1(tmp) | S13(imm) | RD(tmp), ctx);
			emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx);
		} else {
			const u8 tmp1 = bpf2sparc[TMP_REG_1];

			ctx->tmp_1_used = true;

			emit_set_const_sext(imm, tmp1, ctx);
			emit(div | RS1(dst) | RS2(tmp1) | RD(tmp), ctx);
			emit(MULX | RS1(tmp) | RS2(tmp1) | RD(tmp), ctx);
			emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx);
		}
		goto do_alu32_trunc;
	}
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_alu_K(SLL, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_alu_K(SLLX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_alu_K(SRL, dst, imm, ctx);
		if (insn_is_zext(&insn[1]))
			return 1;
		break;
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_alu_K(SRLX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_alu_K(SRA, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_alu_K(SRAX, dst, imm, ctx);
		break;

	do_alu32_trunc:
		if (BPF_CLASS(code) == BPF_ALU &&
		    !ctx->prog->aux->verifier_zext)
			emit_alu_K(SRL, dst, 0, ctx);
		break;
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		emit_branch(BA, ctx->idx, ctx->offset[i + off], ctx);
		emit_nop(ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X: {
		int err;

		err = emit_compare_and_branch(code, dst, src, 0, false, i + off, ctx);
		if (err)
			return err;
		break;
	}
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K: {
		int err;

		err = emit_compare_and_branch(code, dst, 0, imm, true, i + off, ctx);
		if (err)
			return err;
		break;
	}
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		u8 *func = ((u8 *)__bpf_call_base) + imm;

		ctx->saw_call = true;

		emit_call((u32 *)func, ctx);
		emit_nop(ctx);

		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
		break;
	}

	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		emit_tail_call(ctx);
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		emit_branch(BA, ctx->idx, ctx->epilogue_offset, ctx);
		emit_nop(ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_loadimm64(imm64, dst, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		u32 opcode = 0, rs2;

		ctx->tmp_1_used = true;
		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = LD32;
			break;
		case BPF_H:
			opcode = LD16;
			break;
		case BPF_B:
			opcode = LD8;
			break;
		case BPF_DW:
			opcode = LD64;
			break;
		}

		if (is_simm13(off)) {
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(src) | rs2 | RD(dst), ctx);
		if (opcode != LD64 && insn_is_zext(&insn[1]))
			return 1;
		break;
	}
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		u32 opcode = 0, rs2;

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		ctx->tmp_2_used = true;
		emit_loadimm(imm, tmp2, ctx);

		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = ST32;
			break;
		case BPF_H:
			opcode = ST16;
			break;
		case BPF_B:
			opcode = ST8;
			break;
		case BPF_DW:
			opcode = ST64;
			break;
		}

		if (is_simm13(off)) {
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			ctx->tmp_1_used = true;
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(dst) | rs2 | RD(tmp2), ctx);
		break;
	}
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		u32 opcode = 0, rs2;

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = ST32;
			break;
		case BPF_H:
			opcode = ST16;
			break;
		case BPF_B:
			opcode = ST8;
			break;
		case BPF_DW:
			opcode = ST64;
			break;
		}
		if (is_simm13(off)) {
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			ctx->tmp_1_used = true;
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(dst) | rs2 | RD(src), ctx);
		break;
	}
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		const u8 tmp3 = bpf2sparc[TMP_REG_3];

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		ctx->tmp_1_used = true;
		ctx->tmp_2_used = true;
		ctx->tmp_3_used = true;
		emit_loadimm(off, tmp, ctx);
		emit_alu3(ADD, dst, tmp, tmp, ctx);

		emit(LD32 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx);
		emit_alu3(ADD, tmp2, src, tmp3, ctx);
		emit(CAS | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx);
		emit_cmp(tmp2, tmp3, ctx);
		emit_branch(BNE, 4, 0, ctx);
		emit_nop(ctx);
		break;
	}
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		const u8 tmp3 = bpf2sparc[TMP_REG_3];

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		ctx->tmp_1_used = true;
		ctx->tmp_2_used = true;
		ctx->tmp_3_used = true;
		emit_loadimm(off, tmp, ctx);
		emit_alu3(ADD, dst, tmp, tmp, ctx);

		emit(LD64 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx);
		emit_alu3(ADD, tmp2, src, tmp3, ctx);
		emit(CASX | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx);
		emit_cmp(tmp2, tmp3, ctx);
		emit_branch(BNE, 4, 0, ctx);
		emit_nop(ctx);
		break;
	}
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);

		if (ret > 0) {
			i++;
			ctx->offset[i] = ctx->idx;
			continue;
		}
		ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}
	return 0;
}
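/* ctx->offset[] maps each BPF instruction index to the JIT
 * instruction index at which the *next* instruction starts; branch
 * emission looks destinations up there, which is why the offsets
 * must stabilize across sizing passes before the image is emitted.
 */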
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = 0x91d02005; /* ta 5 */
}
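/* Returning true asks the verifier to materialize 32-bit
 * zero-extensions as explicit instructions; the insn_is_zext()
 * checks in build_insn() then skip the ones that the emitted SPARC
 * code already performs for free.
 */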
bool bpf_jit_needs_zext(void)
{
	return true;
}
struct sparc64_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct sparc64_jit_data *jit_data;
	struct bpf_binary_header *header;
	u32 prev_image_size, image_size;
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct jit_ctx ctx;
	u8 *image_ptr;
	int pass, i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}
	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		image_size = sizeof(u32) * ctx.idx;
		prev_image_size = image_size;
		goto skip_init_ctx;
	}
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
	}
	/* Longest sequence emitted is for bswap32, 12 instructions.  Pre-cook
	 * the offset array so that we converge faster.
	 */
	for (i = 0; i < prog->len; i++)
		ctx.offset[i] = i * (12 * 4);
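	/* Instruction counts depend on branch reach (cbcond vs.
	 * branch+nop, simm13 immediates vs. multi-instruction loads),
	 * and branch reach depends on where instructions land, so
	 * re-size the program until the image size stops changing.
	 */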
	prev_image_size = ~0U;
	for (pass = 1; pass < 40; pass++) {
		ctx.idx = 0;

		build_prologue(&ctx);
		if (build_body(&ctx)) {
			prog = orig_prog;
			goto out_off;
		}
		build_epilogue(&ctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass,
				ctx.idx * 4,
				ctx.tmp_1_used ? '1' : ' ',
				ctx.tmp_2_used ? '2' : ' ',
				ctx.tmp_3_used ? '3' : ' ',
				ctx.saw_frame_pointer ? 'F' : ' ',
				ctx.saw_call ? 'C' : ' ',
				ctx.saw_tail_call ? 'T' : ' ');

		if (ctx.idx * 4 == prev_image_size)
			break;
		prev_image_size = ctx.idx * 4;
		cond_resched();
	}
	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.image = (u32 *)image_ptr;
skip_init_ctx:
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	if (ctx.idx * 4 != prev_image_size) {
		pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n",
		       prev_image_size, ctx.idx * 4);
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, pass, ctx.image);

	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
	if (!prog->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}

	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = image_size;

	if (!prog->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(prog, ctx.offset);
out_off:
		kfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}