// SPDX-License-Identifier: GPL-2.0
#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/cache.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

#include "bpf_jit_64.h"
static inline bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}

static inline bool is_simm10(unsigned int value)
{
	return value + 0x200 < 0x400;
}

static inline bool is_simm5(unsigned int value)
{
	return value + 0x10 < 0x20;
}

static inline bool is_sethi(unsigned int value)
{
	return (value & ~0x3fffff) == 0;
}
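/* Each is_simmN() check relies on unsigned wrap-around: a signed N-bit
 * immediate spans [-2^(N-1), 2^(N-1) - 1], and adding 2^(N-1) maps that
 * range onto [0, 2^N - 1], so a single unsigned compare suffices.  For
 * example, is_simm13(-4096) and is_simm13(4095) hold while
 * is_simm13(4096) does not.
 */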
static void bpf_flush_icache(void *start_, void *end_)
{
	/* Cheetah's I-cache is fully coherent. */
	if (tlb_type == spitfire) {
		unsigned long start = (unsigned long) start_;
		unsigned long end = (unsigned long) end_;

		start &= ~7UL;
		end = (end + 7UL) & ~7UL;

		while (start < end) {
			flushi(start);
			start += 32;
		}
	}
}
#define SEEN_DATAREF	1 /* might call external helpers */
#define SEEN_XREG	2 /* ebx is used */
#define SEEN_MEM	4 /* use mem[] for temporary storage */
#define S13(X)		((X) & 0x1fff)
#define S5(X)		((X) & 0x1f)
#define IMMED		0x00002000
#define RD(X)		((X) << 25)
#define RS1(X)		((X) << 14)
#define RS2(X)		((X) << 0)
#define OP(X)		((X) << 30)
#define OP2(X)		((X) << 22)
#define OP3(X)		((X) << 19)
#define COND(X)		(((X) & 0xf) << 25)
#define CBCOND(X)	(((X) & 0x1f) << 25)
#define F1(X)		OP(X)
#define F2(X, Y)	(OP(X) | OP2(Y))
#define F3(X, Y)	(OP(X) | OP3(Y))
#define ASI(X)		(((X) & 0xff) << 5)
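/* For illustration: an R-format "add %o1, %o2, %o3" is assembled from these
 * fields as ADD | RS1(O1) | RS2(O2) | RD(O3), while the immediate form
 * "add %o1, 42, %o3" is ADD | IMMED | RS1(O1) | S13(42) | RD(O3).
 */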
#define CONDN		COND(0x0)
#define CONDE		COND(0x1)
#define CONDLE		COND(0x2)
#define CONDL		COND(0x3)
#define CONDLEU		COND(0x4)
#define CONDCS		COND(0x5)
#define CONDNEG		COND(0x6)
#define CONDVC		COND(0x7)
#define CONDA		COND(0x8)
#define CONDNE		COND(0x9)
#define CONDG		COND(0xa)
#define CONDGE		COND(0xb)
#define CONDGU		COND(0xc)
#define CONDCC		COND(0xd)
#define CONDPOS		COND(0xe)
#define CONDVS		COND(0xf)

#define CONDGEU		CONDCC
#define CONDLU		CONDCS
#define WDISP22(X)	(((X) >> 2) & 0x3fffff)
#define WDISP19(X)	(((X) >> 2) & 0x7ffff)

/* The 10-bit branch displacement for CBCOND is split into two fields */
static u32 WDISP10(u32 off)
{
	u32 ret = ((off >> 2) & 0xff) << 5;

	ret |= ((off >> (2 + 8)) & 0x03) << 19;

	return ret;
}
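/* Worked example: a forward branch over three instructions has off = 12,
 * i.e. a word displacement of 3.  WDISP10() places the low eight bits of
 * that displacement (0x03) at bits 5..12 and the top two bits (here 0)
 * at bits 19..20 of the CBCOND instruction word.
 */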
#define CBCONDE		CBCOND(0x09)
#define CBCONDLE	CBCOND(0x0a)
#define CBCONDL		CBCOND(0x0b)
#define CBCONDLEU	CBCOND(0x0c)
#define CBCONDCS	CBCOND(0x0d)
#define CBCONDN		CBCOND(0x0e)
#define CBCONDVS	CBCOND(0x0f)
#define CBCONDNE	CBCOND(0x19)
#define CBCONDG		CBCOND(0x1a)
#define CBCONDGE	CBCOND(0x1b)
#define CBCONDGU	CBCOND(0x1c)
#define CBCONDCC	CBCOND(0x1d)
#define CBCONDPOS	CBCOND(0x1e)
#define CBCONDVC	CBCOND(0x1f)

#define CBCONDGEU	CBCONDCC
#define CBCONDLU	CBCONDCS
#define ANNUL		(1 << 29)
#define XCC		(1 << 21)

#define BRANCH		(F2(0, 1) | XCC)
#define CBCOND_OP	(F2(0, 3) | XCC)

#define BA		(BRANCH | CONDA)
#define BG		(BRANCH | CONDG)
#define BL		(BRANCH | CONDL)
#define BLE		(BRANCH | CONDLE)
#define BGU		(BRANCH | CONDGU)
#define BLEU		(BRANCH | CONDLEU)
#define BGE		(BRANCH | CONDGE)
#define BGEU		(BRANCH | CONDGEU)
#define BLU		(BRANCH | CONDLU)
#define BE		(BRANCH | CONDE)
#define BNE		(BRANCH | CONDNE)
#define SETHI(K, REG)	\
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG)	\
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
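/* A 32-bit constant is built with the classic two-instruction pair: SETHI
 * deposits the upper 22 bits (clearing the low 10) and OR_LO fills in the
 * remaining low 10 bits.  E.g. for K = 0x12345678:
 *	sethi	%hi(0x12345678), %reg	! reg = 0x12345400
 *	or	%reg, 0x278, %reg	! reg = 0x12345678
 */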
#define ADD		F3(2, 0x00)
#define AND		F3(2, 0x01)
#define ANDCC		F3(2, 0x11)
#define OR		F3(2, 0x02)
#define XOR		F3(2, 0x03)
#define SUB		F3(2, 0x04)
#define SUBCC		F3(2, 0x14)
#define MUL		F3(2, 0x0a)
#define MULX		F3(2, 0x09)
#define UDIVX		F3(2, 0x0d)
#define DIV		F3(2, 0x0e)
#define SLL		F3(2, 0x25)
#define SLLX		(F3(2, 0x25)|(1<<12))
#define SRA		F3(2, 0x27)
#define SRAX		(F3(2, 0x27)|(1<<12))
#define SRL		F3(2, 0x26)
#define SRLX		(F3(2, 0x26)|(1<<12))
#define JMPL		F3(2, 0x38)
#define SAVE		F3(2, 0x3c)
#define RESTORE		F3(2, 0x3d)
#define CALL		F1(1)
#define BR		F2(0, 0x01)
#define RD_Y		F3(2, 0x28)
#define WR_Y		F3(2, 0x30)
#define LD32		F3(3, 0x00)
#define LD8		F3(3, 0x01)
#define LD16		F3(3, 0x02)
#define LD64		F3(3, 0x0b)
#define LD64A		F3(3, 0x1b)
#define ST8		F3(3, 0x05)
#define ST16		F3(3, 0x06)
#define ST32		F3(3, 0x04)
#define ST64		F3(3, 0x0e)

#define CAS		F3(3, 0x3c)
#define CASX		F3(3, 0x3e)

#define LDPTR		LD64
#define BASE_STACKFRAME	176

#define LD32I		(LD32 | IMMED)
#define LD8I		(LD8 | IMMED)
#define LD16I		(LD16 | IMMED)
#define LD64I		(LD64 | IMMED)
#define LDPTRI		(LDPTR | IMMED)
#define ST32I		(ST32 | IMMED)
struct jit_ctx {
	struct bpf_prog		*prog;
	unsigned int		*offset;
	unsigned int		idx;
	unsigned int		epilogue_offset;
	bool			tmp_1_used;
	bool			tmp_2_used;
	bool			tmp_3_used;
	bool			saw_ld_abs_ind;
	bool			saw_frame_pointer;
	bool			saw_call;
	bool			saw_tail_call;
	u32			*image;
};

#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
#define SKB_HLEN_REG	(MAX_BPF_JIT_REG + 2)
#define SKB_DATA_REG	(MAX_BPF_JIT_REG + 3)
#define TMP_REG_3	(MAX_BPF_JIT_REG + 4)
/* Map BPF registers to SPARC registers */
static const int bpf2sparc[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = O5,

	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = O0,
	[BPF_REG_2] = O1,
	[BPF_REG_3] = O2,
	[BPF_REG_4] = O3,
	[BPF_REG_5] = O4,

	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = L0,
	[BPF_REG_7] = L1,
	[BPF_REG_8] = L2,
	[BPF_REG_9] = L3,

	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = L6,

	[BPF_REG_AX] = G7,

	/* temporary register for internal BPF JIT */
	[TMP_REG_1] = G1,
	[TMP_REG_2] = G2,
	[TMP_REG_3] = G3,

	[SKB_HLEN_REG] = L4,
	[SKB_DATA_REG] = L5,
};
static void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = insn;

	ctx->idx++;
}
static void emit_call(u32 *func, struct jit_ctx *ctx)
{
	if (ctx->image != NULL) {
		void *here = &ctx->image[ctx->idx];
		unsigned int off;

		off = (void *)func - here;
		ctx->image[ctx->idx] = CALL | ((off >> 2) & 0x3fffffff);
	}
	ctx->idx++;
}
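/* The CALL instruction carries a 30-bit signed word displacement relative
 * to the call site, so it can reach roughly +/- 2GB -- plenty for calling
 * kernel helpers from the JIT image.
 */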
static void emit_nop(struct jit_ctx *ctx)
{
	emit(SETHI(0, G0), ctx);
}
static void emit_reg_move(u32 from, u32 to, struct jit_ctx *ctx)
{
	emit(OR | RS1(G0) | RS2(from) | RD(to), ctx);
}
/* Emit 32-bit constant, zero extended. */
static void emit_set_const(s32 K, u32 reg, struct jit_ctx *ctx)
{
	emit(SETHI(K, reg), ctx);
	emit(OR_LO(K, reg), ctx);
}
/* Emit 32-bit constant, sign extended. */
static void emit_set_const_sext(s32 K, u32 reg, struct jit_ctx *ctx)
{
	if (K >= 0) {
		emit(SETHI(K, reg), ctx);
		emit(OR_LO(K, reg), ctx);
	} else {
		u32 hbits = ~(u32) K;
		u32 lbits = -0x400 | (u32) K;

		emit(SETHI(hbits, reg), ctx);
		emit(XOR | IMMED | RS1(reg) | S13(lbits) | RD(reg), ctx);
	}
}
static void emit_alu(u32 opcode, u32 src, u32 dst, struct jit_ctx *ctx)
{
	emit(opcode | RS1(dst) | RS2(src) | RD(dst), ctx);
}
static void emit_alu3(u32 opcode, u32 a, u32 b, u32 c, struct jit_ctx *ctx)
{
	emit(opcode | RS1(a) | RS2(b) | RD(c), ctx);
}
static void emit_alu_K(unsigned int opcode, unsigned int dst, unsigned int imm,
		       struct jit_ctx *ctx)
{
	bool small_immed = is_simm13(imm);
	unsigned int insn = opcode;

	insn |= RS1(dst) | RD(dst);
	if (small_immed) {
		emit(insn | IMMED | S13(imm), ctx);
	} else {
		unsigned int tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_set_const_sext(imm, tmp, ctx);
		emit(insn | RS2(tmp), ctx);
	}
}
static void emit_alu3_K(unsigned int opcode, unsigned int src, unsigned int imm,
			unsigned int dst, struct jit_ctx *ctx)
{
	bool small_immed = is_simm13(imm);
	unsigned int insn = opcode;

	insn |= RS1(src) | RD(dst);
	if (small_immed) {
		emit(insn | IMMED | S13(imm), ctx);
	} else {
		unsigned int tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_set_const_sext(imm, tmp, ctx);
		emit(insn | RS2(tmp), ctx);
	}
}
static void emit_loadimm32(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (K >= 0 && is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const(K, dest, ctx);
	}
}
static void emit_loadimm(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const(K, dest, ctx);
	}
}
static void emit_loadimm_sext(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const_sext(K, dest, ctx);
	}
}
static void analyze_64bit_constant(u32 high_bits, u32 low_bits,
				   int *hbsp, int *lbsp, int *abbasp)
{
	int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
	int i;

	lowest_bit_set = highest_bit_set = -1;
	i = 0;
	do {
		if ((lowest_bit_set == -1) && ((low_bits >> i) & 1))
			lowest_bit_set = i;
		if ((highest_bit_set == -1) && ((high_bits >> (32 - i - 1)) & 1))
			highest_bit_set = (64 - i - 1);
	} while (++i < 32 && (highest_bit_set == -1 ||
			      lowest_bit_set == -1));
	if (i == 32) {
		i = 0;
		do {
			if (lowest_bit_set == -1 && ((high_bits >> i) & 1))
				lowest_bit_set = i + 32;
			if (highest_bit_set == -1 &&
			    ((low_bits >> (32 - i - 1)) & 1))
				highest_bit_set = 32 - i - 1;
		} while (++i < 32 && (highest_bit_set == -1 ||
				      lowest_bit_set == -1));
	}

	all_bits_between_are_set = 1;
	for (i = lowest_bit_set; i <= highest_bit_set; i++) {
		if (i < 32) {
			if ((low_bits & (1 << i)) != 0)
				continue;
		} else {
			if ((high_bits & (1 << (i - 32))) != 0)
				continue;
		}
		all_bits_between_are_set = 0;
		break;
	}
	*hbsp = highest_bit_set;
	*lbsp = lowest_bit_set;
	*abbasp = all_bits_between_are_set;
}
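/* Example: for 0x00000ff000000000 this helper reports lowest_bit_set = 36,
 * highest_bit_set = 43 and all_bits_between_are_set = 1, which lets
 * emit_loadimm64() below materialize the value as a small constant shifted
 * into place instead of a full four-instruction decomposition.
 */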
static unsigned long create_simple_focus_bits(unsigned long high_bits,
					      unsigned long low_bits,
					      int lowest_bit_set, int shift)
{
	unsigned long hi, lo;

	if (lowest_bit_set < 32) {
		lo = (low_bits >> lowest_bit_set) << shift;
		hi = ((high_bits << (32 - lowest_bit_set)) << shift);
	} else {
		lo = 0;
		hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
	}
	return hi | lo;
}
static bool const64_is_2insns(unsigned long high_bits,
			      unsigned long low_bits)
{
	int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

	if (high_bits == 0 || high_bits == 0xffffffff)
		return true;

	analyze_64bit_constant(high_bits, low_bits,
			       &highest_bit_set, &lowest_bit_set,
			       &all_bits_between_are_set);

	if ((highest_bit_set == 63 || lowest_bit_set == 0) &&
	    all_bits_between_are_set != 0)
		return true;

	if (highest_bit_set - lowest_bit_set < 21)
		return true;

	return false;
}
static void sparc_emit_set_const64_quick2(unsigned long high_bits,
					  unsigned long low_imm,
					  unsigned int dest,
					  int shift_count, struct jit_ctx *ctx)
{
	emit_loadimm32(high_bits, dest, ctx);

	/* Now shift it up into place. */
	emit_alu_K(SLLX, dest, shift_count, ctx);

	/* If there is a low immediate part piece, finish up by
	 * putting that in as well.
	 */
	if (low_imm != 0)
		emit(OR | IMMED | RS1(dest) | S13(low_imm) | RD(dest), ctx);
}
static void emit_loadimm64(u64 K, unsigned int dest, struct jit_ctx *ctx)
{
	int all_bits_between_are_set, lowest_bit_set, highest_bit_set;
	unsigned int tmp = bpf2sparc[TMP_REG_1];
	u32 low_bits = (K & 0xffffffff);
	u32 high_bits = (K >> 32);

	/* These two tests also take care of all of the one
	 * instruction cases.
	 */
	if (high_bits == 0xffffffff && (low_bits & 0x80000000))
		return emit_loadimm_sext(K, dest, ctx);
	if (high_bits == 0x00000000)
		return emit_loadimm32(K, dest, ctx);

	analyze_64bit_constant(high_bits, low_bits, &highest_bit_set,
			       &lowest_bit_set, &all_bits_between_are_set);

	/* 1) mov	-1, %reg
	 *    sllx	%reg, shift, %reg
	 * 2) mov	-1, %reg
	 *    srlx	%reg, shift, %reg
	 * 3) mov	some_small_const, %reg
	 *    sllx	%reg, shift, %reg
	 */
	if (((highest_bit_set == 63 || lowest_bit_set == 0) &&
	     all_bits_between_are_set != 0) ||
	    ((highest_bit_set - lowest_bit_set) < 12)) {
		int shift = lowest_bit_set;
		long the_const = -1;

		if ((highest_bit_set != 63 && lowest_bit_set != 0) ||
		    all_bits_between_are_set == 0) {
			the_const =
				create_simple_focus_bits(high_bits, low_bits,
							 lowest_bit_set, 0);
		} else if (lowest_bit_set == 0)
			shift = -(63 - highest_bit_set);

		emit(OR | IMMED | RS1(G0) | S13(the_const) | RD(dest), ctx);
		if (shift > 0)
			emit_alu_K(SLLX, dest, shift, ctx);
		else if (shift < 0)
			emit_alu_K(SRLX, dest, -shift, ctx);

		return;
	}

	/* Now a range of 22 or less bits set somewhere.
	 * 1) sethi	%hi(focus_bits), %reg
	 *    sllx	%reg, shift, %reg
	 * 2) sethi	%hi(focus_bits), %reg
	 *    srlx	%reg, shift, %reg
	 */
	if ((highest_bit_set - lowest_bit_set) < 21) {
		unsigned long focus_bits =
			create_simple_focus_bits(high_bits, low_bits,
						 lowest_bit_set, 10);

		emit(SETHI(focus_bits, dest), ctx);

		/* If lowest_bit_set == 10 then a sethi alone could
		 * have done it.
		 */
		if (lowest_bit_set < 10)
			emit_alu_K(SRLX, dest, 10 - lowest_bit_set, ctx);
		else if (lowest_bit_set > 10)
			emit_alu_K(SLLX, dest, lowest_bit_set - 10, ctx);
		return;
	}

	/* Ok, now 3 instruction sequences. */
	if (low_bits == 0) {
		emit_loadimm32(high_bits, dest, ctx);
		emit_alu_K(SLLX, dest, 32, ctx);
		return;
	}

	/* We may be able to do something quick
	 * when the constant is negated, so try that.
	 */
	if (const64_is_2insns((~high_bits) & 0xffffffff,
			      (~low_bits) & 0xfffffc00)) {
		/* NOTE: The trailing bits get XOR'd so we need the
		 * non-negated bits, not the negated ones.
		 */
		unsigned long trailing_bits = low_bits & 0x3ff;

		if ((((~high_bits) & 0xffffffff) == 0 &&
		     ((~low_bits) & 0x80000000) == 0) ||
		    (((~high_bits) & 0xffffffff) == 0xffffffff &&
		     ((~low_bits) & 0x80000000) != 0)) {
			unsigned long fast_int = (~low_bits & 0xffffffff);

			if ((is_sethi(fast_int) &&
			     (~high_bits & 0xffffffff) == 0)) {
				emit(SETHI(fast_int, dest), ctx);
			} else if (is_simm13(fast_int)) {
				emit(OR | IMMED | RS1(G0) | S13(fast_int) | RD(dest), ctx);
			} else {
				emit_loadimm64(fast_int, dest, ctx);
			}
		} else {
			u64 n = ((~low_bits) & 0xfffffc00) |
				(((unsigned long)((~high_bits) & 0xffffffff)) << 32);
			emit_loadimm64(n, dest, ctx);
		}

		low_bits = -0x400 | trailing_bits;

		emit(XOR | IMMED | RS1(dest) | S13(low_bits) | RD(dest), ctx);
		return;
	}

	/* 1) sethi	%hi(xxx), %reg
	 *    or	%reg, %lo(xxx), %reg
	 *    sllx	%reg, yyy, %reg
	 */
	if ((highest_bit_set - lowest_bit_set) < 32) {
		unsigned long focus_bits =
			create_simple_focus_bits(high_bits, low_bits,
						 lowest_bit_set, 0);

		/* So what we know is that the set bits straddle the
		 * middle of the 64-bit word.
		 */
		sparc_emit_set_const64_quick2(focus_bits, 0, dest,
					      lowest_bit_set, ctx);
		return;
	}

	/* 1) sethi	%hi(high_bits), %reg
	 *    or	%reg, %lo(high_bits), %reg
	 *    sllx	%reg, 32, %reg
	 *    or	%reg, low_bits, %reg
	 */
	if (is_simm13(low_bits) && ((int)low_bits > 0)) {
		sparc_emit_set_const64_quick2(high_bits, low_bits,
					      dest, 32, ctx);
		return;
	}

	/* Oh well, we tried... Do a full 64-bit decomposition. */
	ctx->tmp_1_used = true;

	emit_loadimm32(high_bits, tmp, ctx);
	emit_loadimm32(low_bits, dest, ctx);
	emit_alu_K(SLLX, tmp, 32, ctx);
	emit(OR | RS1(dest) | RS2(tmp) | RD(dest), ctx);
}
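/* Example of the cheapest path above: K = 0x0000000300000000 has only bits
 * 32..33 set, so the JIT emits "mov 3, %dest; sllx %dest, 32, %dest" -- two
 * instructions instead of the generic sethi/or/sllx/or sequence.
 */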
static void emit_branch(unsigned int br_opc, unsigned int from_idx, unsigned int to_idx,
			struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	if (br_opc & XCC)
		emit(br_opc | WDISP19(off << 2), ctx);
	else
		emit(br_opc | WDISP22(off << 2), ctx);
}
static void emit_cbcond(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx,
			const u8 dst, const u8 src, struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	emit(cb_opc | WDISP10(off << 2) | RS1(dst) | RS2(src), ctx);
}
static void emit_cbcondi(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx,
			 const u8 dst, s32 imm, struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	emit(cb_opc | IMMED | WDISP10(off << 2) | RS1(dst) | S5(imm), ctx);
}
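/* CBCOND fuses compare and branch into one instruction, but it only reaches
 * targets within a signed 10-bit word displacement and only takes a 5-bit
 * signed immediate; emit_compare_and_branch() below therefore falls back to
 * a compare plus a conditional branch whenever either limit is exceeded (or
 * for BPF_JSET, which has no CBCOND equivalent).
 */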
#define emit_read_y(REG, CTX)	emit(RD_Y | RD(REG), CTX)
#define emit_write_y(REG, CTX)	emit(WR_Y | IMMED | RS1(REG) | S13(0), CTX)

#define emit_cmp(R1, R2, CTX)				\
	emit(SUBCC | RS1(R1) | RS2(R2) | RD(G0), CTX)

#define emit_cmpi(R1, IMM, CTX)				\
	emit(SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX)

#define emit_btst(R1, R2, CTX)				\
	emit(ANDCC | RS1(R1) | RS2(R2) | RD(G0), CTX)

#define emit_btsti(R1, IMM, CTX)			\
	emit(ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX)
static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
				   const s32 imm, bool is_imm, int branch_dst,
				   struct jit_ctx *ctx)
{
	bool use_cbcond = (sparc64_elf_hwcap & AV_SPARC_CBCOND) != 0;
	const u8 tmp = bpf2sparc[TMP_REG_1];

	branch_dst = ctx->offset[branch_dst];

	if (!is_simm10(branch_dst - ctx->idx) ||
	    BPF_OP(code) == BPF_JSET)
		use_cbcond = false;

	if (is_imm) {
		bool fits = true;

		if (use_cbcond) {
			if (!is_simm5(imm))
				fits = false;
		} else if (!is_simm13(imm)) {
			fits = false;
		}
		if (!fits) {
			ctx->tmp_1_used = true;
			emit_loadimm_sext(imm, tmp, ctx);
			src = tmp;
			is_imm = false;
		}
	}

	if (!use_cbcond) {
		u32 br_opcode;

		if (BPF_OP(code) == BPF_JSET) {
			if (is_imm)
				emit_btsti(dst, imm, ctx);
			else
				emit_btst(dst, src, ctx);
		} else {
			if (is_imm)
				emit_cmpi(dst, imm, ctx);
			else
				emit_cmp(dst, src, ctx);
		}
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			br_opcode = BE;
			break;
		case BPF_JGT:
			br_opcode = BGU;
			break;
		case BPF_JLT:
			br_opcode = BLU;
			break;
		case BPF_JGE:
			br_opcode = BGEU;
			break;
		case BPF_JLE:
			br_opcode = BLEU;
			break;
		case BPF_JSET:
		case BPF_JNE:
			br_opcode = BNE;
			break;
		case BPF_JSGT:
			br_opcode = BG;
			break;
		case BPF_JSLT:
			br_opcode = BL;
			break;
		case BPF_JSGE:
			br_opcode = BGE;
			break;
		case BPF_JSLE:
			br_opcode = BLE;
			break;
		default:
			/* Make sure we don't leak kernel information to the
			 * user.
			 */
			return -EFAULT;
		}
		emit_branch(br_opcode, ctx->idx, branch_dst, ctx);
		emit_nop(ctx);
	} else {
		u32 cbcond_opcode;

		switch (BPF_OP(code)) {
		case BPF_JEQ:
			cbcond_opcode = CBCONDE;
			break;
		case BPF_JGT:
			cbcond_opcode = CBCONDGU;
			break;
		case BPF_JLT:
			cbcond_opcode = CBCONDLU;
			break;
		case BPF_JGE:
			cbcond_opcode = CBCONDGEU;
			break;
		case BPF_JLE:
			cbcond_opcode = CBCONDLEU;
			break;
		case BPF_JNE:
			cbcond_opcode = CBCONDNE;
			break;
		case BPF_JSGT:
			cbcond_opcode = CBCONDG;
			break;
		case BPF_JSLT:
			cbcond_opcode = CBCONDL;
			break;
		case BPF_JSGE:
			cbcond_opcode = CBCONDGE;
			break;
		case BPF_JSLE:
			cbcond_opcode = CBCONDLE;
			break;
		default:
			/* Make sure we don't leak kernel information to the
			 * user.
			 */
			return -EFAULT;
		}
		cbcond_opcode |= CBCOND_OP;
		if (is_imm)
			emit_cbcondi(cbcond_opcode, ctx->idx, branch_dst,
				     dst, imm, ctx);
		else
			emit_cbcond(cbcond_opcode, ctx->idx, branch_dst,
				    dst, src, ctx);
	}
	return 0;
}
static void load_skb_regs(struct jit_ctx *ctx, u8 r_skb)
{
	const u8 r_headlen = bpf2sparc[SKB_HLEN_REG];
	const u8 r_data = bpf2sparc[SKB_DATA_REG];
	const u8 r_tmp = bpf2sparc[TMP_REG_1];
	unsigned int off;

	off = offsetof(struct sk_buff, len);
	emit(LD32I | RS1(r_skb) | S13(off) | RD(r_headlen), ctx);

	off = offsetof(struct sk_buff, data_len);
	emit(LD32I | RS1(r_skb) | S13(off) | RD(r_tmp), ctx);

	emit(SUB | RS1(r_headlen) | RS2(r_tmp) | RD(r_headlen), ctx);

	off = offsetof(struct sk_buff, data);
	emit(LDPTRI | RS1(r_skb) | S13(off) | RD(r_data), ctx);
}
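/* After load_skb_regs(), SKB_HLEN_REG holds the linear data length
 * (skb->len - skb->data_len) and SKB_DATA_REG points at skb->data; they are
 * loaded once up front (and reloaded after helper calls that may change
 * packet data) so the LD_ABS/LD_IND stubs don't recompute them per access.
 */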
/* Just skip the save instruction and the ctx register move. */
#define BPF_TAILCALL_PROLOGUE_SKIP	16
#define BPF_TAILCALL_CNT_SP_OFF		(STACK_BIAS + 128)
static void build_prologue(struct jit_ctx *ctx)
{
	s32 stack_needed = BASE_STACKFRAME;

	if (ctx->saw_frame_pointer || ctx->saw_tail_call) {
		struct bpf_prog *prog = ctx->prog;
		u32 stack_depth;

		stack_depth = prog->aux->stack_depth;
		stack_needed += round_up(stack_depth, 16);
	}

	if (ctx->saw_tail_call)
		stack_needed += 8;

	/* save %sp, -176, %sp */
	emit(SAVE | IMMED | RS1(SP) | S13(-stack_needed) | RD(SP), ctx);

	/* tail_call_cnt = 0 */
	if (ctx->saw_tail_call) {
		u32 off = BPF_TAILCALL_CNT_SP_OFF;

		emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(G0), ctx);
	} else {
		emit_nop(ctx);
	}
	if (ctx->saw_frame_pointer) {
		const u8 vfp = bpf2sparc[BPF_REG_FP];

		emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx);
	}

	emit_reg_move(I0, O0, ctx);
	/* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */

	if (ctx->saw_ld_abs_ind)
		load_skb_regs(ctx, bpf2sparc[BPF_REG_1]);
}
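/* A tail call enters the target program BPF_TAILCALL_PROLOGUE_SKIP bytes past
 * its first instruction, keeping the current register window and tail-call
 * count, so the instructions emitted above have to stay in sync with that
 * offset (hence the reminder next to emit_reg_move()).
 */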
static void build_epilogue(struct jit_ctx *ctx)
{
	ctx->epilogue_offset = ctx->idx;

	/* ret (jmpl %i7 + 8, %g0) */
	emit(JMPL | IMMED | RS1(I7) | S13(8) | RD(G0), ctx);

	/* restore %i5, %g0, %o0 */
	emit(RESTORE | RS1(bpf2sparc[BPF_REG_0]) | RS2(G0) | RD(O0), ctx);
}
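/* The restore executes in the delay slot of the jmpl above: it tears down the
 * register window and at the same time copies the BPF return value register
 * into the caller's %o0.
 */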
static void emit_tail_call(struct jit_ctx *ctx)
{
	const u8 bpf_array = bpf2sparc[BPF_REG_2];
	const u8 bpf_index = bpf2sparc[BPF_REG_3];
	const u8 tmp = bpf2sparc[TMP_REG_1];
	u32 off;

	ctx->saw_tail_call = true;

	off = offsetof(struct bpf_array, map.max_entries);
	emit(LD32 | IMMED | RS1(bpf_array) | S13(off) | RD(tmp), ctx);
	emit_cmp(bpf_index, tmp, ctx);
#define OFFSET1 17
	emit_branch(BGEU, ctx->idx, ctx->idx + OFFSET1, ctx);
	emit_nop(ctx);

	off = BPF_TAILCALL_CNT_SP_OFF;
	emit(LD32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx);
	emit_cmpi(tmp, MAX_TAIL_CALL_CNT, ctx);
#define OFFSET2 13
	emit_branch(BGU, ctx->idx, ctx->idx + OFFSET2, ctx);
	emit_nop(ctx);

	emit_alu_K(ADD, tmp, 1, ctx);
	off = BPF_TAILCALL_CNT_SP_OFF;
	emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx);

	emit_alu3_K(SLL, bpf_index, 3, tmp, ctx);
	emit_alu(ADD, bpf_array, tmp, ctx);
	off = offsetof(struct bpf_array, ptrs);
	emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx);

	emit_cmpi(tmp, 0, ctx);
#define OFFSET3 5
	emit_branch(BE, ctx->idx, ctx->idx + OFFSET3, ctx);
	emit_nop(ctx);

	off = offsetof(struct bpf_prog, bpf_func);
	emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx);

	off = BPF_TAILCALL_PROLOGUE_SKIP;
	emit(JMPL | IMMED | RS1(tmp) | S13(off) | RD(G0), ctx);
	emit_nop(ctx);
}
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2sparc[insn->dst_reg];
	const u8 src = bpf2sparc[insn->src_reg];
	const int i = insn - ctx->prog->insnsi;
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	u32 *func;

	if (insn->src_reg == BPF_REG_FP)
		ctx->saw_frame_pointer = true;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
		emit_alu3_K(SRL, src, 0, dst, ctx);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit_reg_move(src, dst, ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_alu(ADD, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_alu(SUB, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_alu(AND, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_alu(OR, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_alu(XOR, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_MUL | BPF_X:
		emit_alu(MUL, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_alu(MULX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
		emit_write_y(G0, ctx);
		emit_alu(DIV, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_alu(UDIVX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X: {
		const u8 tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_write_y(G0, ctx);
		emit_alu3(DIV, dst, src, tmp, ctx);
		emit_alu3(MULX, tmp, src, tmp, ctx);
		emit_alu3(SUB, dst, tmp, dst, ctx);
		goto do_alu32_trunc;
	}
	case BPF_ALU64 | BPF_MOD | BPF_X: {
		const u8 tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_alu3(UDIVX, dst, src, tmp, ctx);
		emit_alu3(MULX, tmp, src, tmp, ctx);
		emit_alu3(SUB, dst, tmp, dst, ctx);
		break;
	}
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_alu(SLL, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_alu(SLLX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_alu(SRL, src, dst, ctx);
		break;
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_alu(SRLX, src, dst, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_alu(SRA, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_alu(SRAX, src, dst, ctx);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(SUB | RS1(0) | RS2(dst) | RD(dst), ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		switch (imm) {
		case 16:
			emit_alu_K(SLL, dst, 16, ctx);
			emit_alu_K(SRL, dst, 16, ctx);
			break;
		case 32:
			emit_alu_K(SRL, dst, 0, ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];

		ctx->tmp_1_used = true;
		switch (imm) {
		case 16:
			emit_alu3_K(AND, dst, 0xff, tmp, ctx);
			emit_alu3_K(SRL, dst, 8, dst, ctx);
			emit_alu3_K(AND, dst, 0xff, dst, ctx);
			emit_alu3_K(SLL, tmp, 8, tmp, ctx);
			emit_alu(OR, tmp, dst, ctx);
			break;

		case 32:
			ctx->tmp_2_used = true;
			emit_alu3_K(SRL, dst, 24, tmp, ctx);	/* tmp  = dst >> 24 */
			emit_alu3_K(SRL, dst, 16, tmp2, ctx);	/* tmp2 = dst >> 16 */
			emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */
			emit_alu3_K(SLL, tmp2, 8, tmp2, ctx);	/* tmp2 = tmp2 << 8 */
			emit_alu(OR, tmp2, tmp, ctx);		/* tmp  = tmp | tmp2 */
			emit_alu3_K(SRL, dst, 8, tmp2, ctx);	/* tmp2 = dst >> 8 */
			emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */
			emit_alu3_K(SLL, tmp2, 16, tmp2, ctx);	/* tmp2 = tmp2 << 16 */
			emit_alu(OR, tmp2, tmp, ctx);		/* tmp  = tmp | tmp2 */
			emit_alu3_K(AND, dst, 0xff, dst, ctx);	/* dst  = dst & 0xff */
			emit_alu3_K(SLL, dst, 24, dst, ctx);	/* dst  = dst << 24 */
			emit_alu(OR, tmp, dst, ctx);		/* dst  = dst | tmp */
			break;

		case 64:
			emit_alu3_K(ADD, SP, STACK_BIAS + 128, tmp, ctx);
			emit(ST64 | RS1(tmp) | RS2(G0) | RD(dst), ctx);
			emit(LD64A | ASI(ASI_PL) | RS1(tmp) | RS2(G0) | RD(dst), ctx);
			break;
		}
		break;
	}
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
		emit_loadimm32(imm, dst, ctx);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_loadimm_sext(imm, dst, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_alu_K(ADD, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_alu_K(SUB, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_alu_K(AND, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_alu_K(OR, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_alu_K(XOR, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_MUL | BPF_K:
		emit_alu_K(MUL, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_alu_K(MULX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
		if (imm == 0)
			return -EINVAL;

		emit_write_y(G0, ctx);
		emit_alu_K(DIV, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_DIV | BPF_K:
		if (imm == 0)
			return -EINVAL;

		emit_alu_K(UDIVX, dst, imm, ctx);
		break;
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K: {
		const u8 tmp = bpf2sparc[TMP_REG_2];
		unsigned int div;

		if (imm == 0)
			return -EINVAL;

		div = (BPF_CLASS(code) == BPF_ALU64) ? UDIVX : DIV;

		ctx->tmp_2_used = true;

		if (BPF_CLASS(code) != BPF_ALU64)
			emit_write_y(G0, ctx);
		if (is_simm13(imm)) {
			emit(div | IMMED | RS1(dst) | S13(imm) | RD(tmp), ctx);
			emit(MULX | IMMED | RS1(tmp) | S13(imm) | RD(tmp), ctx);
			emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx);
		} else {
			const u8 tmp1 = bpf2sparc[TMP_REG_1];

			ctx->tmp_1_used = true;

			emit_set_const_sext(imm, tmp1, ctx);
			emit(div | RS1(dst) | RS2(tmp1) | RD(tmp), ctx);
			emit(MULX | RS1(tmp) | RS2(tmp1) | RD(tmp), ctx);
			emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx);
		}
		goto do_alu32_trunc;
	}
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_alu_K(SLL, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_alu_K(SLLX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_alu_K(SRL, dst, imm, ctx);
		break;
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_alu_K(SRLX, dst, imm, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_alu_K(SRA, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_alu_K(SRAX, dst, imm, ctx);
		break;

	do_alu32_trunc:
		if (BPF_CLASS(code) == BPF_ALU)
			emit_alu_K(SRL, dst, 0, ctx);
		break;
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		emit_branch(BA, ctx->idx, ctx->offset[i + off], ctx);
		emit_nop(ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X: {
		int err;

		err = emit_compare_and_branch(code, dst, src, 0, false, i + off, ctx);
		if (err)
			return err;
		break;
	}
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K: {
		int err;

		err = emit_compare_and_branch(code, dst, 0, imm, true, i + off, ctx);
		if (err)
			return err;
		break;
	}
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		u8 *func = ((u8 *)__bpf_call_base) + imm;

		ctx->saw_call = true;
		if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
			emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);

		emit_call((u32 *)func, ctx);
		emit_nop(ctx);

		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);

		if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
			load_skb_regs(ctx, L7);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		emit_tail_call(ctx);
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		emit_branch(BA, ctx->idx, ctx->epilogue_offset, ctx);
		emit_nop(ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_loadimm64(imm64, dst, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		u32 opcode = 0, rs2;

		ctx->tmp_1_used = true;
		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = LD32;
			break;
		case BPF_H:
			opcode = LD16;
			break;
		case BPF_B:
			opcode = LD8;
			break;
		case BPF_DW:
			opcode = LD64;
			break;
		}

		if (is_simm13(off)) {
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(src) | rs2 | RD(dst), ctx);
		break;
	}
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		u32 opcode = 0, rs2;

		ctx->tmp_2_used = true;
		emit_loadimm(imm, tmp2, ctx);

		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = ST32;
			break;
		case BPF_H:
			opcode = ST16;
			break;
		case BPF_B:
			opcode = ST8;
			break;
		case BPF_DW:
			opcode = ST64;
			break;
		}

		if (is_simm13(off)) {
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			ctx->tmp_1_used = true;
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(dst) | rs2 | RD(tmp2), ctx);
		break;
	}
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		u32 opcode = 0, rs2;

		switch (BPF_SIZE(code)) {
		case BPF_W:
			opcode = ST32;
			break;
		case BPF_H:
			opcode = ST16;
			break;
		case BPF_B:
			opcode = ST8;
			break;
		case BPF_DW:
			opcode = ST64;
			break;
		}
		if (is_simm13(off)) {
			opcode |= IMMED;
			rs2 = S13(off);
		} else {
			ctx->tmp_1_used = true;
			emit_loadimm(off, tmp, ctx);
			rs2 = RS2(tmp);
		}
		emit(opcode | RS1(dst) | rs2 | RD(src), ctx);
		break;
	}
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		const u8 tmp3 = bpf2sparc[TMP_REG_3];

		ctx->tmp_1_used = true;
		ctx->tmp_2_used = true;
		ctx->tmp_3_used = true;
		emit_loadimm(off, tmp, ctx);
		emit_alu3(ADD, dst, tmp, tmp, ctx);

		emit(LD32 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx);
		emit_alu3(ADD, tmp2, src, tmp3, ctx);
		emit(CAS | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx);
		emit_cmp(tmp2, tmp3, ctx);
		emit_branch(BNE, 4, 0, ctx);
		emit_nop(ctx);
		break;
	}
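	/* The XADD sequence above is a retry loop: tmp2 holds the value loaded
	 * from memory and tmp3 the incremented value we want to store.  CAS
	 * writes tmp3 only if memory still equals tmp2 and always returns the
	 * old memory value in tmp3; emit_branch(BNE, 4, 0, ...) encodes a
	 * fixed backwards displacement of four instructions, i.e. back to the
	 * initial load, so the update is retried until the CAS succeeds.
	 */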
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		const u8 tmp3 = bpf2sparc[TMP_REG_3];

		ctx->tmp_1_used = true;
		ctx->tmp_2_used = true;
		ctx->tmp_3_used = true;
		emit_loadimm(off, tmp, ctx);
		emit_alu3(ADD, dst, tmp, tmp, ctx);

		emit(LD64 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx);
		emit_alu3(ADD, tmp2, src, tmp3, ctx);
		emit(CASX | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx);
		emit_cmp(tmp2, tmp3, ctx);
		emit_branch(BNE, 4, 0, ctx);
		emit_nop(ctx);
		break;
	}
#define CHOOSE_LOAD_FUNC(K, func) \
		((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_word);
		goto common_load;
	case BPF_LD | BPF_ABS | BPF_H:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_half);
		goto common_load;
	case BPF_LD | BPF_ABS | BPF_B:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_byte);
		goto common_load;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
		func = bpf_jit_load_word;
		goto common_load;
	case BPF_LD | BPF_IND | BPF_H:
		func = bpf_jit_load_half;
		goto common_load;

	case BPF_LD | BPF_IND | BPF_B:
		func = bpf_jit_load_byte;
	common_load:
		ctx->saw_ld_abs_ind = true;

		emit_reg_move(bpf2sparc[BPF_REG_6], O0, ctx);
		emit_loadimm(imm, O1, ctx);

		if (BPF_MODE(code) == BPF_IND)
			emit_alu(ADD, src, O1, ctx);

		emit_call(func, ctx);
		emit_alu_K(SRA, O1, 0, ctx);

		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
		break;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}
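/* As the common_load path above shows, the packet-access stubs
 * (bpf_jit_load_word/half/byte) take the skb pointer in %o0 and the offset
 * in %o1, and the (network-to-host converted) result comes back in %o0
 * before being moved into the BPF R0 register.
 */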
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);

		if (ret > 0) {
			i++;
			ctx->offset[i] = ctx->idx;
			continue;
		}
		ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}

	return 0;
}
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = 0x91d02005; /* ta 5 */
}
struct sparc64_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};
*bpf_int_jit_compile(struct bpf_prog
*prog
)
1500 struct bpf_prog
*tmp
, *orig_prog
= prog
;
1501 struct sparc64_jit_data
*jit_data
;
1502 struct bpf_binary_header
*header
;
1503 bool tmp_blinded
= false;
1504 bool extra_pass
= false;
1510 if (!prog
->jit_requested
)
1513 tmp
= bpf_jit_blind_constants(prog
);
1514 /* If blinding was requested and we failed during blinding,
1515 * we must fall back to the interpreter.
1524 jit_data
= prog
->aux
->jit_data
;
1526 jit_data
= kzalloc(sizeof(*jit_data
), GFP_KERNEL
);
1531 prog
->aux
->jit_data
= jit_data
;
1533 if (jit_data
->ctx
.offset
) {
1534 ctx
= jit_data
->ctx
;
1535 image_ptr
= jit_data
->image
;
1536 header
= jit_data
->header
;
1538 image_size
= sizeof(u32
) * ctx
.idx
;
1542 memset(&ctx
, 0, sizeof(ctx
));
1545 ctx
.offset
= kcalloc(prog
->len
, sizeof(unsigned int), GFP_KERNEL
);
1546 if (ctx
.offset
== NULL
) {
1551 /* Fake pass to detect features used, and get an accurate assessment
1552 * of what the final image size will be.
1554 if (build_body(&ctx
)) {
1558 build_prologue(&ctx
);
1559 build_epilogue(&ctx
);
1561 /* Now we know the actual image size. */
1562 image_size
= sizeof(u32
) * ctx
.idx
;
1563 header
= bpf_jit_binary_alloc(image_size
, &image_ptr
,
1564 sizeof(u32
), jit_fill_hole
);
1565 if (header
== NULL
) {
1570 ctx
.image
= (u32
*)image_ptr
;
1572 for (pass
= 1; pass
< 3; pass
++) {
1575 build_prologue(&ctx
);
1577 if (build_body(&ctx
)) {
1578 bpf_jit_binary_free(header
);
1583 build_epilogue(&ctx
);
1585 if (bpf_jit_enable
> 1)
1586 pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c%c]\n", pass
,
1587 image_size
- (ctx
.idx
* 4),
1588 ctx
.tmp_1_used
? '1' : ' ',
1589 ctx
.tmp_2_used
? '2' : ' ',
1590 ctx
.tmp_3_used
? '3' : ' ',
1591 ctx
.saw_ld_abs_ind
? 'L' : ' ',
1592 ctx
.saw_frame_pointer
? 'F' : ' ',
1593 ctx
.saw_call
? 'C' : ' ',
1594 ctx
.saw_tail_call
? 'T' : ' ');
1597 if (bpf_jit_enable
> 1)
1598 bpf_jit_dump(prog
->len
, image_size
, pass
, ctx
.image
);
1600 bpf_flush_icache(header
, (u8
*)header
+ (header
->pages
* PAGE_SIZE
));
1602 if (!prog
->is_func
|| extra_pass
) {
1603 bpf_jit_binary_lock_ro(header
);
1605 jit_data
->ctx
= ctx
;
1606 jit_data
->image
= image_ptr
;
1607 jit_data
->header
= header
;
1610 prog
->bpf_func
= (void *)ctx
.image
;
1612 prog
->jited_len
= image_size
;
1614 if (!prog
->is_func
|| extra_pass
) {
1618 prog
->aux
->jit_data
= NULL
;
1622 bpf_jit_prog_release_other(prog
, prog
== orig_prog
?