1 #include <asm/ptrace.h>
/* Offset of the scratch buffer within the register-window save area, and a
 * "branch if equal" macro for pointer compares.  Two variants of each appear
 * below; NOTE(review): presumably selected by #if/#else on 64-bit vs 32-bit
 * sparc (the guard lines are not visible in this view) -- confirm against
 * the full file.  The 64-bit variant applies STACK_BIAS and uses the %xcc
 * (64-bit) condition codes with a "predict not taken" hint.
 */
7 #define SCRATCH_OFF STACK_BIAS + 128
8 #define BE_PTR(label) be,pn %xcc, label
/* 32-bit variant: no stack bias, plain "be" (32-bit condition codes). */
11 #define SCRATCH_OFF 72
12 #define BE_PTR(label) be label
/* Most negative packet offset a classic BPF program may use. */
15 #define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */
/* bpf_jit_load_word: load a 32-bit big-endian word from the packet at
 * offset r_OFF into r_A.  Called from JIT-generated code, not via the
 * normal C calling convention.  Negative offsets divert to the slow
 * negative-offset path.
 * NOTE(review): the compare that sets the condition codes for the first
 * branch, the length check against 4, the alignment test (andcc), the
 * load_word_unaligned: label, the byte shifts, and several delay-slot
 * instructions fall on lines not visible in this view -- confirm against
 * the full file.
 */
18 .globl bpf_jit_load_word
21 bl bpf_slow_path_word_neg	/* offset < 0: take the negative-offset path */
23 .globl bpf_jit_load_word_positive_offset
24 bpf_jit_load_word_positive_offset:
25 sub r_HEADLEN, r_OFF, r_TMP	/* r_TMP = linear bytes available past r_OFF */
27 ble bpf_slow_path_word	/* too few linear bytes: copy via slow path */
28 add r_SKB_DATA, r_OFF, r_TMP	/* r_TMP = address of the word in the skb */
30 bne load_word_unaligned	/* address not word-aligned (test not shown) */
35 ldub [r_TMP + 0x0], r_OFF	/* unaligned path: assemble the word ... */
36 ldub [r_TMP + 0x1], r_TMP2	/* ... one byte at a time, big-endian; */
38 or r_OFF, r_TMP2, r_OFF	/* accumulate into r_OFF (shifts not shown) */
39 ldub [r_TMP + 0x2], r_TMP2
41 or r_OFF, r_TMP2, r_OFF
42 ldub [r_TMP + 0x3], r_TMP2
/* bpf_jit_load_half: load a 16-bit big-endian halfword from the packet at
 * offset r_OFF into r_A.  Same structure as the word loader above.
 * NOTE(review): the sign check, length check against 2, alignment test,
 * load_half_unaligned: label, shift, and delay slots are on lines not
 * visible in this view -- confirm against the full file.
 */
47 .globl bpf_jit_load_half
50 bl bpf_slow_path_half_neg	/* offset < 0: take the negative-offset path */
52 .globl bpf_jit_load_half_positive_offset
53 bpf_jit_load_half_positive_offset:
54 sub r_HEADLEN, r_OFF, r_TMP	/* r_TMP = linear bytes available past r_OFF */
56 ble bpf_slow_path_half	/* too few linear bytes: copy via slow path */
57 add r_SKB_DATA, r_OFF, r_TMP	/* r_TMP = address of the halfword */
59 bne load_half_unaligned	/* address not 2-byte aligned (test not shown) */
64 ldub [r_TMP + 0x0], r_OFF	/* unaligned path: assemble big-endian */
65 ldub [r_TMP + 0x1], r_TMP2	/* halfword from two byte loads */
/* bpf_jit_load_byte: load one byte from the packet at offset r_OFF into
 * r_A.  No alignment concerns for a single byte.
 * NOTE(review): the sign check and the compare feeding the bge (presumably
 * r_OFF vs r_HEADLEN) are on lines not visible in this view.
 */
70 .globl bpf_jit_load_byte
73 bl bpf_slow_path_byte_neg	/* offset < 0: take the negative-offset path */
75 .globl bpf_jit_load_byte_positive_offset
76 bpf_jit_load_byte_positive_offset:
78 bge bpf_slow_path_byte	/* offset beyond linear data: slow path */
81 ldub [r_SKB_DATA + r_OFF], r_A	/* direct single-byte load */
/* bpf_jit_load_byte_msh: BPF_MSH support -- load the byte at offset r_OFF
 * and mask its low nibble (classically the IP header-length field); the
 * result is left in r_OFF.
 * NOTE(review): the masking (and r_OFF, 0xf, r_OFF), any shift, and the
 * return sequence are on lines not visible in this view -- confirm.
 */
83 .globl bpf_jit_load_byte_msh
84 bpf_jit_load_byte_msh:
86 bl bpf_slow_path_byte_msh_neg	/* offset < 0: negative-offset path */
88 .globl bpf_jit_load_byte_msh_positive_offset
89 bpf_jit_load_byte_msh_positive_offset:
91 bge bpf_slow_path_byte_msh	/* offset beyond linear data: slow path */
93 ldub [r_SKB_DATA + r_OFF], r_OFF	/* result accumulates in r_OFF */
/* bpf_slow_path_common(LEN): shared slow path.  Opens a new register
 * window, then calls skb_copy_bits(skb, off, scratch, LEN) to copy LEN
 * packet bytes into the scratch slot of the caller's frame (%o2 = the
 * scratch buffer address, %fp + SCRATCH_OFF).
 * NOTE(review): the %o0/%o1/%o3 argument setup, the error check on the
 * return value, and the restore are on macro lines not visible in this
 * view.  No trailing comments below: each line must end in '\'.
 */
98 #define bpf_slow_path_common(LEN) \
99 save %sp, -SAVE_SZ, %sp; \
102 add %fp, SCRATCH_OFF, %o2; \
103 call skb_copy_bits; \
/* Word slow path: copy 4 bytes via skb_copy_bits, then reload the
 * big-endian word from the scratch slot into r_A.  After the macro's
 * window restore, %sp + SCRATCH_OFF addresses the same slot the callee
 * wrote through %fp + SCRATCH_OFF.
 * NOTE(review): the bpf_slow_path_word: label is not visible in this view.
 */
109 bpf_slow_path_common(4)
111 ld [%sp + SCRATCH_OFF], r_A
/* Halfword slow path: copy 2 bytes, reload as an unsigned halfword.
 * NOTE(review): the bpf_slow_path_half: label is not visible in this view.
 */
115 bpf_slow_path_common(2)
117 lduh [%sp + SCRATCH_OFF], r_A
/* Byte slow path: copy 1 byte, reload as an unsigned byte.
 * NOTE(review): the bpf_slow_path_byte: label is not visible in this view.
 */
121 bpf_slow_path_common(1)
123 ldub [%sp + SCRATCH_OFF], r_A
/* BPF_MSH slow path: copy the byte via skb_copy_bits, then keep only its
 * low nibble in r_OFF (the MSH result register).
 * NOTE(review): the move from r_A into r_OFF / any shift is on lines not
 * visible in this view -- confirm against the full file.
 */
126 bpf_slow_path_byte_msh:
127 bpf_slow_path_common(1)
129 ldub [%sp + SCRATCH_OFF], r_A
130 and r_OFF, 0xf, r_OFF	/* MSH semantics: result = byte & 0xf */
/* bpf_negative_common(LEN): shared negative-offset path.  Opens a new
 * register window and calls bpf_internal_load_pointer_neg_helper() to
 * resolve a pointer for a negative (SKF_LL_OFF-style) packet offset.
 * NOTE(review): argument setup, the NULL-pointer check on the result, and
 * the restore are on macro lines not visible in this view.  No trailing
 * comments below: each line must end in '\'.
 */
134 #define bpf_negative_common(LEN) \
135 save %sp, -SAVE_SZ, %sp; \
138 call bpf_internal_load_pointer_neg_helper; \
/* Negative-offset word load.  First range-check the offset against
 * SKF_MAX_NEG_OFF (compare/branch using r_TMP is on lines not visible in
 * this view), then resolve the pointer via the helper and load the word,
 * reusing the unaligned byte-assembly path when needed.
 */
145 bpf_slow_path_word_neg:
146 sethi %hi(SKF_MAX_NEG_OFF), r_TMP	/* r_TMP = high bits of the limit */
150 .globl bpf_jit_load_word_negative_offset
151 bpf_jit_load_word_negative_offset:
152 bpf_negative_common(4)
154 bne load_word_unaligned	/* pointer not word-aligned (test not shown) */
/* Negative-offset halfword load: same shape as the word variant above,
 * falling back to the byte-assembly path for unaligned pointers.
 */
159 bpf_slow_path_half_neg:
160 sethi %hi(SKF_MAX_NEG_OFF), r_TMP	/* r_TMP = high bits of the limit */
164 .globl bpf_jit_load_half_negative_offset
165 bpf_jit_load_half_negative_offset:
166 bpf_negative_common(2)
168 bne load_half_unaligned	/* pointer not 2-byte aligned (test not shown) */
/* Negative-offset byte load: no alignment concerns, so the helper's
 * pointer is dereferenced directly (the ldub and return sequence are on
 * lines not visible in this view).
 */
173 bpf_slow_path_byte_neg:
174 sethi %hi(SKF_MAX_NEG_OFF), r_TMP	/* r_TMP = high bits of the limit */
178 .globl bpf_jit_load_byte_negative_offset
179 bpf_jit_load_byte_negative_offset:
180 bpf_negative_common(1)
/* Negative-offset BPF_MSH load: resolve the pointer via the helper, load
 * the byte, and keep only its low nibble in r_OFF.
 * NOTE(review): the ldub and the move into r_OFF are on lines not visible
 * in this view -- confirm against the full file.
 */
184 bpf_slow_path_byte_msh_neg:
185 sethi %hi(SKF_MAX_NEG_OFF), r_TMP	/* r_TMP = high bits of the limit */
189 .globl bpf_jit_load_byte_msh_negative_offset
190 bpf_jit_load_byte_msh_negative_offset:
191 bpf_negative_common(1)
193 and r_OFF, 0xf, r_OFF	/* MSH semantics: result = byte & 0xf */
198 /* Make the JIT program return zero. The JIT epilogue
199 * stores away the original %o7 into r_saved_O7. The
200 * normal leaf function return is to use "retl" which
201 * would evaluate to "jmpl %o7 + 8, %g0" but we want to
202 * use the saved value thus the sequence you see here.
 * NOTE(review): the closing of this comment, the error label, and the
 * delay-slot instruction after the jmpl (presumably the zeroing of the
 * return register) are on lines not visible in this view -- confirm.
 */
204 jmpl r_saved_O7 + 8, %g0	/* return through the saved return address */