2 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
5 * Copyright (C) 2015 Imagination Technologies Ltd.
6 * Author: Markos Chandras <markos.chandras@imgtec.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; version 2 of the License.
14 #include <asm/regdef.h>
19 * r_skb_hl skb header length
21 * r_off(a1) offset register
27 * r_s0 Scratch register 0
28 * r_s1 Scratch register 1
32 * a1: offset (imm or imm + X)
34 * All non-BPF-ABI registers are free for use. On return, we only
35 * care about r_ret. The BPF-ABI registers are assumed to remain
36 * unmodified during the entire filter operation.
/* Local mirror of SKF_LL_OFF from <linux/filter.h>: offsets at or below
 * this magic value select link-layer data rather than packet payload. */
41 #define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
43 /* We know better :) so prevent assembler reordering etc */
/*
 * Divert to the negative-offset slow path (bpf_slow_path_<TYPE>_neg)
 * when the requested offset is negative; fall through otherwise.
 * NOTE(review): the compare that sets t0 is not visible in this chunk --
 * confirm against the full file.  The taken branch has a delay slot, so
 * whatever the caller places next executes regardless of the outcome.
 */
46 #define is_offset_negative(TYPE) \
47 /* If offset is negative we have more work to do */ \
49 bgtz t0, bpf_slow_path_##TYPE##_neg; \
50 /* Be careful what follows in DS. */
/*
 * Fall through when the SIZE-byte read at 'offset' fits entirely inside
 * the skb header (length in $r_skb_hl); otherwise branch to the generic
 * slow path bpf_slow_path_<TYPE>.
 * Computes t0 = ((skb_hl - SIZE) < offset) and branches when set.
 * The taken branch has a delay slot -- callers must mind what follows.
 */
52 #define is_offset_in_header(SIZE, TYPE) \
53 /* Reading from header? */ \
54 addiu $r_s0, $r_skb_hl, -SIZE; \
55 slt t0, $r_s0, offset; \
56 bgtz t0, bpf_slow_path_##TYPE; \
/*
 * sk_load_word: load a 32-bit word from the packet into the BPF
 * accumulator.  Negative offsets divert to bpf_slow_path_word_neg;
 * offsets without 4 bytes of header room divert to bpf_slow_path_word.
 * Fast path: t1 = skb->data + offset, then a direct load.
 * NOTE(review): the load itself and the little-endian byteswap under the
 * #ifdef are outside this chunk -- confirm against the full file.
 */
59 is_offset_negative(word)
60 .globl sk_load_word_positive
61 sk_load_word_positive:
62 is_offset_in_header(4, word)
63 /* Offset within header boundaries */
64 PTR_ADDU t1, $r_skb_data, offset
66 #ifdef CONFIG_CPU_LITTLE_ENDIAN
/*
 * sk_load_half: load a 16-bit halfword from the packet.  Negative
 * offsets divert to bpf_slow_path_half_neg; offsets without 2 bytes of
 * header room divert to bpf_slow_path_half.
 * Fast path: t1 = skb->data + offset, then a direct load.
 * NOTE(review): the load and little-endian byteswap under the #ifdef are
 * outside this chunk -- confirm against the full file.
 */
75 is_offset_negative(half)
76 .globl sk_load_half_positive
77 sk_load_half_positive:
78 is_offset_in_header(2, half)
79 /* Offset within header boundaries */
80 PTR_ADDU t1, $r_skb_data, offset
82 #ifdef CONFIG_CPU_LITTLE_ENDIAN
/*
 * sk_load_byte: load a single byte from the packet.  Negative offsets
 * divert to bpf_slow_path_byte_neg; offsets without 1 byte of header
 * room divert to bpf_slow_path_byte.  No endianness handling is needed
 * for a single byte.  Fast path: t1 = skb->data + offset, then a direct
 * load (the load itself is outside this chunk).
 */
91 is_offset_negative(byte)
92 .globl sk_load_byte_positive
93 sk_load_byte_positive:
94 is_offset_in_header(1, byte)
95 /* Offset within header boundaries */
96 PTR_ADDU t1, $r_skb_data, offset
103 * call skb_copy_bits:
104 * (prototype in linux/skbuff.h)
106 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
108 * o32 mandates we leave 4 spaces for argument registers in case
109 * the callee needs to use them. Even though we don't care about
110 * the argument registers ourselves, we need to allocate that space
111 * to remain ABI compliant since the callee may want to use that space.
112 * We also allocate 2 more spaces for $r_ra and our return register (*to).
114 * n64 is a bit different. The *caller* will allocate the space to preserve
115 * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
116 * good reason but it does not matter that much really.
118 * (void *to) is returned in r_s0
/*
 * Common slow path: bounds-check offset against skb->len - SIZE, then
 * call skb_copy_bits(skb, offset, to, SIZE) with a 6*SZREG stack frame:
 * four argument-save slots mandated by the o32 ABI, one slot for $r_ra
 * and one for the on-stack destination buffer ("to"), which is zeroed
 * before the call and reloaded into $r_s0 afterwards.
 * NOTE(review): the branch on the sltu result, the a2 argument setup and
 * the jalr through t0 are not visible in this chunk -- confirm against
 * the full file.
 */
121 #define bpf_slow_path_common(SIZE) \
122 /* Quick check. Are we within reasonable boundaries? */ \
123 LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
124 sltu $r_s0, offset, $r_s1; \
126 /* Load 4th argument in DS */ \
127 LONG_ADDIU a3, zero, SIZE; \
128 PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
129 PTR_LA t0, skb_copy_bits; \
130 PTR_S $r_ra, (5 * SZREG)($r_sp); \
131 /* Assign low slot to a2 */ \
134 /* Reset our destination slot (DS but it's ok) */ \
135 INT_S zero, (4 * SZREG)($r_sp); \
137 * skb_copy_bits returns 0 on success and -EFAULT \
138 * on error. Our data live in a2. Do not bother with \
139 * our data if an error has been returned. \
141 /* Restore our frame */ \
142 PTR_L $r_ra, (5 * SZREG)($r_sp); \
143 INT_L $r_s0, (4 * SZREG)($r_sp); \
145 PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
/* Slow path for 32-bit loads: copy 4 bytes via skb_copy_bits, then swap
 * to host order on little-endian kernels (swap sequence not visible in
 * this chunk -- confirm against the full file). */
148 NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
149 bpf_slow_path_common(4)
150 #ifdef CONFIG_CPU_LITTLE_ENDIAN
158 END(bpf_slow_path_word)
/* Slow path for 16-bit loads: copy 2 bytes via skb_copy_bits, then swap
 * to host order on little-endian kernels (swap sequence not visible in
 * this chunk -- confirm against the full file). */
160 NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
161 bpf_slow_path_common(2)
162 #ifdef CONFIG_CPU_LITTLE_ENDIAN
169 END(bpf_slow_path_half)
/* Slow path for single-byte loads: copy 1 byte via skb_copy_bits.
 * No endianness handling is required for a single byte. */
171 NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
172 bpf_slow_path_common(1)
176 END(bpf_slow_path_byte)
179 * Negative entry points
/*
 * bpf_is_end_of_data: decide whether a negative offset refers to the
 * link-layer area (SKF_LL_OFF region) or is invalid.
 * NOTE(review): the compare/branch instructions are missing from this
 * chunk -- confirm against the full file.  The trailing comment warns
 * about the branch delay slot.
 */
181 .macro bpf_is_end_of_data
183 /* Reading link layer data? */
186 /* Be careful what follows in DS. */
189 * call bpf_internal_load_pointer_neg_helper:
190 * (prototype in linux/filter.h)
192 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
193 * int k, unsigned int size)
195 * see above (bpf_slow_path_common) for ABI restrictions
/*
 * Call bpf_internal_load_pointer_neg_helper(skb, offset, SIZE) for
 * negative offsets, using the same 6*SZREG frame discipline as
 * bpf_slow_path_common ($r_ra saved in slot 5).  The returned pointer is
 * checked before use and preserved, per the comments below.
 * NOTE(review): the argument setup, the jalr through t0 and the actual
 * pointer-check/return instructions are not visible in this chunk --
 * confirm against the full file.
 */
197 #define bpf_negative_common(SIZE) \
198 PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
199 PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
200 PTR_S $r_ra, (5 * SZREG)($r_sp); \
203 PTR_L $r_ra, (5 * SZREG)($r_sp); \
204 /* Check return pointer */ \
206 PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
207 /* Preserve our pointer */ \
209 /* Set return value */ \
/* Negative-offset entry for 32-bit loads, reached from
 * is_offset_negative(word): fetch 4 bytes through the negative-offset
 * helper. */
212 bpf_slow_path_word_neg:
214 NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
215 bpf_negative_common(4)
218 END(sk_load_word_negative)
/* Negative-offset entry for 16-bit loads, reached from
 * is_offset_negative(half): fetch 2 bytes through the negative-offset
 * helper. */
220 bpf_slow_path_half_neg:
222 NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
223 bpf_negative_common(2)
226 END(sk_load_half_negative)
/* Negative-offset entry for single-byte loads, reached from
 * is_offset_negative(byte): fetch 1 byte through the negative-offset
 * helper. */
228 bpf_slow_path_byte_neg:
230 NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
231 bpf_negative_common(1)
234 END(sk_load_byte_negative)
/* Shared error exit (its label is not visible in this chunk): signal
 * failure by returning 1 in $r_ret.  NOTE(review): presumably followed
 * by a jr $r_ra with a delay-slot instruction -- confirm against the
 * full file. */
238 addiu $r_ret, zero, 1