arch/sparc/net/bpf_jit_asm.S

#include <asm/ptrace.h>

#include "bpf_jit.h"

/* Stack-frame size, the scratch-buffer offset within the frame, and a
 * couple of instruction idioms differ between the 64-bit and 32-bit
 * ABIs, so abstract them here.
 */
#ifdef CONFIG_SPARC64
#define SAVE_SZ         176
#define SCRATCH_OFF     STACK_BIAS + 128
#define BE_PTR(label)   be,pn %xcc, label
#define SIGN_EXTEND(reg)        sra reg, 0, reg
#else
#define SAVE_SZ         96
#define SCRATCH_OFF     72
#define BE_PTR(label)   be label
#define SIGN_EXTEND(reg)
#endif

#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */
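
/* Negative offsets address classic BPF's special areas; anything below
 * SKF_LL_OFF is out of range, and each negative-offset entry point
 * below rejects such offsets up front.  Roughly, as a C sketch (not
 * kernel code):
 *
 *	if (off < SKF_MAX_NEG_OFF)	// i.e. off < SKF_LL_OFF
 *		goto bpf_error;
 */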

	/* These helpers are called from JIT'ed BPF programs.  The packet
	 * offset arrives in r_OFF, the loaded value is returned in r_A
	 * (r_X for the MSH variant), and any failure branches to
	 * bpf_error, which makes the whole filter program return zero.
	 */
	.text
	.globl	bpf_jit_load_word
bpf_jit_load_word:
	cmp	r_OFF, 0
	bl	bpf_slow_path_word_neg
	 nop
	.globl	bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP		! linear bytes past the offset
	cmp	r_TMP, 3
	ble	bpf_slow_path_word		! fewer than 4 bytes linear
	 add	r_SKB_DATA, r_OFF, r_TMP
	andcc	r_TMP, 3, %g0			! word aligned?
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A
load_word_unaligned:
	/* Assemble the big-endian word one byte at a time. */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x2], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x3], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A
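
/* load_word_unaligned is just a byte-at-a-time big-endian load.  As a
 * C sketch (hypothetical helper, not kernel code):
 *
 *	static inline u32 load_be32_unaligned(const u8 *p)
 *	{
 *		return ((u32)p[0] << 24) | ((u32)p[1] << 16) |
 *		       ((u32)p[2] << 8)  |  (u32)p[3];
 *	}
 */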

	.globl	bpf_jit_load_half
bpf_jit_load_half:
	cmp	r_OFF, 0
	bl	bpf_slow_path_half_neg
	 nop
	.globl	bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP		! linear bytes past the offset
	cmp	r_TMP, 1
	ble	bpf_slow_path_half		! fewer than 2 bytes linear
	 add	r_SKB_DATA, r_OFF, r_TMP
	andcc	r_TMP, 1, %g0			! halfword aligned?
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A
load_half_unaligned:
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A

	.globl	bpf_jit_load_byte
bpf_jit_load_byte:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_neg
	 nop
	.globl	bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte
	 nop
	retl
	 ldub	[r_SKB_DATA + r_OFF], r_A
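
/* The byte loader is the simplest case and shows the shape shared by
 * all the fast paths above.  A rough C sketch (illustrative names, not
 * kernel code):
 *
 *	if (off < 0)
 *		goto negative_path;	// bpf_jit_load_byte_neg
 *	if (off >= headlen)
 *		goto slow_path;		// data is paged/fragmented
 *	A = skb->data[off];
 */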

	.globl	bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_msh_neg
	 nop
	.globl	bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte_msh
	 nop
	ldub	[r_SKB_DATA + r_OFF], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X
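
/* The MSH variant implements classic BPF's "ldxb 4*([k]&0xf)",
 * typically used to fetch the IPv4 header length.  As a C sketch:
 *
 *	X = (pkt[off] & 0xf) << 2;	// IHL in words -> bytes
 */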

/* Call skb_copy_bits(skb, off, scratch, LEN) in a fresh register
 * window.  The "cmp %o0, 0" sets the condition codes on the return
 * value; "restore" does not touch them, so the branch immediately
 * after the macro still sees the result of the comparison.
 */
#define bpf_slow_path_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	r_OFF, %o1;		\
	add	%fp, SCRATCH_OFF, %o2;	\
	call	skb_copy_bits;		\
	 mov	(LEN), %o3;		\
	cmp	%o0, 0;			\
	restore;
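
/* Roughly what each bpf_slow_path_* body below does, as a C sketch
 * (skb_copy_bits() is the real kernel API; the rest is illustrative):
 *
 *	u32 scratch;
 *	if (skb_copy_bits(skb, off, &scratch, len) < 0)
 *		goto bpf_error;
 *	A = scratch;	// ld/lduh/ldub from the stack scratch slot
 */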

bpf_slow_path_word:
	bpf_slow_path_common(4)
	bl	bpf_error
	/* The load sits in the branch delay slot, so it runs either
	 * way; on error the value is simply ignored.  After restore,
	 * the helper window's %fp + SCRATCH_OFF is our %sp +
	 * SCRATCH_OFF, i.e. the same scratch slot.
	 */
	 ld	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_half:
	bpf_slow_path_common(2)
	bl	bpf_error
	 lduh	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_byte:
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_byte_msh:
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_OFF	! loaded byte, not the offset
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X

/* Call bpf_internal_load_pointer_neg_helper(skb, off, LEN), which
 * returns a pointer into the appropriate area or NULL.  SIGN_EXTEND
 * widens the 32-bit offset for the 64-bit ABI; on NULL we branch to
 * bpf_error with the window restore in the delay slot.
 */
#define bpf_negative_common(LEN)			\
	save	%sp, -SAVE_SZ, %sp;			\
	mov	%i0, %o0;				\
	mov	r_OFF, %o1;				\
	SIGN_EXTEND(%o1);				\
	call	bpf_internal_load_pointer_neg_helper;	\
	 mov	(LEN), %o2;				\
	mov	%o0, r_TMP;				\
	cmp	%o0, 0;					\
	BE_PTR(bpf_error);				\
	 restore;
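
/* In C, the negative-offset path is roughly (a sketch;
 * bpf_internal_load_pointer_neg_helper() is the real helper from
 * net/core/filter.c):
 *
 *	void *p = bpf_internal_load_pointer_neg_helper(skb, off, len);
 *	if (!p)
 *		goto bpf_error;
 *	// then load the possibly unaligned value at p
 */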

/* Each *_neg entry rejects offsets below SKF_MAX_NEG_OFF, then falls
 * through to the corresponding negative-offset loader.
 */
bpf_slow_path_word_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
	bpf_negative_common(4)
	andcc	r_TMP, 3, %g0
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A

bpf_slow_path_half_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
	bpf_negative_common(2)
	andcc	r_TMP, 1, %g0
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A

bpf_slow_path_byte_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
	bpf_negative_common(1)
	retl
	 ldub	[r_TMP], r_A

bpf_slow_path_byte_msh_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
	bpf_negative_common(1)
	ldub	[r_TMP], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X

bpf_error:
	/* Make the JIT program return zero.  The JIT prologue
	 * stores away the original %o7 into r_saved_O7.  The
	 * normal leaf function return is to use "retl", which
	 * would evaluate to "jmpl %o7 + 8, %g0", but we want to
	 * use the saved value, thus the sequence you see here.
	 */
	jmpl	r_saved_O7 + 8, %g0
	 clr	%o0