x86/efi: Enforce CONFIG_RELOCATABLE for EFI boot stub
[linux/fpc-iii.git] / arch / powerpc / net / bpf_jit_64.S
blob7d3a3b5619a2b8f2528b61dc7077b4dd84c594d2
1 /* bpf_jit.S: Packet/header access helper functions
2  * for PPC64 BPF compiler.
3  *
4  * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; version 2
9  * of the License.
10  */
12 #include <asm/ppc_asm.h>
13 #include "bpf_jit.h"
/*
 16  * All of these routines are called directly from generated code,
17  * whose register usage is:
18  *
19  * r3           skb
20  * r4,r5        A,X
21  * r6           *** address parameter to helper ***
22  * r7-r10       scratch
23  * r14          skb->data
24  * r15          skb headlen
25  * r16-31       M[]
26  */
/*
 29  * To consider: These helpers are so small it could be better to just
30  * generate them inline.  Inline code can do the simple headlen check
31  * then branch directly to slow_path_XXX if required.  (In fact, could
32  * load a spare GPR with the address of slow_path_generic and pass size
33  * as an argument, making the call site a mtlr, li and bllr.)
34  */
/*
 * sk_load_word: load a 32-bit word from the packet at offset r_addr into r_A.
 * Called from JITed code with the register convention in the header comment
 * (r3 = skb, r_D/r_HL presumably map to r14/r15 per bpf_jit.h — confirm there).
 * Negative offsets divert to the SKF_* negative-offset slow path; offsets past
 * headlen - 4 divert to the skb_copy_bits slow path.
 * Success is signalled by returning with cr0 != LT; failure with cr0 = LT
 * (the generated call site does 'blt epilogue', see bpf_error).
 */
35         .globl  sk_load_word
36 sk_load_word:
37         cmpdi   r_addr, 0               /* negative offset => SKF_* special offset */
38         blt     bpf_slow_path_word_neg
39         .globl  sk_load_word_positive_offset
40 sk_load_word_positive_offset:
41         /* Are we accessing past headlen? */
42         subi    r_scratch1, r_HL, 4     /* last offset at which a whole word fits */
43         cmpd    r_scratch1, r_addr
44         blt     bpf_slow_path_word
45         /* Nope, just hitting the header.  cr0 here is eq or gt! */
46         lwzx    r_A, r_D, r_addr
47         /* When big endian we don't need to byteswap. */
48         blr     /* Return success, cr0 != LT */
/*
 * sk_load_half: as sk_load_word, but loads a 16-bit halfword into r_A.
 * Negative r_addr goes to the SKF_* slow path; r_addr beyond headlen - 2
 * goes to the skb_copy_bits slow path.  Returns cr0 != LT on success,
 * cr0 = LT on error.
 */
50         .globl  sk_load_half
51 sk_load_half:
52         cmpdi   r_addr, 0               /* negative offset => SKF_* special offset */
53         blt     bpf_slow_path_half_neg
54         .globl  sk_load_half_positive_offset
55 sk_load_half_positive_offset:
56         subi    r_scratch1, r_HL, 2     /* last offset at which a halfword fits */
57         cmpd    r_scratch1, r_addr
58         blt     bpf_slow_path_half
59         lhzx    r_A, r_D, r_addr
60         blr                             /* success, cr0 != LT */
/*
 * sk_load_byte: load a single byte at offset r_addr into r_A.
 * Needs r_addr < headlen (note 'ble': equal also takes the slow path,
 * since the byte at offset == headlen is already out of the header).
 * Returns cr0 != LT on success, cr0 = LT on error.
 */
62         .globl  sk_load_byte
63 sk_load_byte:
64         cmpdi   r_addr, 0               /* negative offset => SKF_* special offset */
65         blt     bpf_slow_path_byte_neg
66         .globl  sk_load_byte_positive_offset
67 sk_load_byte_positive_offset:
68         cmpd    r_HL, r_addr            /* headlen <= addr => out of header */
69         ble     bpf_slow_path_byte
70         lbzx    r_A, r_D, r_addr
71         blr                             /* success, cr0 != LT */
/*
 74  * BPF_S_LDX_B_MSH: ldxb  4*([offset]&0xf)
75  * r_addr is the offset value
76  */
/*
 * sk_load_byte_msh: BPF "load IP header length" helper — loads the byte at
 * offset r_addr and sets r_X = 4 * (byte & 0xf) (see the comment above:
 * ldxb 4*([offset]&0xf)).  Same bounds handling as sk_load_byte, but the
 * result lands in X rather than A.  cr0 != LT on success.
 */
77         .globl sk_load_byte_msh
78 sk_load_byte_msh:
79         cmpdi   r_addr, 0               /* negative offset => SKF_* special offset */
80         blt     bpf_slow_path_byte_msh_neg
81         .globl sk_load_byte_msh_positive_offset
82 sk_load_byte_msh_positive_offset:
83         cmpd    r_HL, r_addr            /* headlen <= addr => out of header */
84         ble     bpf_slow_path_byte_msh
85         lbzx    r_X, r_D, r_addr
86         rlwinm  r_X, r_X, 2, 32-4-2, 31-2       /* r_X = (byte & 0xf) << 2 */
87         blr                             /* success, cr0 != LT */
89 /* Call out to skb_copy_bits:
90  * We'll need to back up our volatile regs first; we have
91  * local variable space at r1+(BPF_PPC_STACK_BASIC).
92  * Allocate a new stack frame here to remain ABI-compliant in
93  * stashing LR.
94  */
/*
 * bpf_slow_path_common(SIZE): shared tail for the out-of-header slow paths.
 * Spills LR, the skb pointer, A and X; opens a fresh ABI-compliant stack
 * frame; then calls skb_copy_bits(skb /* r3, still live * /, r_addr, buf, SIZE)
 * with buf pointing at the caller-frame scratch area BPF_PPC_STACK_BASIC+16.
 * On return the frame is popped and A/X/LR restored.  A negative return
 * value (cmpdi leaves cr0 = LT) branches to bpf_error; on success cr0 != LT
 * and the copied bytes sit at BPF_PPC_STACK_BASIC+(2*8)(r1), ready for the
 * expansion site to load.  Clobbers r0 and the volatile argument regs.
 */
95 #define bpf_slow_path_common(SIZE)                              \
96         mflr    r0;                                             \
97         std     r0, 16(r1);                                     \
98         /* R3 goes in parameter space of caller's frame */      \
99         std     r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
100         std     r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
101         std     r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
102         addi    r5, r1, BPF_PPC_STACK_BASIC+(2*8);              \
103         stdu    r1, -BPF_PPC_SLOWPATH_FRAME(r1);                \
104         /* R3 = r_skb, as passed */                             \
105         mr      r4, r_addr;                                     \
106         li      r6, SIZE;                                       \
107         bl      skb_copy_bits;                                  \
108         nop;                                                    \
109         /* R3 = 0 on success */                                 \
110         addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
111         ld      r0, 16(r1);                                     \
112         ld      r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
113         ld      r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
114         mtlr    r0;                                             \
115         cmpdi   r3, 0;                                          \
116         blt     bpf_error;      /* cr0 = LT */                  \
117         ld      r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
118         /* Great success! */
/*
 * Word not fully inside the linear header: copy 4 bytes out via
 * skb_copy_bits, then load the result from the stack buffer into r_A.
 * Returns with cr0 != LT (success) — errors branch away inside the macro.
 */
120 bpf_slow_path_word:
121         bpf_slow_path_common(4)
122         /* Data value is on stack, and cr0 != LT */
123         lwz     r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
124         blr
/*
 * Halfword slow path: copy 2 bytes via skb_copy_bits, then load the
 * result from the stack buffer into r_A.  cr0 != LT on return.
 */
126 bpf_slow_path_half:
127         bpf_slow_path_common(2)
128         lhz     r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
129         blr
/*
 * Byte slow path: copy 1 byte via skb_copy_bits, then load it from the
 * stack buffer into r_A.  cr0 != LT on return.
 */
131 bpf_slow_path_byte:
132         bpf_slow_path_common(1)
133         lbz     r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
134         blr
/*
 * msh byte slow path: copy 1 byte via skb_copy_bits, then compute
 * r_X = (byte & 0xf) << 2, mirroring the fast path.  cr0 != LT on return.
 */
136 bpf_slow_path_byte_msh:
137         bpf_slow_path_common(1)
138         lbz     r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
139         rlwinm  r_X, r_X, 2, 32-4-2, 31-2       /* r_X = (byte & 0xf) << 2 */
140         blr
142 /* Call out to bpf_internal_load_pointer_neg_helper:
143  * We'll need to back up our volatile regs first; we have
144  * local variable space at r1+(BPF_PPC_STACK_BASIC).
145  * Allocate a new stack frame here to remain ABI-compliant in
146  * stashing LR.
147  */
/*
 * sk_negative_common(SIZE): shared tail for the negative-offset (SKF_*)
 * paths.  Same spill/frame dance as bpf_slow_path_common, but calls
 * bpf_internal_load_pointer_neg_helper(skb, r_addr, SIZE), which returns
 * a direct pointer to the data (or NULL).  NULL (cmpldi leaves cr0 = EQ)
 * branches to bpf_error_slow, which fabricates the cr0 = LT error state;
 * otherwise the pointer is moved into r_addr for the expansion site's
 * zero-offset load.  Clobbers r0 and the volatile argument regs.
 */
148 #define sk_negative_common(SIZE)                                \
149         mflr    r0;                                             \
150         std     r0, 16(r1);                                     \
151         /* R3 goes in parameter space of caller's frame */      \
152         std     r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
153         std     r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
154         std     r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
155         stdu    r1, -BPF_PPC_SLOWPATH_FRAME(r1);                \
156         /* R3 = r_skb, as passed */                             \
157         mr      r4, r_addr;                                     \
158         li      r5, SIZE;                                       \
159         bl      bpf_internal_load_pointer_neg_helper;           \
160         nop;                                                    \
161         /* R3 != 0 on success */                                \
162         addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
163         ld      r0, 16(r1);                                     \
164         ld      r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
165         ld      r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
166         mtlr    r0;                                             \
167         cmpldi  r3, 0;                                          \
168         beq     bpf_error_slow; /* cr0 = EQ */                  \
169         mr      r_addr, r3;                                     \
170         ld      r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
171         /* Great success! */
/*
 * Negative-offset word load: offsets below SKF_LL_OFF (lis -32 builds
 * -32 << 16 = -0x200000) are invalid and error out with cr0 already LT;
 * otherwise ask the kernel helper for a direct pointer (left in r_addr
 * by sk_negative_common) and load the word from it.
 */
173 bpf_slow_path_word_neg:
174         lis     r_scratch1,-32  /* SKF_LL_OFF */
175         cmpd    r_addr, r_scratch1      /* addr < SKF_* */
176         blt     bpf_error       /* cr0 = LT */
177         .globl  sk_load_word_negative_offset
178 sk_load_word_negative_offset:
179         sk_negative_common(4)
180         lwz     r_A, 0(r_addr)          /* r_addr now holds the data pointer */
181         blr
/*
 * Negative-offset halfword load: reject offsets below SKF_LL_OFF
 * (-0x200000), else fetch a direct pointer via sk_negative_common and
 * load the halfword from it.
 */
183 bpf_slow_path_half_neg:
184         lis     r_scratch1,-32  /* SKF_LL_OFF */
185         cmpd    r_addr, r_scratch1      /* addr < SKF_* */
186         blt     bpf_error       /* cr0 = LT */
187         .globl  sk_load_half_negative_offset
188 sk_load_half_negative_offset:
189         sk_negative_common(2)
190         lhz     r_A, 0(r_addr)          /* r_addr now holds the data pointer */
191         blr
/*
 * Negative-offset byte load: reject offsets below SKF_LL_OFF (-0x200000),
 * else fetch a direct pointer via sk_negative_common and load the byte.
 */
193 bpf_slow_path_byte_neg:
194         lis     r_scratch1,-32  /* SKF_LL_OFF */
195         cmpd    r_addr, r_scratch1      /* addr < SKF_* */
196         blt     bpf_error       /* cr0 = LT */
197         .globl  sk_load_byte_negative_offset
198 sk_load_byte_negative_offset:
199         sk_negative_common(1)
200         lbz     r_A, 0(r_addr)          /* r_addr now holds the data pointer */
201         blr
/*
 * Negative-offset msh byte load: reject offsets below SKF_LL_OFF
 * (-0x200000), else fetch a direct pointer via sk_negative_common, load
 * the byte and compute r_X = (byte & 0xf) << 2 as in the fast path.
 */
203 bpf_slow_path_byte_msh_neg:
204         lis     r_scratch1,-32  /* SKF_LL_OFF */
205         cmpd    r_addr, r_scratch1      /* addr < SKF_* */
206         blt     bpf_error       /* cr0 = LT */
207         .globl  sk_load_byte_msh_negative_offset
208 sk_load_byte_msh_negative_offset:
209         sk_negative_common(1)
210         lbz     r_X, 0(r_addr)          /* r_addr now holds the data pointer */
211         rlwinm  r_X, r_X, 2, 32-4-2, 31-2       /* r_X = (byte & 0xf) << 2 */
212         blr
/*
 * Shared error exits.  bpf_error_slow is entered with cr0 = EQ (NULL
 * pointer from sk_negative_common) and first fabricates cr0 = LT by
 * comparing -1 against 0; bpf_error expects cr0 = LT already.  Both
 * return r3 = 0 — the JITed call site does 'blt epilogue', so the
 * filter returns 0 (drop) on any access failure.
 */
214 bpf_error_slow:
215         /* fabricate a cr0 = lt */
216         li      r_scratch1, -1
217         cmpdi   r_scratch1, 0
218 bpf_error:
219         /* Entered with cr0 = lt */
220         li      r3, 0
221         /* Generated code will 'blt epilogue', returning 0. */
222         blr