1 /* bpf_jit.S: Packet/header access helper functions
2 * for PPC64 BPF compiler.
4 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2
12 #include <asm/ppc_asm.h>
/* NOTE(review): comment opener restored -- the original `/*` line is
 * elided from this chunk, as are several of the register-role lines.
16 * All of these routines are called directly from generated code,
17 * whose register usage is:
21 * r6 *** address parameter to helper ***
29 * To consider: These helpers are so small it could be better to just
30 * generate them inline. Inline code can do the simple headlen check
31 * then branch directly to slow_path_XXX if required. (In fact, could
32 * load a spare GPR with the address of slow_path_generic and pass size
33 * as an argument, making the call site a mtlr, li and bllr.)
 */
/*
 * sk_load_word, positive-offset fast path.
 * NOTE(review): the sk_load_word entry label and the offset-sign test
 * that feeds this blt (original lines ~36-37) are elided from this
 * chunk -- confirm against the full file.
 */
38 blt bpf_slow_path_word_neg
39 .globl sk_load_word_positive_offset
40 sk_load_word_positive_offset:
41 /* Are we accessing past headlen? */
/* r_scratch1 = headlen - 4: the last offset at which a full 32-bit
 * word still fits inside the linear header. */
42 subi r_scratch1, r_HL, 4
43 cmpd r_scratch1, r_addr
44 blt bpf_slow_path_word
45 /* Nope, just hitting the header. cr0 here is eq or gt! */
/* NOTE(review): the in-header load instruction itself (original
 * line ~46) is elided from this chunk. */
47 /* When big endian we don't need to byteswap. */
48 blr /* Return success, cr0 != LT */
/*
 * sk_load_half, positive-offset fast path: same headlen check as the
 * word loader, but for a 2-byte access.
 * NOTE(review): the sk_load_half entry label (above) and the actual
 * halfword load + blr (original lines ~59-61) are elided from this
 * chunk.
 */
53 blt bpf_slow_path_half_neg
54 .globl sk_load_half_positive_offset
55 sk_load_half_positive_offset:
/* r_scratch1 = headlen - 2: last offset with a whole halfword in
 * the linear header. */
56 subi r_scratch1, r_HL, 2
57 cmpd r_scratch1, r_addr
58 blt bpf_slow_path_half
/*
 * sk_load_byte, positive-offset fast path.  A single byte is in
 * bounds while offset < headlen, hence `ble` (slow path taken when
 * headlen <= offset).
 * NOTE(review): the compare that sets cr0 for this ble (original
 * line ~68) and the byte load + blr are elided from this chunk --
 * confirm against the full file.
 */
65 blt bpf_slow_path_byte_neg
66 .globl sk_load_byte_positive_offset
67 sk_load_byte_positive_offset:
69 ble bpf_slow_path_byte
/* NOTE(review): comment opener restored -- the original `/*` line is
 * elided from this chunk.
74 * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
75 * r_addr is the offset value
 */
77 .globl sk_load_byte_msh
/* NOTE(review): the sk_load_byte_msh label and the offset-sign test
 * feeding this blt (original lines ~78-79) are elided. */
80 blt bpf_slow_path_byte_msh_neg
81 .globl sk_load_byte_msh_positive_offset
82 sk_load_byte_msh_positive_offset:
/* NOTE(review): the headlen compare for this ble and the byte load
 * into r_X (original lines ~83, ~85) are elided. */
84 ble bpf_slow_path_byte_msh
/* r_X = (r_X & 0xf) << 2 -- rotate left 2 and keep only bits
 * 26..29, implementing the 4*(byte & 0xf) from the comment above. */
86 rlwinm r_X, r_X, 2, 32-4-2, 31-2
89 /* Call out to skb_copy_bits:
90 * We'll need to back up our volatile regs first; we have
91 * local variable space at r1+(BPF_PPC_STACK_BASIC).
92 * Allocate a new stack frame here to remain ABI-compliant in
 * calling out.  (NOTE(review): original closing lines of this
 * comment are elided from this chunk; terminator restored.)
 */
95 #define bpf_slow_path_common(SIZE) \
/* NOTE(review): LR save (original lines ~96-97) elided here. */ \
98 /* R3 goes in parameter space of caller's frame */ \
99 std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
/* Spill A and X into the local-variable area; reloaded below. */ \
100 std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
101 std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
/* r5 = scratch buffer on our stack that skb_copy_bits fills; the \
 * slow-path tails reload the result from this slot. */ \
102 addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \
103 stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
104 /* R3 = r_skb, as passed */ \
/* NOTE(review): remaining argument setup and the actual \
 * `bl skb_copy_bits` (original lines ~105-108) are elided. */ \
109 /* R3 = 0 on success */ \
110 addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
/* NOTE(review): LR restore (original ~111) elided. */ \
112 ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
113 ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
/* NOTE(review): the compare of the skb_copy_bits return value that \
 * sets cr0 (original lines ~114-115) is elided before this blt. */ \
116 blt bpf_error; /* cr0 = LT */ \
117 ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
/* Word slow path.  NOTE(review): the bpf_slow_path_word label
 * (original line ~120) is elided from this chunk. */
121 bpf_slow_path_common(4)
122 /* Data value is on stack, and cr0 != LT */
/* Reload the 32-bit value skb_copy_bits wrote to our stack slot. */
123 lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
/* Halfword slow path.  NOTE(review): the bpf_slow_path_half label
 * (original line ~126) is elided from this chunk. */
127 bpf_slow_path_common(2)
/* Zero-extending 16-bit reload of the copied data. */
128 lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
/* Byte slow path.  NOTE(review): the bpf_slow_path_byte label
 * (original line ~131) is elided from this chunk. */
132 bpf_slow_path_common(1)
/* Zero-extending 8-bit reload of the copied data. */
133 lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
/* MSH byte slow path: copy one byte out of the skb, then compute
 * 4*(byte & 0xf) into r_X, matching the fast path above. */
136 bpf_slow_path_byte_msh:
137 bpf_slow_path_common(1)
138 lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
/* r_X = (r_X & 0xf) << 2: rotate left 2, keep bits 26..29. */
139 rlwinm r_X, r_X, 2, 32-4-2, 31-2
142 /* Call out to bpf_internal_load_pointer_neg_helper:
143 * We'll need to back up our volatile regs first; we have
144 * local variable space at r1+(BPF_PPC_STACK_BASIC).
145 * Allocate a new stack frame here to remain ABI-compliant in
 * calling out.  (NOTE(review): original closing lines of this
 * comment are elided from this chunk; terminator restored.)
 */
148 #define sk_negative_common(SIZE) \
/* NOTE(review): LR save (original lines ~149-150) elided here. */ \
151 /* R3 goes in parameter space of caller's frame */ \
152 std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
/* Spill A and X; the helper call below may clobber volatiles. */ \
153 std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
154 std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
155 stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
156 /* R3 = r_skb, as passed */ \
/* NOTE(review): r4/r5 argument setup (original ~157-158) elided. */ \
159 bl bpf_internal_load_pointer_neg_helper; \
161 /* R3 != 0 on success */ \
162 addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
/* NOTE(review): LR restore (original ~163) elided. */ \
164 ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
165 ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
/* NOTE(review): the compare of the returned pointer against zero \
 * (original ~166-167) is elided before this beq. */ \
168 beq bpf_error_slow; /* cr0 = EQ */ \
170 ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
/*
 * Negative-offset word load.  `lis r_scratch1,-32` builds
 * -32 << 16 = -0x200000 (SKF_LL_OFF); anything below that is an
 * invalid ancillary offset -> bpf_error.
 * NOTE(review): the load through the pointer returned by
 * sk_negative_common (original lines ~180-181) is elided from this
 * chunk.
 */
173 bpf_slow_path_word_neg:
174 lis r_scratch1,-32 /* SKF_LL_OFF */
175 cmpd r_addr, r_scratch1 /* addr < SKF_* */
176 blt bpf_error /* cr0 = LT */
177 .globl sk_load_word_negative_offset
178 sk_load_word_negative_offset:
179 sk_negative_common(4)
/*
 * Negative-offset halfword load; same SKF_LL_OFF (-0x200000) range
 * check as the word variant.
 * NOTE(review): the halfword load from the returned pointer
 * (original lines ~190-191) is elided from this chunk.
 */
183 bpf_slow_path_half_neg:
184 lis r_scratch1,-32 /* SKF_LL_OFF */
185 cmpd r_addr, r_scratch1 /* addr < SKF_* */
186 blt bpf_error /* cr0 = LT */
187 .globl sk_load_half_negative_offset
188 sk_load_half_negative_offset:
189 sk_negative_common(2)
/*
 * Negative-offset byte load; same SKF_LL_OFF (-0x200000) range
 * check as the word variant.
 * NOTE(review): the byte load from the returned pointer (original
 * lines ~200-201) is elided from this chunk.
 */
193 bpf_slow_path_byte_neg:
194 lis r_scratch1,-32 /* SKF_LL_OFF */
195 cmpd r_addr, r_scratch1 /* addr < SKF_* */
196 blt bpf_error /* cr0 = LT */
197 .globl sk_load_byte_negative_offset
198 sk_load_byte_negative_offset:
199 sk_negative_common(1)
/*
 * Negative-offset MSH byte load: range-check against SKF_LL_OFF,
 * fetch the byte via the negative-offset helper, then compute
 * 4*(byte & 0xf) into r_X as in the positive-offset path.
 * NOTE(review): the lbz of the returned pointer into r_X (original
 * line ~210) is elided from this chunk.
 */
203 bpf_slow_path_byte_msh_neg:
204 lis r_scratch1,-32 /* SKF_LL_OFF */
205 cmpd r_addr, r_scratch1 /* addr < SKF_* */
206 blt bpf_error /* cr0 = LT */
207 .globl sk_load_byte_msh_negative_offset
208 sk_load_byte_msh_negative_offset:
209 sk_negative_common(1)
/* r_X = (r_X & 0xf) << 2: rotate left 2, keep bits 26..29. */
211 rlwinm r_X, r_X, 2, 32-4-2, 31-2
/* Error exits (bpf_error_slow / bpf_error).  NOTE(review): the labels
 * and instructions themselves (original lines ~213-222) are elided
 * from this chunk; only their commentary survives below. */
215 /* fabricate a cr0 = lt */
219 /* Entered with cr0 = lt */
221 /* Generated code will 'blt epilogue', returning 0. */