x86/speculation/mds: Fix documentation typo
[linux/fpc-iii.git] / arch / powerpc / net / bpf_jit64.h
blobbb944b6018d7427945dac99f50e58b1878b1615c
/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
12 #ifndef _BPF_JIT64_H
13 #define _BPF_JIT64_H
15 #include "bpf_jit.h"
18 * Stack layout:
19 * Ensure the top half (upto local_tmp_var) stays consistent
20 * with our redzone usage.
22 * [ prev sp ] <-------------
23 * [ nv gpr save area ] 8*8 |
24 * [ tail_call_cnt ] 8 |
25 * [ local_tmp_var ] 8 |
26 * fp (r31) --> [ ebpf stack space ] 512 |
27 * [ frame header ] 32/112 |
28 * sp (r1) ---> [ stack pointer ] --------------
31 /* for gpr non volatile registers BPG_REG_6 to 10, plus skb cache registers */
32 #define BPF_PPC_STACK_SAVE (8*8)
33 /* for bpf JIT code internal usage */
34 #define BPF_PPC_STACK_LOCALS 16
35 /* Ensure this is quadword aligned */
36 #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
37 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
39 #ifndef __ASSEMBLY__
41 /* BPF register usage */
42 #define SKB_HLEN_REG (MAX_BPF_JIT_REG + 0)
43 #define SKB_DATA_REG (MAX_BPF_JIT_REG + 1)
44 #define TMP_REG_1 (MAX_BPF_JIT_REG + 2)
45 #define TMP_REG_2 (MAX_BPF_JIT_REG + 3)
47 /* BPF to ppc register mappings */
48 static const int b2p[] = {
49 /* function return value */
50 [BPF_REG_0] = 8,
51 /* function arguments */
52 [BPF_REG_1] = 3,
53 [BPF_REG_2] = 4,
54 [BPF_REG_3] = 5,
55 [BPF_REG_4] = 6,
56 [BPF_REG_5] = 7,
57 /* non volatile registers */
58 [BPF_REG_6] = 27,
59 [BPF_REG_7] = 28,
60 [BPF_REG_8] = 29,
61 [BPF_REG_9] = 30,
62 /* frame pointer aka BPF_REG_10 */
63 [BPF_REG_FP] = 31,
64 /* eBPF jit internal registers */
65 [BPF_REG_AX] = 2,
66 [SKB_HLEN_REG] = 25,
67 [SKB_DATA_REG] = 26,
68 [TMP_REG_1] = 9,
69 [TMP_REG_2] = 10
72 /* PPC NVR range -- update this if we ever use NVRs below r24 */
73 #define BPF_PPC_NVR_MIN 24
75 /* Assembly helpers */
76 #define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \
77 u64 func##_negative_offset(u64 r3, u64 r4); \
78 u64 func##_positive_offset(u64 r3, u64 r4);
80 DECLARE_LOAD_FUNC(sk_load_word);
81 DECLARE_LOAD_FUNC(sk_load_half);
82 DECLARE_LOAD_FUNC(sk_load_byte);
84 #define CHOOSE_LOAD_FUNC(imm, func) \
85 (imm < 0 ? \
86 (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \
87 func##_positive_offset)
90 * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
91 * so ensure that it isn't in use already.
93 #define PPC_BPF_LL(r, base, i) do { \
94 if ((i) % 4) { \
95 PPC_LI(b2p[TMP_REG_2], (i)); \
96 PPC_LDX(r, base, b2p[TMP_REG_2]); \
97 } else \
98 PPC_LD(r, base, i); \
99 } while(0)
100 #define PPC_BPF_STL(r, base, i) do { \
101 if ((i) % 4) { \
102 PPC_LI(b2p[TMP_REG_2], (i)); \
103 PPC_STDX(r, base, b2p[TMP_REG_2]); \
104 } else \
105 PPC_STD(r, base, i); \
106 } while(0)
107 #define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
109 #define SEEN_FUNC 0x1000 /* might call external helpers */
110 #define SEEN_STACK 0x2000 /* uses BPF stack */
111 #define SEEN_SKB 0x4000 /* uses sk_buff */
112 #define SEEN_TAILCALL 0x8000 /* uses tail calls */
114 struct codegen_context {
116 * This is used to track register usage as well
117 * as calls to external helpers.
118 * - register usage is tracked with corresponding
119 * bits (r3-r10 and r25-r31)
120 * - rest of the bits can be used to track other
121 * things -- for now, we use bits 16 to 23
122 * encoded in SEEN_* macros above
124 unsigned int seen;
125 unsigned int idx;
128 #endif /* !__ASSEMBLY__ */
130 #endif