Merge tag 'acpi-part2-4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux/fpc-iii.git] / samples / seccomp / bpf-helper.h
blob0cc9816fe8e8286a92cab65bc9541e0492da4798
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Example wrapper around BPF macros.
5 * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
6 * Author: Will Drewry <wad@chromium.org>
8 * The code may be used by anyone for any purpose,
9 * and can serve as a starting point for developing
10 * applications using prctl(PR_SET_SECCOMP, 2, ...).
12 * No guarantees are provided with respect to the correctness
13 * or functionality of this code.
15 #ifndef __BPF_HELPER_H__
16 #define __BPF_HELPER_H__
18 #include <asm/bitsperlong.h> /* for __BITS_PER_LONG */
19 #include <endian.h>
20 #include <linux/filter.h>
21 #include <linux/seccomp.h> /* for seccomp_data */
22 #include <linux/types.h>
23 #include <linux/unistd.h>
24 #include <stddef.h>
26 #define BPF_LABELS_MAX 256
27 struct bpf_labels {
28 int count;
29 struct __bpf_label {
30 const char *label;
31 __u32 location;
32 } labels[BPF_LABELS_MAX];
35 int bpf_resolve_jumps(struct bpf_labels *labels,
36 struct sock_filter *filter, size_t count);
37 __u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label);
38 void seccomp_bpf_print(struct sock_filter *filter, size_t count);
40 #define JUMP_JT 0xff
41 #define JUMP_JF 0xff
42 #define LABEL_JT 0xfe
43 #define LABEL_JF 0xfe
45 #define ALLOW \
46 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
47 #define DENY \
48 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
49 #define JUMP(labels, label) \
50 BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
51 JUMP_JT, JUMP_JF)
52 #define LABEL(labels, label) \
53 BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
54 LABEL_JT, LABEL_JF)
55 #define SYSCALL(nr, jt) \
56 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \
59 /* Lame, but just an example */
60 #define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)
62 #define EXPAND(...) __VA_ARGS__
64 /* Ensure that we load the logically correct offset. */
65 #if __BYTE_ORDER == __LITTLE_ENDIAN
66 #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
67 #elif __BYTE_ORDER == __BIG_ENDIAN
68 #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
69 #else
70 #error "Unknown endianness"
71 #endif
73 /* Map all width-sensitive operations */
74 #if __BITS_PER_LONG == 32
76 #define JEQ(x, jt) JEQ32(x, EXPAND(jt))
77 #define JNE(x, jt) JNE32(x, EXPAND(jt))
78 #define JGT(x, jt) JGT32(x, EXPAND(jt))
79 #define JLT(x, jt) JLT32(x, EXPAND(jt))
80 #define JGE(x, jt) JGE32(x, EXPAND(jt))
81 #define JLE(x, jt) JLE32(x, EXPAND(jt))
82 #define JA(x, jt) JA32(x, EXPAND(jt))
83 #define ARG(i) ARG_32(i)
85 #elif __BITS_PER_LONG == 64
87 /* Ensure that we load the logically correct offset. */
88 #if __BYTE_ORDER == __LITTLE_ENDIAN
89 #define ENDIAN(_lo, _hi) _lo, _hi
90 #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
91 #elif __BYTE_ORDER == __BIG_ENDIAN
92 #define ENDIAN(_lo, _hi) _hi, _lo
93 #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
94 #endif
96 union arg64 {
97 struct {
98 __u32 ENDIAN(lo32, hi32);
100 __u64 u64;
103 #define JEQ(x, jt) \
104 JEQ64(((union arg64){.u64 = (x)}).lo32, \
105 ((union arg64){.u64 = (x)}).hi32, \
106 EXPAND(jt))
107 #define JGT(x, jt) \
108 JGT64(((union arg64){.u64 = (x)}).lo32, \
109 ((union arg64){.u64 = (x)}).hi32, \
110 EXPAND(jt))
111 #define JGE(x, jt) \
112 JGE64(((union arg64){.u64 = (x)}).lo32, \
113 ((union arg64){.u64 = (x)}).hi32, \
114 EXPAND(jt))
115 #define JNE(x, jt) \
116 JNE64(((union arg64){.u64 = (x)}).lo32, \
117 ((union arg64){.u64 = (x)}).hi32, \
118 EXPAND(jt))
119 #define JLT(x, jt) \
120 JLT64(((union arg64){.u64 = (x)}).lo32, \
121 ((union arg64){.u64 = (x)}).hi32, \
122 EXPAND(jt))
123 #define JLE(x, jt) \
124 JLE64(((union arg64){.u64 = (x)}).lo32, \
125 ((union arg64){.u64 = (x)}).hi32, \
126 EXPAND(jt))
128 #define JA(x, jt) \
129 JA64(((union arg64){.u64 = (x)}).lo32, \
130 ((union arg64){.u64 = (x)}).hi32, \
131 EXPAND(jt))
132 #define ARG(i) ARG_64(i)
134 #else
135 #error __BITS_PER_LONG value unusable.
136 #endif
138 /* Loads the arg into A */
139 #define ARG_32(idx) \
140 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
142 /* Loads lo into M[0] and hi into M[1] and A */
143 #define ARG_64(idx) \
144 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
145 BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
146 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
147 BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
149 #define JEQ32(value, jt) \
150 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \
153 #define JNE32(value, jt) \
154 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
157 #define JA32(value, jt) \
158 BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
161 #define JGE32(value, jt) \
162 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
165 #define JGT32(value, jt) \
166 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
169 #define JLE32(value, jt) \
170 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
173 #define JLT32(value, jt) \
174 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
178 * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
179 * A and M[1]. This invariant is kept by restoring A if necessary.
181 #define JEQ64(lo, hi, jt) \
182 /* if (hi != arg.hi) goto NOMATCH; */ \
183 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
184 BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
185 /* if (lo != arg.lo) goto NOMATCH; */ \
186 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
187 BPF_STMT(BPF_LD+BPF_MEM, 1), \
188 jt, \
189 BPF_STMT(BPF_LD+BPF_MEM, 1)
191 #define JNE64(lo, hi, jt) \
192 /* if (hi != arg.hi) goto MATCH; */ \
193 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
194 BPF_STMT(BPF_LD+BPF_MEM, 0), \
195 /* if (lo != arg.lo) goto MATCH; */ \
196 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
197 BPF_STMT(BPF_LD+BPF_MEM, 1), \
198 jt, \
199 BPF_STMT(BPF_LD+BPF_MEM, 1)
201 #define JA64(lo, hi, jt) \
202 /* if (hi & arg.hi) goto MATCH; */ \
203 BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
204 BPF_STMT(BPF_LD+BPF_MEM, 0), \
205 /* if (lo & arg.lo) goto MATCH; */ \
206 BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
207 BPF_STMT(BPF_LD+BPF_MEM, 1), \
208 jt, \
209 BPF_STMT(BPF_LD+BPF_MEM, 1)
211 #define JGE64(lo, hi, jt) \
212 /* if (hi > arg.hi) goto MATCH; */ \
213 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
214 /* if (hi != arg.hi) goto NOMATCH; */ \
215 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
216 BPF_STMT(BPF_LD+BPF_MEM, 0), \
217 /* if (lo >= arg.lo) goto MATCH; */ \
218 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
219 BPF_STMT(BPF_LD+BPF_MEM, 1), \
220 jt, \
221 BPF_STMT(BPF_LD+BPF_MEM, 1)
223 #define JGT64(lo, hi, jt) \
224 /* if (hi > arg.hi) goto MATCH; */ \
225 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
226 /* if (hi != arg.hi) goto NOMATCH; */ \
227 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
228 BPF_STMT(BPF_LD+BPF_MEM, 0), \
229 /* if (lo > arg.lo) goto MATCH; */ \
230 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
231 BPF_STMT(BPF_LD+BPF_MEM, 1), \
232 jt, \
233 BPF_STMT(BPF_LD+BPF_MEM, 1)
235 #define JLE64(lo, hi, jt) \
236 /* if (hi < arg.hi) goto MATCH; */ \
237 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
238 /* if (hi != arg.hi) goto NOMATCH; */ \
239 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
240 BPF_STMT(BPF_LD+BPF_MEM, 0), \
241 /* if (lo <= arg.lo) goto MATCH; */ \
242 BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
243 BPF_STMT(BPF_LD+BPF_MEM, 1), \
244 jt, \
245 BPF_STMT(BPF_LD+BPF_MEM, 1)
247 #define JLT64(lo, hi, jt) \
248 /* if (hi < arg.hi) goto MATCH; */ \
249 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
250 /* if (hi != arg.hi) goto NOMATCH; */ \
251 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
252 BPF_STMT(BPF_LD+BPF_MEM, 0), \
253 /* if (lo < arg.lo) goto MATCH; */ \
254 BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
255 BPF_STMT(BPF_LD+BPF_MEM, 1), \
256 jt, \
257 BPF_STMT(BPF_LD+BPF_MEM, 1)
259 #define LOAD_SYSCALL_NR \
260 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
261 offsetof(struct seccomp_data, nr))
263 #endif /* __BPF_HELPER_H__ */