Use multiline attribute to check for IA2_STATE_MULTILINE.
[chromium-blink-merge.git] / sandbox / linux / bpf_dsl / verifier.cc
blobadbc960faf606ec7545337f5b3d0e50f4da25649
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "sandbox/linux/bpf_dsl/verifier.h"
7 #include <string.h>
9 #include <limits>
11 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
12 #include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
13 #include "sandbox/linux/bpf_dsl/policy.h"
14 #include "sandbox/linux/bpf_dsl/policy_compiler.h"
15 #include "sandbox/linux/bpf_dsl/seccomp_macros.h"
16 #include "sandbox/linux/bpf_dsl/syscall_set.h"
17 #include "sandbox/linux/seccomp-bpf/errorcode.h"
18 #include "sandbox/linux/system_headers/linux_seccomp.h"
20 namespace sandbox {
21 namespace bpf_dsl {
23 namespace {
// Bit masks selecting the lower and upper 32-bit halves of a 64-bit
// system call argument.
const uint64_t kLower32Bits = std::numeric_limits<uint32_t>::max();
const uint64_t kUpper32Bits = ~kLower32Bits;
28 struct State {
29 State(const std::vector<struct sock_filter>& p,
30 const struct arch_seccomp_data& d)
31 : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
32 const std::vector<struct sock_filter>& program;
33 const struct arch_seccomp_data& data;
34 unsigned int ip;
35 uint32_t accumulator;
36 bool acc_is_valid;
38 private:
39 DISALLOW_IMPLICIT_CONSTRUCTORS(State);
42 uint32_t EvaluateErrorCode(bpf_dsl::PolicyCompiler* compiler,
43 const ErrorCode& code,
44 const struct arch_seccomp_data& data) {
45 if (code.error_type() == ErrorCode::ET_SIMPLE ||
46 code.error_type() == ErrorCode::ET_TRAP) {
47 return code.err();
48 } else if (code.error_type() == ErrorCode::ET_COND) {
49 if (code.width() == ErrorCode::TP_32BIT &&
50 (data.args[code.argno()] >> 32) &&
51 (data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
52 0xFFFFFFFF80000000ull) {
53 return compiler->Unexpected64bitArgument().err();
55 bool equal = (data.args[code.argno()] & code.mask()) == code.value();
56 return EvaluateErrorCode(compiler, equal ? *code.passed() : *code.failed(),
57 data);
58 } else {
59 return SECCOMP_RET_INVALID;
63 bool VerifyErrorCode(bpf_dsl::PolicyCompiler* compiler,
64 const std::vector<struct sock_filter>& program,
65 struct arch_seccomp_data* data,
66 const ErrorCode& root_code,
67 const ErrorCode& code,
68 const char** err) {
69 if (code.error_type() == ErrorCode::ET_SIMPLE ||
70 code.error_type() == ErrorCode::ET_TRAP) {
71 const uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
72 if (*err) {
73 return false;
75 const uint32_t policy_ret = EvaluateErrorCode(compiler, root_code, *data);
76 if (computed_ret != policy_ret) {
77 // For efficiency's sake, we'd much rather compare "computed_ret"
78 // against "code.err()". This works most of the time, but it doesn't
79 // always work for nested conditional expressions. The test values
80 // that we generate on the fly to probe expressions can trigger
81 // code flow decisions in multiple nodes of the decision tree, and the
82 // only way to compute the correct error code in that situation is by
83 // calling EvaluateErrorCode().
84 *err = "Exit code from BPF program doesn't match";
85 return false;
87 } else if (code.error_type() == ErrorCode::ET_COND) {
88 if (code.argno() < 0 || code.argno() >= 6) {
89 *err = "Invalid argument number in error code";
90 return false;
93 // TODO(mdempsky): The test values generated here try to provide good
94 // coverage for generated BPF instructions while avoiding combinatorial
95 // explosion on large policies. Ideally we would instead take a fuzzing-like
96 // approach and generate a bounded number of test cases regardless of policy
97 // size.
99 // Verify that we can check a value for simple equality.
100 data->args[code.argno()] = code.value();
101 if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
102 err)) {
103 return false;
106 // If mask ignores any bits, verify that setting those bits is still
107 // detected as equality.
108 uint64_t ignored_bits = ~code.mask();
109 if (code.width() == ErrorCode::TP_32BIT) {
110 ignored_bits = static_cast<uint32_t>(ignored_bits);
112 if ((ignored_bits & kLower32Bits) != 0) {
113 data->args[code.argno()] = code.value() | (ignored_bits & kLower32Bits);
114 if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
115 err)) {
116 return false;
119 if ((ignored_bits & kUpper32Bits) != 0) {
120 data->args[code.argno()] = code.value() | (ignored_bits & kUpper32Bits);
121 if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
122 err)) {
123 return false;
127 // Verify that changing bits included in the mask is detected as inequality.
128 if ((code.mask() & kLower32Bits) != 0) {
129 data->args[code.argno()] = code.value() ^ (code.mask() & kLower32Bits);
130 if (!VerifyErrorCode(compiler, program, data, root_code, *code.failed(),
131 err)) {
132 return false;
135 if ((code.mask() & kUpper32Bits) != 0) {
136 data->args[code.argno()] = code.value() ^ (code.mask() & kUpper32Bits);
137 if (!VerifyErrorCode(compiler, program, data, root_code, *code.failed(),
138 err)) {
139 return false;
143 if (code.width() == ErrorCode::TP_32BIT) {
144 // For 32-bit system call arguments, we emit additional instructions to
145 // validate the upper 32-bits. Here we test that validation.
147 // Arbitrary 64-bit values should be rejected.
148 data->args[code.argno()] = 1ULL << 32;
149 if (!VerifyErrorCode(compiler, program, data, root_code,
150 compiler->Unexpected64bitArgument(), err)) {
151 return false;
154 // Upper 32-bits set without the MSB of the lower 32-bits set should be
155 // rejected too.
156 data->args[code.argno()] = kUpper32Bits;
157 if (!VerifyErrorCode(compiler, program, data, root_code,
158 compiler->Unexpected64bitArgument(), err)) {
159 return false;
162 } else {
163 *err = "Attempting to return invalid error code from BPF program";
164 return false;
166 return true;
169 void Ld(State* state, const struct sock_filter& insn, const char** err) {
170 if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS ||
171 insn.jt != 0 || insn.jf != 0) {
172 *err = "Invalid BPF_LD instruction";
173 return;
175 if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
176 // We only allow loading of properly aligned 32bit quantities.
177 memcpy(&state->accumulator,
178 reinterpret_cast<const char*>(&state->data) + insn.k, 4);
179 } else {
180 *err = "Invalid operand in BPF_LD instruction";
181 return;
183 state->acc_is_valid = true;
184 return;
187 void Jmp(State* state, const struct sock_filter& insn, const char** err) {
188 if (BPF_OP(insn.code) == BPF_JA) {
189 if (state->ip + insn.k + 1 >= state->program.size() ||
190 state->ip + insn.k + 1 <= state->ip) {
191 compilation_failure:
192 *err = "Invalid BPF_JMP instruction";
193 return;
195 state->ip += insn.k;
196 } else {
197 if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
198 state->ip + insn.jt + 1 >= state->program.size() ||
199 state->ip + insn.jf + 1 >= state->program.size()) {
200 goto compilation_failure;
202 switch (BPF_OP(insn.code)) {
203 case BPF_JEQ:
204 if (state->accumulator == insn.k) {
205 state->ip += insn.jt;
206 } else {
207 state->ip += insn.jf;
209 break;
210 case BPF_JGT:
211 if (state->accumulator > insn.k) {
212 state->ip += insn.jt;
213 } else {
214 state->ip += insn.jf;
216 break;
217 case BPF_JGE:
218 if (state->accumulator >= insn.k) {
219 state->ip += insn.jt;
220 } else {
221 state->ip += insn.jf;
223 break;
224 case BPF_JSET:
225 if (state->accumulator & insn.k) {
226 state->ip += insn.jt;
227 } else {
228 state->ip += insn.jf;
230 break;
231 default:
232 goto compilation_failure;
237 uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
238 if (BPF_SRC(insn.code) != BPF_K) {
239 *err = "Invalid BPF_RET instruction";
240 return 0;
242 return insn.k;
245 void Alu(State* state, const struct sock_filter& insn, const char** err) {
246 if (BPF_OP(insn.code) == BPF_NEG) {
247 state->accumulator = -state->accumulator;
248 return;
249 } else {
250 if (BPF_SRC(insn.code) != BPF_K) {
251 *err = "Unexpected source operand in arithmetic operation";
252 return;
254 switch (BPF_OP(insn.code)) {
255 case BPF_ADD:
256 state->accumulator += insn.k;
257 break;
258 case BPF_SUB:
259 state->accumulator -= insn.k;
260 break;
261 case BPF_MUL:
262 state->accumulator *= insn.k;
263 break;
264 case BPF_DIV:
265 if (!insn.k) {
266 *err = "Illegal division by zero";
267 break;
269 state->accumulator /= insn.k;
270 break;
271 case BPF_MOD:
272 if (!insn.k) {
273 *err = "Illegal division by zero";
274 break;
276 state->accumulator %= insn.k;
277 break;
278 case BPF_OR:
279 state->accumulator |= insn.k;
280 break;
281 case BPF_XOR:
282 state->accumulator ^= insn.k;
283 break;
284 case BPF_AND:
285 state->accumulator &= insn.k;
286 break;
287 case BPF_LSH:
288 if (insn.k > 32) {
289 *err = "Illegal shift operation";
290 break;
292 state->accumulator <<= insn.k;
293 break;
294 case BPF_RSH:
295 if (insn.k > 32) {
296 *err = "Illegal shift operation";
297 break;
299 state->accumulator >>= insn.k;
300 break;
301 default:
302 *err = "Invalid operator in arithmetic operation";
303 break;
308 } // namespace
310 bool Verifier::VerifyBPF(bpf_dsl::PolicyCompiler* compiler,
311 const std::vector<struct sock_filter>& program,
312 const bpf_dsl::Policy& policy,
313 const char** err) {
314 *err = NULL;
315 for (uint32_t sysnum : SyscallSet::All()) {
316 // We ideally want to iterate over the full system call range and values
317 // just above and just below this range. This gives us the full result set
318 // of the "evaluators".
319 // On Intel systems, this can fail in a surprising way, as a cleared bit 30
320 // indicates either i386 or x86-64; and a set bit 30 indicates x32. And
321 // unless we pay attention to setting this bit correctly, an early check in
322 // our BPF program will make us fail with a misleading error code.
323 struct arch_seccomp_data data = {static_cast<int>(sysnum),
324 static_cast<uint32_t>(SECCOMP_ARCH)};
325 #if defined(__i386__) || defined(__x86_64__)
326 #if defined(__x86_64__) && defined(__ILP32__)
327 if (!(sysnum & 0x40000000u)) {
328 continue;
330 #else
331 if (sysnum & 0x40000000u) {
332 continue;
334 #endif
335 #endif
336 ErrorCode code = SyscallSet::IsValid(sysnum)
337 ? policy.EvaluateSyscall(sysnum)->Compile(compiler)
338 : policy.InvalidSyscall()->Compile(compiler);
339 if (!VerifyErrorCode(compiler, program, &data, code, code, err)) {
340 return false;
343 return true;
346 uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
347 const struct arch_seccomp_data& data,
348 const char** err) {
349 *err = NULL;
350 if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
351 *err = "Invalid program length";
352 return 0;
354 for (State state(program, data); !*err; ++state.ip) {
355 if (state.ip >= program.size()) {
356 *err = "Invalid instruction pointer in BPF program";
357 break;
359 const struct sock_filter& insn = program[state.ip];
360 switch (BPF_CLASS(insn.code)) {
361 case BPF_LD:
362 Ld(&state, insn, err);
363 break;
364 case BPF_JMP:
365 Jmp(&state, insn, err);
366 break;
367 case BPF_RET: {
368 uint32_t r = Ret(&state, insn, err);
369 switch (r & SECCOMP_RET_ACTION) {
370 case SECCOMP_RET_TRAP:
371 case SECCOMP_RET_ERRNO:
372 case SECCOMP_RET_TRACE:
373 case SECCOMP_RET_ALLOW:
374 break;
375 case SECCOMP_RET_KILL: // We don't ever generate this
376 case SECCOMP_RET_INVALID: // Should never show up in BPF program
377 default:
378 *err = "Unexpected return code found in BPF program";
379 return 0;
381 return r;
383 case BPF_ALU:
384 Alu(&state, insn, err);
385 break;
386 default:
387 *err = "Unexpected instruction in BPF program";
388 break;
391 return 0;
394 } // namespace bpf_dsl
395 } // namespace sandbox