sandbox/linux/seccomp-bpf/verifier.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <string.h>

#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
#include "sandbox/linux/seccomp-bpf/verifier.h"

namespace sandbox {

namespace {

struct State {
  State(const std::vector<struct sock_filter>& p,
        const struct arch_seccomp_data& d)
      : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
  const std::vector<struct sock_filter>& program;
  const struct arch_seccomp_data& data;
  unsigned int ip;
  uint32_t accumulator;
  bool acc_is_valid;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};

uint32_t EvaluateErrorCode(SandboxBPF* sandbox,
                           const ErrorCode& code,
                           const struct arch_seccomp_data& data) {
  if (code.error_type() == ErrorCode::ET_SIMPLE ||
      code.error_type() == ErrorCode::ET_TRAP) {
    return code.err();
  } else if (code.error_type() == ErrorCode::ET_COND) {
    if (code.width() == ErrorCode::TP_32BIT &&
        (data.args[code.argno()] >> 32) &&
        (data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
            0xFFFFFFFF80000000ull) {
      return sandbox->Unexpected64bitArgument().err();
    }
    switch (code.op()) {
      case ErrorCode::OP_EQUAL:
        return EvaluateErrorCode(sandbox,
                                 (code.width() == ErrorCode::TP_32BIT
                                      ? uint32_t(data.args[code.argno()])
                                      : data.args[code.argno()]) == code.value()
                                     ? *code.passed()
                                     : *code.failed(),
                                 data);
      case ErrorCode::OP_HAS_ALL_BITS:
        return EvaluateErrorCode(sandbox,
                                 ((code.width() == ErrorCode::TP_32BIT
                                       ? uint32_t(data.args[code.argno()])
                                       : data.args[code.argno()]) &
                                  code.value()) == code.value()
                                     ? *code.passed()
                                     : *code.failed(),
                                 data);
      case ErrorCode::OP_HAS_ANY_BITS:
        return EvaluateErrorCode(sandbox,
                                 (code.width() == ErrorCode::TP_32BIT
                                      ? uint32_t(data.args[code.argno()])
                                      : data.args[code.argno()]) &
                                         code.value()
                                     ? *code.passed()
                                     : *code.failed(),
                                 data);
      default:
        return SECCOMP_RET_INVALID;
    }
  } else {
    return SECCOMP_RET_INVALID;
  }
}
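
// A minimal walk-through of the evaluation above, using hypothetical values
// (nothing below is taken from a real policy): for a TP_32BIT OP_EQUAL
// condition on argument 0 with value 42,
//
//   data.args[0] == 42              -> *code.passed() is evaluated
//   data.args[0] == 7               -> *code.failed() is evaluated
//   data.args[0] == 0x100000000ull  -> Unexpected64bitArgument().err(),
//                                      because a genuine 64bit value can
//                                      never legitimately reach a 32bit
//                                      comparison.
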
bool VerifyErrorCode(SandboxBPF* sandbox,
                     const std::vector<struct sock_filter>& program,
                     struct arch_seccomp_data* data,
                     const ErrorCode& root_code,
                     const ErrorCode& code,
                     const char** err) {
  if (code.error_type() == ErrorCode::ET_SIMPLE ||
      code.error_type() == ErrorCode::ET_TRAP) {
    uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
    if (*err) {
      return false;
    } else if (computed_ret != EvaluateErrorCode(sandbox, root_code, *data)) {
      // For efficiency's sake, we'd much rather compare "computed_ret"
      // against "code.err()". This works most of the time, but it doesn't
      // always work for nested conditional expressions. The test values
      // that we generate on the fly to probe expressions can trigger
      // code flow decisions in multiple nodes of the decision tree, and the
      // only way to compute the correct error code in that situation is by
      // calling EvaluateErrorCode().
      *err = "Exit code from BPF program doesn't match";
      return false;
    }
  } else if (code.error_type() == ErrorCode::ET_COND) {
    if (code.argno() < 0 || code.argno() >= 6) {
      *err = "Invalid argument number in error code";
      return false;
    }
    switch (code.op()) {
      case ErrorCode::OP_EQUAL:
        // Verify that we can check a 32bit value (or the LSB of a 64bit
        // value) for equality.
        data->args[code.argno()] = code.value();
        if (!VerifyErrorCode(
                sandbox, program, data, root_code, *code.passed(), err)) {
          return false;
        }

        // Change the value so that it no longer matches, and verify that
        // this is detected as an inequality.
        data->args[code.argno()] = code.value() ^ 0x55AA55AA;
        if (!VerifyErrorCode(
                sandbox, program, data, root_code, *code.failed(), err)) {
          return false;
        }

        // BPF programs can only ever operate on 32bit values. So, we have
        // generated additional BPF instructions that inspect the MSB. Verify
        // that they behave as intended.
        if (code.width() == ErrorCode::TP_32BIT) {
          if (code.value() >> 32) {
            SANDBOX_DIE(
                "Invalid comparison of a 32bit system call argument "
                "against a 64bit constant; this test is always false.");
          }

          // If the system call argument was intended to be a 32bit parameter,
          // verify that it is a fatal error if a 64bit value is ever passed
          // here.
          data->args[code.argno()] = 0x100000000ull;
          if (!VerifyErrorCode(sandbox,
                               program,
                               data,
                               root_code,
                               sandbox->Unexpected64bitArgument(),
                               err)) {
            return false;
          }
        } else {
          // If the system call argument was intended to be a 64bit parameter,
          // verify that we can handle (in-)equality for the MSB. This is
          // essentially the same test that we did earlier for the LSB.
          // We only need to verify the behavior of the inequality test; we
          // know that the equality test already passed, as unlike the kernel
          // the Verifier does operate on 64bit quantities.
          data->args[code.argno()] = code.value() ^ 0x55AA55AA00000000ull;
          if (!VerifyErrorCode(
                  sandbox, program, data, root_code, *code.failed(), err)) {
            return false;
          }
        }
        break;
      case ErrorCode::OP_HAS_ALL_BITS:
      case ErrorCode::OP_HAS_ANY_BITS:
        // A comprehensive test of bit values is difficult and potentially
        // rather time-consuming. We avoid doing so at run-time and instead
        // rely on the unittest for full testing. The test that we have here
        // covers just the common cases. We test against the bitmask itself,
        // all zeros and all ones.
        {
          // Testing "any" bits against a zero mask is always false. So, there
          // are some cases where we expect tests to take the "failed()" branch
          // even though this is a test that normally should take "passed()".
          const ErrorCode& passed =
              (!code.value() && code.op() == ErrorCode::OP_HAS_ANY_BITS) ||
                      // On a 32bit system, it is impossible to pass a 64bit
                      // value as a system call argument. So, some additional
                      // tests always evaluate as false.
                      ((code.value() & ~uint64_t(uintptr_t(-1))) &&
                       code.op() == ErrorCode::OP_HAS_ALL_BITS) ||
                      (code.value() && !(code.value() & uintptr_t(-1)) &&
                       code.op() == ErrorCode::OP_HAS_ANY_BITS)
                  ? *code.failed()
                  : *code.passed();

          // Similarly, testing for "all" bits in a zero mask is always true.
          // So, some cases pass despite them normally failing.
          const ErrorCode& failed =
              !code.value() && code.op() == ErrorCode::OP_HAS_ALL_BITS
                  ? *code.passed()
                  : *code.failed();

          data->args[code.argno()] = code.value() & uintptr_t(-1);
          if (!VerifyErrorCode(
                  sandbox, program, data, root_code, passed, err)) {
            return false;
          }
          data->args[code.argno()] = uintptr_t(-1);
          if (!VerifyErrorCode(
                  sandbox, program, data, root_code, passed, err)) {
            return false;
          }
          data->args[code.argno()] = 0;
          if (!VerifyErrorCode(
                  sandbox, program, data, root_code, failed, err)) {
            return false;
          }
        }
        break;
      default:  // TODO(markus): Need to add support for OP_GREATER
        *err = "Unsupported operation in conditional error code";
        return false;
    }
  } else {
    *err = "Attempting to return invalid error code from BPF program";
    return false;
  }
  return true;
}
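
// To summarize the probing strategy above for a hypothetical 64bit OP_EQUAL
// condition on argument n with value V (illustrative values only): the
// verifier re-runs the program with args[n] set to V, to V ^ 0x55AA55AA and
// to V ^ 0x55AA55AA00000000ull, and compares each computed return code
// against EvaluateErrorCode() applied to the full ErrorCode decision tree.
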
void Ld(State* state, const struct sock_filter& insn, const char** err) {
  if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS) {
    *err = "Invalid BPF_LD instruction";
    return;
  }
  if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
    // We only allow loading of properly aligned 32bit quantities.
    memcpy(&state->accumulator,
           reinterpret_cast<const char*>(&state->data) + insn.k,
           4);
  } else {
    *err = "Invalid operand in BPF_LD instruction";
    return;
  }
  state->acc_is_valid = true;
  return;
}
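
// For reference, the aligned offsets accepted above correspond to the fields
// of the kernel's struct seccomp_data (a sketch assuming the usual layout of
// int nr; uint32_t arch; uint64_t instruction_pointer; uint64_t args[6]),
// written with the classic BPF macros from <linux/filter.h>:
//
//   BPF_STMT(BPF_LD + BPF_W + BPF_ABS, 0)   // system call number
//   BPF_STMT(BPF_LD + BPF_W + BPF_ABS, 4)   // architecture identifier
//   BPF_STMT(BPF_LD + BPF_W + BPF_ABS, 16)  // low 32 bits of args[0]
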
void Jmp(State* state, const struct sock_filter& insn, const char** err) {
  if (BPF_OP(insn.code) == BPF_JA) {
    if (state->ip + insn.k + 1 >= state->program.size() ||
        state->ip + insn.k + 1 <= state->ip) {
    compilation_failure:
      *err = "Invalid BPF_JMP instruction";
      return;
    }
    state->ip += insn.k;
  } else {
    if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
        state->ip + insn.jt + 1 >= state->program.size() ||
        state->ip + insn.jf + 1 >= state->program.size()) {
      goto compilation_failure;
    }
    switch (BPF_OP(insn.code)) {
      case BPF_JEQ:
        if (state->accumulator == insn.k) {
          state->ip += insn.jt;
        } else {
          state->ip += insn.jf;
        }
        break;
      case BPF_JGT:
        if (state->accumulator > insn.k) {
          state->ip += insn.jt;
        } else {
          state->ip += insn.jf;
        }
        break;
      case BPF_JGE:
        if (state->accumulator >= insn.k) {
          state->ip += insn.jt;
        } else {
          state->ip += insn.jf;
        }
        break;
      case BPF_JSET:
        if (state->accumulator & insn.k) {
          state->ip += insn.jt;
        } else {
          state->ip += insn.jf;
        }
        break;
      default:
        goto compilation_failure;
    }
  }
}
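
// For reference, a conditional jump as it would be written with the classic
// BPF macros from <linux/filter.h> (the system call number and offsets are
// purely illustrative):
//
//   BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_getpid, /*jt=*/0, /*jf=*/1)
//
// As in the kernel, jt/jf are offsets relative to the instruction that
// follows the jump, which is why the bounds checks above add 1 to insn.jt,
// insn.jf and insn.k before comparing against program.size().
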
uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
  if (BPF_SRC(insn.code) != BPF_K) {
    *err = "Invalid BPF_RET instruction";
    return 0;
  }
  return insn.k;
}
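
// A return instruction packs an action and auxiliary data into one 32bit
// constant. A hypothetical example, written with the classic BPF macros
// (EPERM is used here purely for illustration):
//
//   BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA))
//
// EvaluateBPF() below masks such a value with SECCOMP_RET_ACTION to check
// that the action is one this code generator actually emits.
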
void Alu(State* state, const struct sock_filter& insn, const char** err) {
  if (BPF_OP(insn.code) == BPF_NEG) {
    state->accumulator = -state->accumulator;
    return;
  } else {
    if (BPF_SRC(insn.code) != BPF_K) {
      *err = "Unexpected source operand in arithmetic operation";
      return;
    }
    switch (BPF_OP(insn.code)) {
      case BPF_ADD:
        state->accumulator += insn.k;
        break;
      case BPF_SUB:
        state->accumulator -= insn.k;
        break;
      case BPF_MUL:
        state->accumulator *= insn.k;
        break;
      case BPF_DIV:
        if (!insn.k) {
          *err = "Illegal division by zero";
          break;
        }
        state->accumulator /= insn.k;
        break;
      case BPF_MOD:
        if (!insn.k) {
          *err = "Illegal division by zero";
          break;
        }
        state->accumulator %= insn.k;
        break;
      case BPF_OR:
        state->accumulator |= insn.k;
        break;
      case BPF_XOR:
        state->accumulator ^= insn.k;
        break;
      case BPF_AND:
        state->accumulator &= insn.k;
        break;
      case BPF_LSH:
        if (insn.k > 32) {
          *err = "Illegal shift operation";
          break;
        }
        state->accumulator <<= insn.k;
        break;
      case BPF_RSH:
        if (insn.k > 32) {
          *err = "Illegal shift operation";
          break;
        }
        state->accumulator >>= insn.k;
        break;
      default:
        *err = "Invalid operator in arithmetic operation";
        break;
    }
  }
}
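
// For reference, an ALU instruction of the kind handled above operates on the
// 32bit accumulator with an immediate operand, e.g. (illustrative only):
//
//   BPF_STMT(BPF_ALU + BPF_AND + BPF_K, 0xFFFF)  // accumulator &= 0xFFFF
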
}  // namespace

bool Verifier::VerifyBPF(SandboxBPF* sandbox,
                         const std::vector<struct sock_filter>& program,
                         const SandboxBPFPolicy& policy,
                         const char** err) {
  *err = NULL;
  for (SyscallIterator iter(false); !iter.Done();) {
    uint32_t sysnum = iter.Next();
    // We ideally want to iterate over the full system call range and values
    // just above and just below this range. This gives us the full result set
    // of the "evaluators".
    // On Intel systems, this can fail in a surprising way, as a cleared bit 30
    // indicates either i386 or x86-64, while a set bit 30 indicates x32. And
    // unless we pay attention to setting this bit correctly, an early check in
    // our BPF program will make us fail with a misleading error code.
    struct arch_seccomp_data data = {static_cast<int>(sysnum),
                                     static_cast<uint32_t>(SECCOMP_ARCH)};
#if defined(__i386__) || defined(__x86_64__)
#if defined(__x86_64__) && defined(__ILP32__)
    if (!(sysnum & 0x40000000u)) {
      continue;
    }
#else
    if (sysnum & 0x40000000u) {
      continue;
    }
#endif
#endif
    ErrorCode code = iter.IsValid(sysnum)
                         ? policy.EvaluateSyscall(sandbox, sysnum)
                         : policy.InvalidSyscall(sandbox);
    if (!VerifyErrorCode(sandbox, program, &data, code, code, err)) {
      return false;
    }
  }
  return true;
}
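
// A minimal sketch of how a caller might drive the verifier (hypothetical
// variable names; in Chromium this is wired up by the sandbox startup code
// and the unit tests):
//
//   const char* err = NULL;
//   if (!Verifier::VerifyBPF(sandbox, program, policy, &err)) {
//     SANDBOX_DIE(err);
//   }
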
uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
                               const struct arch_seccomp_data& data,
                               const char** err) {
  *err = NULL;
  if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
    *err = "Invalid program length";
    return 0;
  }
  for (State state(program, data); !*err; ++state.ip) {
    if (state.ip >= program.size()) {
      *err = "Invalid instruction pointer in BPF program";
      break;
    }
    const struct sock_filter& insn = program[state.ip];
    switch (BPF_CLASS(insn.code)) {
      case BPF_LD:
        Ld(&state, insn, err);
        break;
      case BPF_JMP:
        Jmp(&state, insn, err);
        break;
      case BPF_RET: {
        uint32_t r = Ret(&state, insn, err);
        switch (r & SECCOMP_RET_ACTION) {
          case SECCOMP_RET_TRAP:
          case SECCOMP_RET_ERRNO:
          case SECCOMP_RET_TRACE:
          case SECCOMP_RET_ALLOW:
            break;
          case SECCOMP_RET_KILL:     // We don't ever generate this
          case SECCOMP_RET_INVALID:  // Should never show up in BPF program
          default:
            *err = "Unexpected return code found in BPF program";
            return 0;
        }
        return r;
      }
      case BPF_ALU:
        Alu(&state, insn, err);
        break;
      default:
        *err = "Unexpected instruction in BPF program";
        break;
    }
  }
  return 0;
}
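
// For illustration, EvaluateBPF() interprets a whole program against a single
// arch_seccomp_data instance. A hypothetical two-instruction program that
// unconditionally allows every system call would evaluate like this (sketch
// only; BPF_STMT comes from <linux/filter.h>):
//
//   std::vector<struct sock_filter> prog;
//   prog.push_back(BPF_STMT(BPF_LD + BPF_W + BPF_ABS, 0));
//   prog.push_back(BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW));
//   const char* err = NULL;
//   uint32_t ret = Verifier::EvaluateBPF(prog, data, &err);
//   // ret == SECCOMP_RET_ALLOW, err == NULL
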
}  // namespace sandbox