ozone: evdev: Sync caps lock LED state to evdev
[chromium-blink-merge.git] / sandbox / linux / seccomp-bpf / verifier.cc
blobe533b2d91493bad8f2baffa568ea254df425d591
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "sandbox/linux/seccomp-bpf/verifier.h"
7 #include <string.h>
9 #include <limits>
11 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
12 #include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
13 #include "sandbox/linux/bpf_dsl/policy.h"
14 #include "sandbox/linux/bpf_dsl/policy_compiler.h"
15 #include "sandbox/linux/bpf_dsl/seccomp_macros.h"
16 #include "sandbox/linux/bpf_dsl/syscall_set.h"
17 #include "sandbox/linux/seccomp-bpf/errorcode.h"
18 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
19 #include "sandbox/linux/system_headers/linux_seccomp.h"
21 namespace sandbox {
23 namespace {
// Bit masks used when generating probe values for 64-bit system call
// arguments: the low 32 bits, the high 32 bits, and the full 64-bit range.
const uint64_t kLower32Bits = std::numeric_limits<uint32_t>::max();
const uint64_t kUpper32Bits = static_cast<uint64_t>(kLower32Bits) << 32;
const uint64_t kFull64Bits = std::numeric_limits<uint64_t>::max();
// Mutable interpreter state threaded through one simulated execution of a
// BPF program in Verifier::EvaluateBPF(). Holds the program, the simulated
// system call data being inspected, and the virtual machine registers.
struct State {
  State(const std::vector<struct sock_filter>& p,
        const struct arch_seccomp_data& d)
      : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
  const std::vector<struct sock_filter>& program;  // Program being simulated.
  const struct arch_seccomp_data& data;  // Simulated system call parameters.
  unsigned int ip;                       // Index of the current instruction.
  uint32_t accumulator;                  // BPF accumulator register.
  bool acc_is_valid;  // True once a BPF_LD has initialized the accumulator.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};
// Computes the seccomp return value that the policy's ErrorCode tree yields
// for the system call described by |data|, by walking conditional nodes
// recursively. This is the reference result that the compiled BPF program's
// output is compared against. Returns SECCOMP_RET_INVALID for a node type
// that should never appear in a compiled policy.
uint32_t EvaluateErrorCode(bpf_dsl::PolicyCompiler* compiler,
                           const ErrorCode& code,
                           const struct arch_seccomp_data& data) {
  if (code.error_type() == ErrorCode::ET_SIMPLE ||
      code.error_type() == ErrorCode::ET_TRAP) {
    return code.err();
  } else if (code.error_type() == ErrorCode::ET_COND) {
    // A 32-bit comparison requires the upper half of the argument to be
    // either all zero, or the sign-extension of a negative 32-bit value;
    // anything else is reported as an unexpected 64-bit argument.
    if (code.width() == ErrorCode::TP_32BIT &&
        (data.args[code.argno()] >> 32) &&
        (data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
            0xFFFFFFFF80000000ull) {
      return compiler->Unexpected64bitArgument().err();
    }
    // Recurse into the subtree selected by the masked equality test.
    bool equal = (data.args[code.argno()] & code.mask()) == code.value();
    return EvaluateErrorCode(
        compiler, equal ? *code.passed() : *code.failed(), data);
  } else {
    return SECCOMP_RET_INVALID;
  }
}
// Recursively verifies that the compiled |program| agrees with the policy's
// ErrorCode tree. At each conditional node it mutates the probed argument in
// |data| to exercise both the passed() and failed() subtrees as well as the
// 32-bit upper-half validation, then runs the whole program via EvaluateBPF()
// at every leaf and compares against EvaluateErrorCode(). On mismatch, sets
// *err to a static description and returns false.
bool VerifyErrorCode(bpf_dsl::PolicyCompiler* compiler,
                     const std::vector<struct sock_filter>& program,
                     struct arch_seccomp_data* data,
                     const ErrorCode& root_code,
                     const ErrorCode& code,
                     const char** err) {
  if (code.error_type() == ErrorCode::ET_SIMPLE ||
      code.error_type() == ErrorCode::ET_TRAP) {
    uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
    if (*err) {
      return false;
    } else if (computed_ret != EvaluateErrorCode(compiler, root_code, *data)) {
      // For efficiency's sake, we'd much rather compare "computed_ret"
      // against "code.err()". This works most of the time, but it doesn't
      // always work for nested conditional expressions. The test values
      // that we generate on the fly to probe expressions can trigger
      // code flow decisions in multiple nodes of the decision tree, and the
      // only way to compute the correct error code in that situation is by
      // calling EvaluateErrorCode().
      *err = "Exit code from BPF program doesn't match";
      return false;
    }
  } else if (code.error_type() == ErrorCode::ET_COND) {
    if (code.argno() < 0 || code.argno() >= 6) {
      *err = "Invalid argument number in error code";
      return false;
    }

    // TODO(mdempsky): The test values generated here try to provide good
    // coverage for generated BPF instructions while avoiding combinatorial
    // explosion on large policies. Ideally we would instead take a fuzzing-like
    // approach and generate a bounded number of test cases regardless of policy
    // size.

    // Verify that we can check a value for simple equality.
    data->args[code.argno()] = code.value();
    if (!VerifyErrorCode(
            compiler, program, data, root_code, *code.passed(), err)) {
      return false;
    }

    // If mask ignores any bits, verify that setting those bits is still
    // detected as equality.
    uint64_t ignored_bits = ~code.mask();
    if (code.width() == ErrorCode::TP_32BIT) {
      // For 32-bit comparisons only the lower half of the mask is relevant.
      ignored_bits = static_cast<uint32_t>(ignored_bits);
    }
    if ((ignored_bits & kLower32Bits) != 0) {
      data->args[code.argno()] = code.value() | (ignored_bits & kLower32Bits);
      if (!VerifyErrorCode(
              compiler, program, data, root_code, *code.passed(), err)) {
        return false;
      }
    }
    if ((ignored_bits & kUpper32Bits) != 0) {
      data->args[code.argno()] = code.value() | (ignored_bits & kUpper32Bits);
      if (!VerifyErrorCode(
              compiler, program, data, root_code, *code.passed(), err)) {
        return false;
      }
    }

    // Verify that changing bits included in the mask is detected as inequality.
    if ((code.mask() & kLower32Bits) != 0) {
      data->args[code.argno()] = code.value() ^ (code.mask() & kLower32Bits);
      if (!VerifyErrorCode(
              compiler, program, data, root_code, *code.failed(), err)) {
        return false;
      }
    }
    if ((code.mask() & kUpper32Bits) != 0) {
      data->args[code.argno()] = code.value() ^ (code.mask() & kUpper32Bits);
      if (!VerifyErrorCode(
              compiler, program, data, root_code, *code.failed(), err)) {
        return false;
      }
    }

    if (code.width() == ErrorCode::TP_32BIT) {
      // For 32-bit system call arguments, we emit additional instructions to
      // validate the upper 32-bits. Here we test that validation.

      // Arbitrary 64-bit values should be rejected.
      data->args[code.argno()] = 1ULL << 32;
      if (!VerifyErrorCode(compiler,
                           program,
                           data,
                           root_code,
                           compiler->Unexpected64bitArgument(),
                           err)) {
        return false;
      }

      // Upper 32-bits set without the MSB of the lower 32-bits set should be
      // rejected too.
      data->args[code.argno()] = kUpper32Bits;
      if (!VerifyErrorCode(compiler,
                           program,
                           data,
                           root_code,
                           compiler->Unexpected64bitArgument(),
                           err)) {
        return false;
      }
    }
  } else {
    *err = "Attempting to return invalid error code from BPF program";
    return false;
  }
  return true;
}
176 void Ld(State* state, const struct sock_filter& insn, const char** err) {
177 if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS ||
178 insn.jt != 0 || insn.jf != 0) {
179 *err = "Invalid BPF_LD instruction";
180 return;
182 if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
183 // We only allow loading of properly aligned 32bit quantities.
184 memcpy(&state->accumulator,
185 reinterpret_cast<const char*>(&state->data) + insn.k,
187 } else {
188 *err = "Invalid operand in BPF_LD instruction";
189 return;
191 state->acc_is_valid = true;
192 return;
195 void Jmp(State* state, const struct sock_filter& insn, const char** err) {
196 if (BPF_OP(insn.code) == BPF_JA) {
197 if (state->ip + insn.k + 1 >= state->program.size() ||
198 state->ip + insn.k + 1 <= state->ip) {
199 compilation_failure:
200 *err = "Invalid BPF_JMP instruction";
201 return;
203 state->ip += insn.k;
204 } else {
205 if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
206 state->ip + insn.jt + 1 >= state->program.size() ||
207 state->ip + insn.jf + 1 >= state->program.size()) {
208 goto compilation_failure;
210 switch (BPF_OP(insn.code)) {
211 case BPF_JEQ:
212 if (state->accumulator == insn.k) {
213 state->ip += insn.jt;
214 } else {
215 state->ip += insn.jf;
217 break;
218 case BPF_JGT:
219 if (state->accumulator > insn.k) {
220 state->ip += insn.jt;
221 } else {
222 state->ip += insn.jf;
224 break;
225 case BPF_JGE:
226 if (state->accumulator >= insn.k) {
227 state->ip += insn.jt;
228 } else {
229 state->ip += insn.jf;
231 break;
232 case BPF_JSET:
233 if (state->accumulator & insn.k) {
234 state->ip += insn.jt;
235 } else {
236 state->ip += insn.jf;
238 break;
239 default:
240 goto compilation_failure;
245 uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
246 if (BPF_SRC(insn.code) != BPF_K) {
247 *err = "Invalid BPF_RET instruction";
248 return 0;
250 return insn.k;
253 void Alu(State* state, const struct sock_filter& insn, const char** err) {
254 if (BPF_OP(insn.code) == BPF_NEG) {
255 state->accumulator = -state->accumulator;
256 return;
257 } else {
258 if (BPF_SRC(insn.code) != BPF_K) {
259 *err = "Unexpected source operand in arithmetic operation";
260 return;
262 switch (BPF_OP(insn.code)) {
263 case BPF_ADD:
264 state->accumulator += insn.k;
265 break;
266 case BPF_SUB:
267 state->accumulator -= insn.k;
268 break;
269 case BPF_MUL:
270 state->accumulator *= insn.k;
271 break;
272 case BPF_DIV:
273 if (!insn.k) {
274 *err = "Illegal division by zero";
275 break;
277 state->accumulator /= insn.k;
278 break;
279 case BPF_MOD:
280 if (!insn.k) {
281 *err = "Illegal division by zero";
282 break;
284 state->accumulator %= insn.k;
285 break;
286 case BPF_OR:
287 state->accumulator |= insn.k;
288 break;
289 case BPF_XOR:
290 state->accumulator ^= insn.k;
291 break;
292 case BPF_AND:
293 state->accumulator &= insn.k;
294 break;
295 case BPF_LSH:
296 if (insn.k > 32) {
297 *err = "Illegal shift operation";
298 break;
300 state->accumulator <<= insn.k;
301 break;
302 case BPF_RSH:
303 if (insn.k > 32) {
304 *err = "Illegal shift operation";
305 break;
307 state->accumulator >>= insn.k;
308 break;
309 default:
310 *err = "Invalid operator in arithmetic operation";
311 break;
316 } // namespace
// Cross-checks the compiled |program| against |policy| for every system call
// number (plus out-of-range values supplied by SyscallSet::All()). Returns
// true if every probed input produces the return value the policy demands;
// otherwise sets *err and returns false.
bool Verifier::VerifyBPF(bpf_dsl::PolicyCompiler* compiler,
                         const std::vector<struct sock_filter>& program,
                         const bpf_dsl::Policy& policy,
                         const char** err) {
  *err = NULL;
  for (uint32_t sysnum : SyscallSet::All()) {
    // We ideally want to iterate over the full system call range and values
    // just above and just below this range. This gives us the full result set
    // of the "evaluators".
    // On Intel systems, this can fail in a surprising way, as a cleared bit 30
    // indicates either i386 or x86-64; and a set bit 30 indicates x32. And
    // unless we pay attention to setting this bit correctly, an early check in
    // our BPF program will make us fail with a misleading error code.
    struct arch_seccomp_data data = {static_cast<int>(sysnum),
                                     static_cast<uint32_t>(SECCOMP_ARCH)};
#if defined(__i386__) || defined(__x86_64__)
#if defined(__x86_64__) && defined(__ILP32__)
    // On x32, only probe system call numbers with the x32 marker bit set.
    if (!(sysnum & 0x40000000u)) {
      continue;
    }
#else
    // On i386/x86-64, skip x32-marked system call numbers.
    if (sysnum & 0x40000000u) {
      continue;
    }
#endif
#endif
    // Compile the policy's verdict for this system call (or the invalid-
    // syscall verdict) and verify the program agrees on all probe inputs.
    ErrorCode code = SyscallSet::IsValid(sysnum)
                         ? policy.EvaluateSyscall(sysnum)->Compile(compiler)
                         : policy.InvalidSyscall()->Compile(compiler);
    if (!VerifyErrorCode(compiler, program, &data, code, code, err)) {
      return false;
    }
  }
  return true;
}
// Interprets |program| against the simulated system call |data| and returns
// the program's exit value. Only the instruction subset the policy compiler
// emits is supported (LD/JMP/RET/ALU); anything else — or a return action we
// never generate — sets *err and returns 0.
uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
                               const struct arch_seccomp_data& data,
                               const char** err) {
  *err = NULL;
  if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
    *err = "Invalid program length";
    return 0;
  }
  // Step one instruction per iteration; jump handlers add extra offsets to
  // state.ip before the loop's ++state.ip. Any handler error ends the loop.
  for (State state(program, data); !*err; ++state.ip) {
    if (state.ip >= program.size()) {
      *err = "Invalid instruction pointer in BPF program";
      break;
    }
    const struct sock_filter& insn = program[state.ip];
    switch (BPF_CLASS(insn.code)) {
      case BPF_LD:
        Ld(&state, insn, err);
        break;
      case BPF_JMP:
        Jmp(&state, insn, err);
        break;
      case BPF_RET: {
        uint32_t r = Ret(&state, insn, err);
        // Only return actions the policy compiler can emit are accepted.
        switch (r & SECCOMP_RET_ACTION) {
          case SECCOMP_RET_TRAP:
          case SECCOMP_RET_ERRNO:
          case SECCOMP_RET_TRACE:
          case SECCOMP_RET_ALLOW:
            break;
          case SECCOMP_RET_KILL:     // We don't ever generate this
          case SECCOMP_RET_INVALID:  // Should never show up in BPF program
          default:
            *err = "Unexpected return code found in BPF program";
            return 0;
        }
        return r;
      }
      case BPF_ALU:
        Alu(&state, insn, err);
        break;
      default:
        *err = "Unexpected instruction in BPF program";
        break;
    }
  }
  return 0;
}
402 } // namespace sandbox