Supervised users: Re-check ManagementPolicy when ProfileIsSupervised changes.
[chromium-blink-merge.git] / sandbox / linux / bpf_dsl / verifier.cc
blob417c663e306d28caa15122669b9251d0ce7a8236
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "sandbox/linux/bpf_dsl/verifier.h"
7 #include <string.h>
9 #include <limits>
11 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
12 #include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
13 #include "sandbox/linux/bpf_dsl/policy.h"
14 #include "sandbox/linux/bpf_dsl/policy_compiler.h"
15 #include "sandbox/linux/bpf_dsl/seccomp_macros.h"
16 #include "sandbox/linux/bpf_dsl/syscall_set.h"
17 #include "sandbox/linux/seccomp-bpf/errorcode.h"
18 #include "sandbox/linux/system_headers/linux_filter.h"
19 #include "sandbox/linux/system_headers/linux_seccomp.h"
21 namespace sandbox {
22 namespace bpf_dsl {
24 namespace {
// Bit masks selecting the lower and upper 32-bit halves of a 64-bit
// system call argument. Used below to probe each half independently.
const uint64_t kLower32Bits = std::numeric_limits<uint32_t>::max();
// kLower32Bits is already a uint64_t, so the shift is performed in 64 bits;
// no cast is needed.
const uint64_t kUpper32Bits = kLower32Bits << 32;
29 struct State {
30 State(const std::vector<struct sock_filter>& p,
31 const struct arch_seccomp_data& d)
32 : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
33 const std::vector<struct sock_filter>& program;
34 const struct arch_seccomp_data& data;
35 unsigned int ip;
36 uint32_t accumulator;
37 bool acc_is_valid;
39 private:
40 DISALLOW_IMPLICIT_CONSTRUCTORS(State);
43 uint32_t EvaluateErrorCode(bpf_dsl::PolicyCompiler* compiler,
44 const ErrorCode& code,
45 const struct arch_seccomp_data& data) {
46 if (code.error_type() == ErrorCode::ET_SIMPLE ||
47 code.error_type() == ErrorCode::ET_TRAP) {
48 return code.err();
49 } else if (code.error_type() == ErrorCode::ET_COND) {
50 if (code.width() == ErrorCode::TP_32BIT &&
51 (data.args[code.argno()] >> 32) &&
52 (data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
53 0xFFFFFFFF80000000ull) {
54 return compiler->Unexpected64bitArgument().err();
56 bool equal = (data.args[code.argno()] & code.mask()) == code.value();
57 return EvaluateErrorCode(compiler, equal ? *code.passed() : *code.failed(),
58 data);
59 } else {
60 return SECCOMP_RET_INVALID;
64 bool VerifyErrorCode(bpf_dsl::PolicyCompiler* compiler,
65 const std::vector<struct sock_filter>& program,
66 struct arch_seccomp_data* data,
67 const ErrorCode& root_code,
68 const ErrorCode& code,
69 const char** err) {
70 if (code.error_type() == ErrorCode::ET_SIMPLE ||
71 code.error_type() == ErrorCode::ET_TRAP) {
72 const uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
73 if (*err) {
74 return false;
76 const uint32_t policy_ret = EvaluateErrorCode(compiler, root_code, *data);
77 if (computed_ret != policy_ret) {
78 // For efficiency's sake, we'd much rather compare "computed_ret"
79 // against "code.err()". This works most of the time, but it doesn't
80 // always work for nested conditional expressions. The test values
81 // that we generate on the fly to probe expressions can trigger
82 // code flow decisions in multiple nodes of the decision tree, and the
83 // only way to compute the correct error code in that situation is by
84 // calling EvaluateErrorCode().
85 *err = "Exit code from BPF program doesn't match";
86 return false;
88 } else if (code.error_type() == ErrorCode::ET_COND) {
89 if (code.argno() < 0 || code.argno() >= 6) {
90 *err = "Invalid argument number in error code";
91 return false;
94 // TODO(mdempsky): The test values generated here try to provide good
95 // coverage for generated BPF instructions while avoiding combinatorial
96 // explosion on large policies. Ideally we would instead take a fuzzing-like
97 // approach and generate a bounded number of test cases regardless of policy
98 // size.
100 // Verify that we can check a value for simple equality.
101 data->args[code.argno()] = code.value();
102 if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
103 err)) {
104 return false;
107 // If mask ignores any bits, verify that setting those bits is still
108 // detected as equality.
109 uint64_t ignored_bits = ~code.mask();
110 if (code.width() == ErrorCode::TP_32BIT) {
111 ignored_bits = static_cast<uint32_t>(ignored_bits);
113 if ((ignored_bits & kLower32Bits) != 0) {
114 data->args[code.argno()] = code.value() | (ignored_bits & kLower32Bits);
115 if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
116 err)) {
117 return false;
120 if ((ignored_bits & kUpper32Bits) != 0) {
121 data->args[code.argno()] = code.value() | (ignored_bits & kUpper32Bits);
122 if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
123 err)) {
124 return false;
128 // Verify that changing bits included in the mask is detected as inequality.
129 if ((code.mask() & kLower32Bits) != 0) {
130 data->args[code.argno()] = code.value() ^ (code.mask() & kLower32Bits);
131 if (!VerifyErrorCode(compiler, program, data, root_code, *code.failed(),
132 err)) {
133 return false;
136 if ((code.mask() & kUpper32Bits) != 0) {
137 data->args[code.argno()] = code.value() ^ (code.mask() & kUpper32Bits);
138 if (!VerifyErrorCode(compiler, program, data, root_code, *code.failed(),
139 err)) {
140 return false;
144 if (code.width() == ErrorCode::TP_32BIT) {
145 // For 32-bit system call arguments, we emit additional instructions to
146 // validate the upper 32-bits. Here we test that validation.
148 // Arbitrary 64-bit values should be rejected.
149 data->args[code.argno()] = 1ULL << 32;
150 if (!VerifyErrorCode(compiler, program, data, root_code,
151 compiler->Unexpected64bitArgument(), err)) {
152 return false;
155 // Upper 32-bits set without the MSB of the lower 32-bits set should be
156 // rejected too.
157 data->args[code.argno()] = kUpper32Bits;
158 if (!VerifyErrorCode(compiler, program, data, root_code,
159 compiler->Unexpected64bitArgument(), err)) {
160 return false;
163 } else {
164 *err = "Attempting to return invalid error code from BPF program";
165 return false;
167 return true;
170 void Ld(State* state, const struct sock_filter& insn, const char** err) {
171 if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS ||
172 insn.jt != 0 || insn.jf != 0) {
173 *err = "Invalid BPF_LD instruction";
174 return;
176 if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
177 // We only allow loading of properly aligned 32bit quantities.
178 memcpy(&state->accumulator,
179 reinterpret_cast<const char*>(&state->data) + insn.k, 4);
180 } else {
181 *err = "Invalid operand in BPF_LD instruction";
182 return;
184 state->acc_is_valid = true;
185 return;
188 void Jmp(State* state, const struct sock_filter& insn, const char** err) {
189 if (BPF_OP(insn.code) == BPF_JA) {
190 if (state->ip + insn.k + 1 >= state->program.size() ||
191 state->ip + insn.k + 1 <= state->ip) {
192 compilation_failure:
193 *err = "Invalid BPF_JMP instruction";
194 return;
196 state->ip += insn.k;
197 } else {
198 if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
199 state->ip + insn.jt + 1 >= state->program.size() ||
200 state->ip + insn.jf + 1 >= state->program.size()) {
201 goto compilation_failure;
203 switch (BPF_OP(insn.code)) {
204 case BPF_JEQ:
205 if (state->accumulator == insn.k) {
206 state->ip += insn.jt;
207 } else {
208 state->ip += insn.jf;
210 break;
211 case BPF_JGT:
212 if (state->accumulator > insn.k) {
213 state->ip += insn.jt;
214 } else {
215 state->ip += insn.jf;
217 break;
218 case BPF_JGE:
219 if (state->accumulator >= insn.k) {
220 state->ip += insn.jt;
221 } else {
222 state->ip += insn.jf;
224 break;
225 case BPF_JSET:
226 if (state->accumulator & insn.k) {
227 state->ip += insn.jt;
228 } else {
229 state->ip += insn.jf;
231 break;
232 default:
233 goto compilation_failure;
238 uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
239 if (BPF_SRC(insn.code) != BPF_K) {
240 *err = "Invalid BPF_RET instruction";
241 return 0;
243 return insn.k;
246 void Alu(State* state, const struct sock_filter& insn, const char** err) {
247 if (BPF_OP(insn.code) == BPF_NEG) {
248 state->accumulator = -state->accumulator;
249 return;
250 } else {
251 if (BPF_SRC(insn.code) != BPF_K) {
252 *err = "Unexpected source operand in arithmetic operation";
253 return;
255 switch (BPF_OP(insn.code)) {
256 case BPF_ADD:
257 state->accumulator += insn.k;
258 break;
259 case BPF_SUB:
260 state->accumulator -= insn.k;
261 break;
262 case BPF_MUL:
263 state->accumulator *= insn.k;
264 break;
265 case BPF_DIV:
266 if (!insn.k) {
267 *err = "Illegal division by zero";
268 break;
270 state->accumulator /= insn.k;
271 break;
272 case BPF_MOD:
273 if (!insn.k) {
274 *err = "Illegal division by zero";
275 break;
277 state->accumulator %= insn.k;
278 break;
279 case BPF_OR:
280 state->accumulator |= insn.k;
281 break;
282 case BPF_XOR:
283 state->accumulator ^= insn.k;
284 break;
285 case BPF_AND:
286 state->accumulator &= insn.k;
287 break;
288 case BPF_LSH:
289 if (insn.k > 32) {
290 *err = "Illegal shift operation";
291 break;
293 state->accumulator <<= insn.k;
294 break;
295 case BPF_RSH:
296 if (insn.k > 32) {
297 *err = "Illegal shift operation";
298 break;
300 state->accumulator >>= insn.k;
301 break;
302 default:
303 *err = "Invalid operator in arithmetic operation";
304 break;
309 } // namespace
311 bool Verifier::VerifyBPF(bpf_dsl::PolicyCompiler* compiler,
312 const std::vector<struct sock_filter>& program,
313 const bpf_dsl::Policy& policy,
314 const char** err) {
315 *err = NULL;
316 for (uint32_t sysnum : SyscallSet::All()) {
317 // We ideally want to iterate over the full system call range and values
318 // just above and just below this range. This gives us the full result set
319 // of the "evaluators".
320 // On Intel systems, this can fail in a surprising way, as a cleared bit 30
321 // indicates either i386 or x86-64; and a set bit 30 indicates x32. And
322 // unless we pay attention to setting this bit correctly, an early check in
323 // our BPF program will make us fail with a misleading error code.
324 struct arch_seccomp_data data = {static_cast<int>(sysnum),
325 static_cast<uint32_t>(SECCOMP_ARCH)};
326 #if defined(__i386__) || defined(__x86_64__)
327 #if defined(__x86_64__) && defined(__ILP32__)
328 if (!(sysnum & 0x40000000u)) {
329 continue;
331 #else
332 if (sysnum & 0x40000000u) {
333 continue;
335 #endif
336 #endif
337 ErrorCode code = SyscallSet::IsValid(sysnum)
338 ? policy.EvaluateSyscall(sysnum)->Compile(compiler)
339 : policy.InvalidSyscall()->Compile(compiler);
340 if (!VerifyErrorCode(compiler, program, &data, code, code, err)) {
341 return false;
344 return true;
347 uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
348 const struct arch_seccomp_data& data,
349 const char** err) {
350 *err = NULL;
351 if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
352 *err = "Invalid program length";
353 return 0;
355 for (State state(program, data); !*err; ++state.ip) {
356 if (state.ip >= program.size()) {
357 *err = "Invalid instruction pointer in BPF program";
358 break;
360 const struct sock_filter& insn = program[state.ip];
361 switch (BPF_CLASS(insn.code)) {
362 case BPF_LD:
363 Ld(&state, insn, err);
364 break;
365 case BPF_JMP:
366 Jmp(&state, insn, err);
367 break;
368 case BPF_RET: {
369 uint32_t r = Ret(&state, insn, err);
370 switch (r & SECCOMP_RET_ACTION) {
371 case SECCOMP_RET_TRAP:
372 case SECCOMP_RET_ERRNO:
373 case SECCOMP_RET_TRACE:
374 case SECCOMP_RET_ALLOW:
375 break;
376 case SECCOMP_RET_KILL: // We don't ever generate this
377 case SECCOMP_RET_INVALID: // Should never show up in BPF program
378 default:
379 *err = "Unexpected return code found in BPF program";
380 return 0;
382 return r;
384 case BPF_ALU:
385 Alu(&state, insn, err);
386 break;
387 default:
388 *err = "Unexpected instruction in BPF program";
389 break;
392 return 0;
395 } // namespace bpf_dsl
396 } // namespace sandbox