// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/bpf_dsl/verifier.h"

#include <stdint.h>
#include <string.h>

#include <limits>
#include <vector>

#include "base/macros.h"
#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
#include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
#include "sandbox/linux/bpf_dsl/policy.h"
#include "sandbox/linux/bpf_dsl/policy_compiler.h"
#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
#include "sandbox/linux/bpf_dsl/syscall_set.h"
#include "sandbox/linux/seccomp-bpf/errorcode.h"
#include "sandbox/linux/system_headers/linux_seccomp.h"
namespace sandbox {
namespace bpf_dsl {

namespace {

const uint64_t kLower32Bits = std::numeric_limits<uint32_t>::max();
const uint64_t kUpper32Bits = static_cast<uint64_t>(kLower32Bits) << 32;
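
// State tracks the registers of the simulated BPF machine used by
// Verifier::EvaluateBPF() below: the instruction pointer, the 32-bit
// accumulator, and a flag recording whether the accumulator has been
// loaded from "data" yet.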
struct State {
  State(const std::vector<struct sock_filter>& p,
        const struct arch_seccomp_data& d)
      : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
  const std::vector<struct sock_filter>& program;
  const struct arch_seccomp_data& data;
  unsigned int ip;
  uint32_t accumulator;
  bool acc_is_valid;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};
uint32_t EvaluateErrorCode(bpf_dsl::PolicyCompiler* compiler,
                           const ErrorCode& code,
                           const struct arch_seccomp_data& data) {
  if (code.error_type() == ErrorCode::ET_SIMPLE ||
      code.error_type() == ErrorCode::ET_TRAP) {
    return code.err();
  } else if (code.error_type() == ErrorCode::ET_COND) {
    if (code.width() == ErrorCode::TP_32BIT &&
        (data.args[code.argno()] >> 32) &&
        (data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
            0xFFFFFFFF80000000ull) {
      return compiler->Unexpected64bitArgument().err();
    }
    bool equal = (data.args[code.argno()] & code.mask()) == code.value();
    return EvaluateErrorCode(compiler, equal ? *code.passed() : *code.failed(),
                             data);
  } else {
    return SECCOMP_RET_INVALID;
  }
}
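
// Illustrative example: for a policy clause that allows a system call only
// when argument 0 equals 4 and otherwise returns EPERM, EvaluateErrorCode()
// walks the ErrorCode tree using the probe value in data.args[0]; a probe of
// 4 resolves to the "passed" leaf (SECCOMP_RET_ALLOW), while any other
// in-range probe resolves to the "failed" leaf (SECCOMP_RET_ERRNO | EPERM).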
bool VerifyErrorCode(bpf_dsl::PolicyCompiler* compiler,
                     const std::vector<struct sock_filter>& program,
                     struct arch_seccomp_data* data,
                     const ErrorCode& root_code,
                     const ErrorCode& code,
                     const char** err) {
  if (code.error_type() == ErrorCode::ET_SIMPLE ||
      code.error_type() == ErrorCode::ET_TRAP) {
    const uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
    if (*err) {
      return false;
    }
    const uint32_t policy_ret = EvaluateErrorCode(compiler, root_code, *data);
    if (computed_ret != policy_ret) {
      // For efficiency's sake, we'd much rather compare "computed_ret"
      // against "code.err()". This works most of the time, but it doesn't
      // always work for nested conditional expressions. The test values
      // that we generate on the fly to probe expressions can trigger
      // code flow decisions in multiple nodes of the decision tree, and the
      // only way to compute the correct error code in that situation is by
      // calling EvaluateErrorCode().
      *err = "Exit code from BPF program doesn't match";
      return false;
    }
  } else if (code.error_type() == ErrorCode::ET_COND) {
    if (code.argno() < 0 || code.argno() >= 6) {
      *err = "Invalid argument number in error code";
      return false;
    }

    // TODO(mdempsky): The test values generated here try to provide good
    // coverage for generated BPF instructions while avoiding combinatorial
    // explosion on large policies. Ideally we would instead take a
    // fuzzing-like approach and generate a bounded number of test cases
    // regardless of policy size.

    // Verify that we can check a value for simple equality.
    data->args[code.argno()] = code.value();
    if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
                         err)) {
      return false;
    }

    // If the mask ignores any bits, verify that setting those bits is still
    // detected as equality.
    uint64_t ignored_bits = ~code.mask();
    if (code.width() == ErrorCode::TP_32BIT) {
      ignored_bits = static_cast<uint32_t>(ignored_bits);
    }
    if ((ignored_bits & kLower32Bits) != 0) {
      data->args[code.argno()] = code.value() | (ignored_bits & kLower32Bits);
      if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
                           err)) {
        return false;
      }
    }
    if ((ignored_bits & kUpper32Bits) != 0) {
      data->args[code.argno()] = code.value() | (ignored_bits & kUpper32Bits);
      if (!VerifyErrorCode(compiler, program, data, root_code, *code.passed(),
                           err)) {
        return false;
      }
    }

    // Verify that changing bits included in the mask is detected as
    // inequality.
    if ((code.mask() & kLower32Bits) != 0) {
      data->args[code.argno()] = code.value() ^ (code.mask() & kLower32Bits);
      if (!VerifyErrorCode(compiler, program, data, root_code, *code.failed(),
                           err)) {
        return false;
      }
    }
    if ((code.mask() & kUpper32Bits) != 0) {
      data->args[code.argno()] = code.value() ^ (code.mask() & kUpper32Bits);
      if (!VerifyErrorCode(compiler, program, data, root_code, *code.failed(),
                           err)) {
        return false;
      }
    }

    if (code.width() == ErrorCode::TP_32BIT) {
      // For 32-bit system call arguments, we emit additional instructions to
      // validate the upper 32 bits. Here we test that validation.

      // Arbitrary 64-bit values should be rejected.
      data->args[code.argno()] = 1ULL << 32;
      if (!VerifyErrorCode(compiler, program, data, root_code,
                           compiler->Unexpected64bitArgument(), err)) {
        return false;
      }

      // Upper 32 bits set without the MSB of the lower 32 bits set should be
      // rejected, too.
      data->args[code.argno()] = kUpper32Bits;
      if (!VerifyErrorCode(compiler, program, data, root_code,
                           compiler->Unexpected64bitArgument(), err)) {
        return false;
      }
    }
  } else {
    *err = "Attempting to return invalid error code from BPF program";
    return false;
  }
  return true;
}
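
// In total, the ET_COND case above probes each conditional node with several
// argument values: the exact comparison value, values with ignored (unmasked)
// bits set in either half, values with masked bits flipped in either half,
// and, for 32-bit comparisons, values with the upper 32 bits populated, which
// the generated program must reject as unexpected 64-bit arguments.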
void Ld(State* state, const struct sock_filter& insn, const char** err) {
  if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS ||
      insn.jt != 0 || insn.jf != 0) {
    *err = "Invalid BPF_LD instruction";
    return;
  }
  if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
    // We only allow loading of properly aligned 32-bit quantities.
    memcpy(&state->accumulator,
           reinterpret_cast<const char*>(&state->data) + insn.k, 4);
  } else {
    *err = "Invalid operand in BPF_LD instruction";
    return;
  }
  state->acc_is_valid = true;
}
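
// For reference, a typical load emitted by the compiler reads one aligned
// 32-bit word of the seccomp data, e.g. (sketch using the kernel's BPF_STMT
// macro):
//
//   BPF_STMT(BPF_LD + BPF_W + BPF_ABS,
//            offsetof(struct arch_seccomp_data, nr));
//
// which Ld() simulates by copying 4 bytes at offset "k" of "data" into the
// accumulator.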
void Jmp(State* state, const struct sock_filter& insn, const char** err) {
  if (BPF_OP(insn.code) == BPF_JA) {
    if (state->ip + insn.k + 1 >= state->program.size() ||
        state->ip + insn.k + 1 <= state->ip) {
    compilation_failure:
      *err = "Invalid BPF_JMP instruction";
      return;
    }
    state->ip += insn.k;
  } else {
    if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
        state->ip + insn.jt + 1 >= state->program.size() ||
        state->ip + insn.jf + 1 >= state->program.size()) {
      goto compilation_failure;
    }
    switch (BPF_OP(insn.code)) {
      case BPF_JEQ:
        if (state->accumulator == insn.k) {
          state->ip += insn.jt;
        } else {
          state->ip += insn.jf;
        }
        break;
      case BPF_JGT:
        if (state->accumulator > insn.k) {
          state->ip += insn.jt;
        } else {
          state->ip += insn.jf;
        }
        break;
      case BPF_JGE:
        if (state->accumulator >= insn.k) {
          state->ip += insn.jt;
        } else {
          state->ip += insn.jf;
        }
        break;
      case BPF_JSET:
        if (state->accumulator & insn.k) {
          state->ip += insn.jt;
        } else {
          state->ip += insn.jf;
        }
        break;
      default:
        goto compilation_failure;
    }
  }
}
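
// Illustrative example: for the conditional jump
//
//   BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, /*jt=*/1, /*jf=*/0),
//
// Jmp() adds 1 to the instruction pointer when the accumulator equals 42 and
// 0 otherwise; the loop in EvaluateBPF() then advances past the jump
// instruction itself.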
uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
  if (BPF_SRC(insn.code) != BPF_K) {
    *err = "Invalid BPF_RET instruction";
    return 0;
  }
  return insn.k;
}
void Alu(State* state, const struct sock_filter& insn, const char** err) {
  if (BPF_OP(insn.code) == BPF_NEG) {
    state->accumulator = -state->accumulator;
    return;
  } else {
    if (BPF_SRC(insn.code) != BPF_K) {
      *err = "Unexpected source operand in arithmetic operation";
      return;
    }
    switch (BPF_OP(insn.code)) {
      case BPF_ADD:
        state->accumulator += insn.k;
        break;
      case BPF_SUB:
        state->accumulator -= insn.k;
        break;
      case BPF_MUL:
        state->accumulator *= insn.k;
        break;
      case BPF_DIV:
        if (!insn.k) {
          *err = "Illegal division by zero";
          break;
        }
        state->accumulator /= insn.k;
        break;
      case BPF_MOD:
        if (!insn.k) {
          *err = "Illegal division by zero";
          break;
        }
        state->accumulator %= insn.k;
        break;
      case BPF_OR:
        state->accumulator |= insn.k;
        break;
      case BPF_XOR:
        state->accumulator ^= insn.k;
        break;
      case BPF_AND:
        state->accumulator &= insn.k;
        break;
      case BPF_LSH:
        if (insn.k > 32) {
          *err = "Illegal shift operation";
          break;
        }
        state->accumulator <<= insn.k;
        break;
      case BPF_RSH:
        if (insn.k > 32) {
          *err = "Illegal shift operation";
          break;
        }
        state->accumulator >>= insn.k;
        break;
      default:
        *err = "Invalid operator in arithmetic operation";
        break;
    }
  }
}

}  // namespace
bool Verifier::VerifyBPF(bpf_dsl::PolicyCompiler* compiler,
                         const std::vector<struct sock_filter>& program,
                         const bpf_dsl::Policy& policy,
                         const char** err) {
  *err = NULL;
  for (uint32_t sysnum : SyscallSet::All()) {
    // We ideally want to iterate over the full system call range and values
    // just above and just below this range. This gives us the full result set
    // of the "evaluators".
    // On Intel systems, this can fail in a surprising way, as a cleared bit 30
    // indicates either i386 or x86-64; and a set bit 30 indicates x32. And
    // unless we pay attention to setting this bit correctly, an early check in
    // our BPF program will make us fail with a misleading error code.
    struct arch_seccomp_data data = {static_cast<int>(sysnum),
                                     static_cast<uint32_t>(SECCOMP_ARCH)};
#if defined(__i386__) || defined(__x86_64__)
#if defined(__x86_64__) && defined(__ILP32__)
    if (!(sysnum & 0x40000000u)) {
      continue;
    }
#else
    if (sysnum & 0x40000000u) {
      continue;
    }
#endif
#endif
    ErrorCode code = SyscallSet::IsValid(sysnum)
                         ? policy.EvaluateSyscall(sysnum)->Compile(compiler)
                         : policy.InvalidSyscall()->Compile(compiler);
    if (!VerifyErrorCode(compiler, program, &data, code, code, err)) {
      return false;
    }
  }
  return true;
}
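
// Typical use (sketch; compiling the policy into "program" with
// bpf_dsl::PolicyCompiler happens elsewhere): a test re-checks the generated
// filter against the policy it was compiled from:
//
//   const char* err = NULL;
//   if (!Verifier::VerifyBPF(&compiler, program, policy, &err)) {
//     // "err" now points to a static string describing the first mismatch.
//   }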
uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
                               const struct arch_seccomp_data& data,
                               const char** err) {
  *err = NULL;
  if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
    *err = "Invalid program length";
    return 0;
  }
  for (State state(program, data); !*err; ++state.ip) {
    if (state.ip >= program.size()) {
      *err = "Invalid instruction pointer in BPF program";
      break;
    }
    const struct sock_filter& insn = program[state.ip];
    switch (BPF_CLASS(insn.code)) {
      case BPF_LD:
        Ld(&state, insn, err);
        break;
      case BPF_JMP:
        Jmp(&state, insn, err);
        break;
      case BPF_RET: {
        uint32_t r = Ret(&state, insn, err);
        switch (r & SECCOMP_RET_ACTION) {
          case SECCOMP_RET_TRAP:
          case SECCOMP_RET_ERRNO:
          case SECCOMP_RET_TRACE:
          case SECCOMP_RET_ALLOW:
            break;
          case SECCOMP_RET_KILL:     // We don't ever generate this
          case SECCOMP_RET_INVALID:  // Should never show up in BPF program
          default:
            *err = "Unexpected return code found in BPF program";
            return 0;
        }
        return r;
      }
      case BPF_ALU:
        Alu(&state, insn, err);
        break;
      default:
        *err = "Unexpected instruction in BPF program";
        break;
    }
  }
  return 0;
}
}  // namespace bpf_dsl
}  // namespace sandbox