Convert sandbox_bpf_unittest.cc to use bpf_dsl
sandbox/linux/bpf_dsl/bpf_dsl_more_unittest.cc (chromium-blink-merge.git)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <errno.h>
6 #include <pthread.h>
7 #include <sched.h>
8 #include <signal.h>
9 #include <sys/prctl.h>
10 #include <sys/ptrace.h>
11 #include <sys/syscall.h>
12 #include <sys/time.h>
13 #include <sys/types.h>
14 #include <sys/utsname.h>
15 #include <unistd.h>
16 #include <sys/socket.h>
18 #if defined(ANDROID)
19 // Work-around for buggy headers in Android's NDK
20 #define __user
21 #endif
22 #include <linux/futex.h>
24 #include <ostream>
26 #include "base/bind.h"
27 #include "base/logging.h"
28 #include "base/macros.h"
29 #include "base/memory/scoped_ptr.h"
30 #include "base/posix/eintr_wrapper.h"
31 #include "base/synchronization/waitable_event.h"
32 #include "base/threading/thread.h"
33 #include "build/build_config.h"
34 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
35 #include "sandbox/linux/seccomp-bpf/bpf_tests.h"
36 #include "sandbox/linux/seccomp-bpf/syscall.h"
37 #include "sandbox/linux/seccomp-bpf/trap.h"
38 #include "sandbox/linux/seccomp-bpf/verifier.h"
39 #include "sandbox/linux/services/broker_process.h"
40 #include "sandbox/linux/services/linux_syscalls.h"
41 #include "sandbox/linux/tests/scoped_temporary_file.h"
42 #include "sandbox/linux/tests/unit_tests.h"
43 #include "testing/gtest/include/gtest/gtest.h"
45 // Workaround for Android's prctl.h file.
46 #ifndef PR_GET_ENDIAN
47 #define PR_GET_ENDIAN 19
48 #endif
49 #ifndef PR_CAPBSET_READ
50 #define PR_CAPBSET_READ 23
51 #define PR_CAPBSET_DROP 24
52 #endif
54 namespace sandbox {
55 namespace bpf_dsl {
57 namespace {
59 const int kExpectedReturnValue = 42;
60 const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
62 // Set the global environment to allow the use of UnsafeTrap() policies.
63 void EnableUnsafeTraps() {
64 // The use of UnsafeTrap() causes us to print a warning message. This is
65 // generally desirable, but it results in the unittest failing, as it doesn't
66 // expect any messages on "stderr". So, temporarily disable messages. The
67 // BPF_TEST() is guaranteed to turn messages back on, after the policy
68 // function has completed.
69 setenv(kSandboxDebuggingEnv, "t", 0);
70 Die::SuppressInfoMessages(true);
73 // This test should execute regardless of whether we have kernel support. So,
74 // we make it a TEST() instead of a BPF_TEST().
75 TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupports)) {
76 // We check that we don't crash, but it's ok if the kernel doesn't
77 // support it.
78 bool seccomp_bpf_supported =
79 SandboxBPF::SupportsSeccompSandbox(-1) == SandboxBPF::STATUS_AVAILABLE;
80 // We want to log whether or not seccomp BPF is actually supported
81 // since actual test coverage depends on it.
82 RecordProperty("SeccompBPFSupported",
83 seccomp_bpf_supported ? "true." : "false.");
84 std::cout << "Seccomp BPF supported: "
85 << (seccomp_bpf_supported ? "true." : "false.") << "\n";
86 RecordProperty("PointerSize", sizeof(void*));
87 std::cout << "Pointer size: " << sizeof(void*) << "\n";
90 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupportsTwice)) {
91 SandboxBPF::SupportsSeccompSandbox(-1);
92 SandboxBPF::SupportsSeccompSandbox(-1);
95 // BPF_TEST handles a lot of the boiler-plate code around setting up a
96 // policy and optionally passing data between the caller, the policy and
97 // any Trap() handlers. This is great for writing short and concise tests,
98 // and it keeps us from accidentally forgetting any of the crucial steps in
99 // setting up the sandbox. But it wouldn't hurt to have at least one test
100 // that explicitly walks through all these steps.
102 intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) {
103 BPF_ASSERT(aux);
104 int* counter = static_cast<int*>(aux);
105 return (*counter)++;
108 class VerboseAPITestingPolicy : public SandboxBPFDSLPolicy {
109 public:
110 VerboseAPITestingPolicy(int* counter_ptr) : counter_ptr_(counter_ptr) {}
111 virtual ~VerboseAPITestingPolicy() {}
113 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
114 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
115 if (sysno == __NR_uname) {
116 return Trap(IncreaseCounter, counter_ptr_);
118 return Allow();
121 private:
122 int* counter_ptr_;
124 DISALLOW_COPY_AND_ASSIGN(VerboseAPITestingPolicy);
127 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) {
128 if (SandboxBPF::SupportsSeccompSandbox(-1) ==
129 sandbox::SandboxBPF::STATUS_AVAILABLE) {
130 static int counter = 0;
132 SandboxBPF sandbox;
133 sandbox.SetSandboxPolicy(new VerboseAPITestingPolicy(&counter));
134 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
136 BPF_ASSERT_EQ(0, counter);
137 BPF_ASSERT_EQ(0, syscall(__NR_uname, 0));
138 BPF_ASSERT_EQ(1, counter);
139 BPF_ASSERT_EQ(1, syscall(__NR_uname, 0));
140 BPF_ASSERT_EQ(2, counter);
144 // A simple blacklist test
146 class BlacklistNanosleepPolicy : public SandboxBPFDSLPolicy {
147 public:
148 BlacklistNanosleepPolicy() {}
149 virtual ~BlacklistNanosleepPolicy() {}
151 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
152 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
153 switch (sysno) {
154 case __NR_nanosleep:
155 return Error(EACCES);
156 default:
157 return Allow();
161 static void AssertNanosleepFails() {
162 const struct timespec ts = {0, 0};
163 errno = 0;
164 BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
165 BPF_ASSERT_EQ(EACCES, errno);
168 private:
169 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepPolicy);
172 BPF_TEST_C(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) {
173 BlacklistNanosleepPolicy::AssertNanosleepFails();
176 // Now do a simple whitelist test
178 class WhitelistGetpidPolicy : public SandboxBPFDSLPolicy {
179 public:
180 WhitelistGetpidPolicy() {}
181 virtual ~WhitelistGetpidPolicy() {}
183 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
184 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
185 switch (sysno) {
186 case __NR_getpid:
187 case __NR_exit_group:
188 return Allow();
189 default:
190 return Error(ENOMEM);
194 private:
195 DISALLOW_COPY_AND_ASSIGN(WhitelistGetpidPolicy);
198 BPF_TEST_C(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) {
199 // getpid() should be allowed
200 errno = 0;
201 BPF_ASSERT(syscall(__NR_getpid) > 0);
202 BPF_ASSERT(errno == 0);
204 // getpgid() should be denied
205 BPF_ASSERT(getpgid(0) == -1);
206 BPF_ASSERT(errno == ENOMEM);
209 // A simple blacklist policy, with a SIGSYS handler
210 intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) {
211 // We also check that the auxiliary data is correct
212 SANDBOX_ASSERT(aux);
213 *(static_cast<int*>(aux)) = kExpectedReturnValue;
214 return -ENOMEM;
217 ErrorCode BlacklistNanosleepPolicySigsys(SandboxBPF* sandbox,
218 int sysno,
219 int* aux) {
220 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
221 switch (sysno) {
222 case __NR_nanosleep:
223 return sandbox->Trap(EnomemHandler, aux);
224 default:
225 return ErrorCode(ErrorCode::ERR_ALLOWED);
229 BPF_TEST(SandboxBPF,
230 BasicBlacklistWithSigsys,
231 BlacklistNanosleepPolicySigsys,
232 int /* (*BPF_AUX) */) {
233 // getpid() should work properly
234 errno = 0;
235 BPF_ASSERT(syscall(__NR_getpid) > 0);
236 BPF_ASSERT(errno == 0);
238 // Our auxiliary data should be overwritten by the signal handler.
239 *BPF_AUX = -1;
240 const struct timespec ts = {0, 0};
241 BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
242 BPF_ASSERT(errno == ENOMEM);
244 // We expect the signal handler to modify AuxData
245 BPF_ASSERT(*BPF_AUX == kExpectedReturnValue);
248 // A simple test that verifies we can return arbitrary errno values.
250 class ErrnoTestPolicy : public SandboxBPFDSLPolicy {
251 public:
252 ErrnoTestPolicy() {}
253 virtual ~ErrnoTestPolicy() {}
255 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
257 private:
258 DISALLOW_COPY_AND_ASSIGN(ErrnoTestPolicy);
261 ResultExpr ErrnoTestPolicy::EvaluateSyscall(int sysno) const {
262 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
263 switch (sysno) {
264 case __NR_dup3: // dup2() is a wrapper around dup3() on Android.
265 #if defined(__NR_dup2)
266 case __NR_dup2:
267 #endif
268 // Pretend that dup2() worked, but don't actually do anything.
269 return Error(0);
270 case __NR_setuid:
271 #if defined(__NR_setuid32)
272 case __NR_setuid32:
273 #endif
274 // Return errno = 1.
275 return Error(1);
276 case __NR_setgid:
277 #if defined(__NR_setgid32)
278 case __NR_setgid32:
279 #endif
280 // Return maximum errno value (typically 4095).
281 return Error(ErrorCode::ERR_MAX_ERRNO);
282 case __NR_uname:
283 // Return errno = 42;
284 return Error(42);
285 default:
286 return Allow();
290 BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) {
291 // Verify that dup2() returns success, but doesn't actually run.
292 int fds[4];
293 BPF_ASSERT(pipe(fds) == 0);
294 BPF_ASSERT(pipe(fds + 2) == 0);
295 BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
296 char buf[1] = {};
297 BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
298 BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
299 BPF_ASSERT(read(fds[0], buf, 1) == 1);
301 // If dup2() executed, we will read \xAA, but if dup2() has been turned
302 // into a no-op by our policy, then we will read \x55.
303 BPF_ASSERT(buf[0] == '\x55');
305 // Verify that we can return the minimum and maximum errno values.
306 errno = 0;
307 BPF_ASSERT(setuid(0) == -1);
308 BPF_ASSERT(errno == 1);
310 // On Android, errno is only supported up to 255, otherwise errno
311 // processing is skipped.
312 // We work around this (crbug.com/181647).
313 if (sandbox::IsAndroid() && setgid(0) != -1) {
314 errno = 0;
315 BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO);
316 BPF_ASSERT(errno == 0);
317 } else {
318 errno = 0;
319 BPF_ASSERT(setgid(0) == -1);
320 BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO);
323 // Finally, test an errno in between the minimum and maximum.
324 errno = 0;
325 struct utsname uts_buf;
326 BPF_ASSERT(uname(&uts_buf) == -1);
327 BPF_ASSERT(errno == 42);
330 // Testing the stacking of two sandboxes
332 class StackingPolicyPartOne : public SandboxBPFDSLPolicy {
333 public:
334 StackingPolicyPartOne() {}
335 virtual ~StackingPolicyPartOne() {}
337 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
338 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
339 switch (sysno) {
340 case __NR_getppid: {
341 const Arg<int> arg(0);
342 return If(arg == 0, Allow()).Else(Error(EPERM));
344 default:
345 return Allow();
349 private:
350 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartOne);
353 class StackingPolicyPartTwo : public SandboxBPFDSLPolicy {
354 public:
355 StackingPolicyPartTwo() {}
356 virtual ~StackingPolicyPartTwo() {}
358 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
359 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
360 switch (sysno) {
361 case __NR_getppid: {
362 const Arg<int> arg(0);
363 return If(arg == 0, Error(EINVAL)).Else(Allow());
365 default:
366 return Allow();
370 private:
371 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartTwo);
374 BPF_TEST_C(SandboxBPF, StackingPolicy, StackingPolicyPartOne) {
375 errno = 0;
376 BPF_ASSERT(syscall(__NR_getppid, 0) > 0);
377 BPF_ASSERT(errno == 0);
379 BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
380 BPF_ASSERT(errno == EPERM);
382 // Stack a second sandbox with its own policy. Verify that we can further
383 // restrict filters, but we cannot relax existing filters.
384 SandboxBPF sandbox;
385 sandbox.SetSandboxPolicy(new StackingPolicyPartTwo());
386 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
388 errno = 0;
389 BPF_ASSERT(syscall(__NR_getppid, 0) == -1);
390 BPF_ASSERT(errno == EINVAL);
392 BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
393 BPF_ASSERT(errno == EPERM);
396 // A more complex, but synthetic policy. This tests the correctness of the BPF
397 // program by iterating through all syscalls and checking for an errno that
398 // depends on the syscall number. Unlike the Verifier, this exercises the BPF
399 // interpreter in the kernel.
401 // We try to make sure we exercise optimizations in the BPF compiler. We make
402 // sure that the compiler can have an opportunity to coalesce syscalls with
403 // contiguous numbers and we also make sure that disjoint sets can return the
404 // same errno.
405 int SysnoToRandomErrno(int sysno) {
406 // Small contiguous sets of four system calls return an errno equal to the
407 // index of that set (mod 29) plus 1 (so that we never return a zero errno).
408 return ((sysno & ~3) >> 2) % 29 + 1;
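// For example, the formula above maps each block of four consecutive syscall
// numbers to one errno value and wraps around after 29 blocks:
//   sysno 0..3     -> ((0   & ~3) >> 2) % 29 + 1 == 1
//   sysno 4..7     -> ((4   & ~3) >> 2) % 29 + 1 == 2
//   sysno 116..119 -> ((116 & ~3) >> 2) % 29 + 1 == 1   (a disjoint set that
//                                                        shares errno 1 with 0..3)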
411 class SyntheticPolicy : public SandboxBPFDSLPolicy {
412 public:
413 SyntheticPolicy() {}
414 virtual ~SyntheticPolicy() {}
416 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
417 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
418 if (sysno == __NR_exit_group || sysno == __NR_write) {
419 // exit_group() is special; we really need it to work.
420 // write() is needed for BPF_ASSERT() to report a useful error message.
421 return Allow();
423 return Error(SysnoToRandomErrno(sysno));
426 private:
427 DISALLOW_COPY_AND_ASSIGN(SyntheticPolicy);
430 BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) {
431 // Ensure that kExpectedReturnValue + syscall_number + 1 does not overflow
432 // an int.
433 BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
434 static_cast<int>(MAX_PUBLIC_SYSCALL));
436 for (int syscall_number = static_cast<int>(MIN_SYSCALL);
437 syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
438 ++syscall_number) {
439 if (syscall_number == __NR_exit_group || syscall_number == __NR_write) {
440 // exit_group() and write() are allowed by the policy; skip them.
441 continue;
443 errno = 0;
444 BPF_ASSERT(syscall(syscall_number) == -1);
445 BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number));
449 #if defined(__arm__)
450 // A simple policy that tests whether ARM private system calls are supported
451 // by our BPF compiler and by the BPF interpreter in the kernel.
453 // For ARM private system calls, return an errno equal to their offset from
454 // MIN_PRIVATE_SYSCALL plus 1 (to avoid a zero errno).
455 int ArmPrivateSysnoToErrno(int sysno) {
456 if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) &&
457 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
458 return (sysno - MIN_PRIVATE_SYSCALL) + 1;
459 } else {
460 return ENOSYS;
464 class ArmPrivatePolicy : public SandboxBPFDSLPolicy {
465 public:
466 ArmPrivatePolicy() {}
467 virtual ~ArmPrivatePolicy() {}
469 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
470 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
471 // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual
472 // ARM private system calls.
473 if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) &&
474 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
475 return Error(ArmPrivateSysnoToErrno(sysno));
477 return Allow();
480 private:
481 DISALLOW_COPY_AND_ASSIGN(ArmPrivatePolicy);
484 BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) {
485 for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
486 syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
487 ++syscall_number) {
488 errno = 0;
489 BPF_ASSERT(syscall(syscall_number) == -1);
490 BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
493 #endif // defined(__arm__)
495 intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
496 // Count all invocations of our callback function.
497 ++*reinterpret_cast<int*>(aux);
499 // Verify that within the callback function all filtering is temporarily
500 // disabled.
501 BPF_ASSERT(syscall(__NR_getpid) > 1);
503 // Verify that we can now call the underlying system call without causing
504 // infinite recursion.
505 return SandboxBPF::ForwardSyscall(args);
508 ErrorCode GreyListedPolicy(SandboxBPF* sandbox, int sysno, int* aux) {
509 // Set the global environment for unsafe traps once.
510 if (sysno == MIN_SYSCALL) {
511 EnableUnsafeTraps();
514 // Some system calls must always be allowed if our policy wants to make
515 // use of UnsafeTrap().
516 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) {
517 return ErrorCode(ErrorCode::ERR_ALLOWED);
518 } else if (sysno == __NR_getpid) {
519 // Disallow getpid()
520 return ErrorCode(EPERM);
521 } else if (SandboxBPF::IsValidSyscallNumber(sysno)) {
522 // Allow (and count) all other system calls.
523 return sandbox->UnsafeTrap(CountSyscalls, aux);
524 } else {
525 return ErrorCode(ENOSYS);
529 BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) {
530 BPF_ASSERT(syscall(__NR_getpid) == -1);
531 BPF_ASSERT(errno == EPERM);
532 BPF_ASSERT(*BPF_AUX == 0);
533 BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
534 BPF_ASSERT(*BPF_AUX == 2);
535 char name[17] = {};
536 BPF_ASSERT(!syscall(__NR_prctl,
537 PR_GET_NAME,
538 name,
539 (void*)NULL,
540 (void*)NULL,
541 (void*)NULL));
542 BPF_ASSERT(*BPF_AUX == 3);
543 BPF_ASSERT(*name);
546 SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) {
547 // Disable warning messages that could confuse our test framework.
548 setenv(kSandboxDebuggingEnv, "t", 0);
549 Die::SuppressInfoMessages(true);
551 unsetenv(kSandboxDebuggingEnv);
552 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
553 setenv(kSandboxDebuggingEnv, "", 1);
554 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
555 setenv(kSandboxDebuggingEnv, "t", 1);
556 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true);
559 intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) {
560 if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) {
561 // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always
562 // return an error. But our handler allows this call.
563 return 0;
564 } else {
565 return SandboxBPF::ForwardSyscall(args);
569 class PrctlPolicy : public SandboxBPFDSLPolicy {
570 public:
571 PrctlPolicy() {}
572 virtual ~PrctlPolicy() {}
574 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
575 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
576 setenv(kSandboxDebuggingEnv, "t", 0);
577 Die::SuppressInfoMessages(true);
579 if (sysno == __NR_prctl) {
580 // Handle prctl() inside an UnsafeTrap()
581 return UnsafeTrap(PrctlHandler, NULL);
584 // Allow all other system calls.
585 return Allow();
588 private:
589 DISALLOW_COPY_AND_ASSIGN(PrctlPolicy);
592 BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) {
593 // This call should never be allowed. But our policy will intercept it and
594 // let it pass successfully.
595 BPF_ASSERT(
596 !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));
598 // Verify that the call will fail, if it makes it all the way to the kernel.
599 BPF_ASSERT(
600 prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);
602 // And verify that other uses of prctl() work just fine.
603 char name[17] = {};
604 BPF_ASSERT(!syscall(__NR_prctl,
605 PR_GET_NAME,
606 name,
607 (void*)NULL,
608 (void*)NULL,
609 (void*)NULL));
610 BPF_ASSERT(*name);
612 // Finally, verify that system calls other than prctl() are completely
613 // unaffected by our policy.
614 struct utsname uts = {};
615 BPF_ASSERT(!uname(&uts));
616 BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
619 intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
620 return SandboxBPF::ForwardSyscall(args);
623 class RedirectAllSyscallsPolicy : public SandboxBPFDSLPolicy {
624 public:
625 RedirectAllSyscallsPolicy() {}
626 virtual ~RedirectAllSyscallsPolicy() {}
628 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
630 private:
631 DISALLOW_COPY_AND_ASSIGN(RedirectAllSyscallsPolicy);
634 ResultExpr RedirectAllSyscallsPolicy::EvaluateSyscall(int sysno) const {
635 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
636 setenv(kSandboxDebuggingEnv, "t", 0);
637 Die::SuppressInfoMessages(true);
639 // Some system calls must always be allowed if our policy wants to make
640 // use of UnsafeTrap().
641 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
642 return Allow();
643 return UnsafeTrap(AllowRedirectedSyscall, NULL);
646 int bus_handler_fd_ = -1;
648 void SigBusHandler(int, siginfo_t* info, void* void_context) {
649 BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
652 BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) {
653 // We use the SIGBUS bit in the signal mask as a thread-local boolean
654 // value in the implementation of UnsafeTrap(). This is obviously a bit
655 // of a hack that could conceivably interfere with code that uses SIGBUS
656 // in more traditional ways. This test verifies that basic functionality
657 // of SIGBUS is not impacted, but it is certainly possible to construe
658 // more complex uses of signals where our use of the SIGBUS mask is not
659 // 100% transparent. This is expected behavior.
660 int fds[2];
661 BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
662 bus_handler_fd_ = fds[1];
663 struct sigaction sa = {};
664 sa.sa_sigaction = SigBusHandler;
665 sa.sa_flags = SA_SIGINFO;
666 BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
667 raise(SIGBUS);
668 char c = '\000';
669 BPF_ASSERT(read(fds[0], &c, 1) == 1);
670 BPF_ASSERT(close(fds[0]) == 0);
671 BPF_ASSERT(close(fds[1]) == 0);
672 BPF_ASSERT(c == 0x55);
675 BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) {
676 // Signal masks are potentially tricky to handle. For instance, if we
677 // ever tried to update them from inside a Trap() or UnsafeTrap() handler,
678 // the call to sigreturn() at the end of the signal handler would undo
679 // all of our efforts. So, it makes sense to test that sigprocmask()
680 // works, even if we have a policy in place that makes use of UnsafeTrap().
681 // In practice, this works because we force sigprocmask() to be handled
682 // entirely in the kernel.
683 sigset_t mask0, mask1, mask2;
685 // Call sigprocmask() to verify that SIGUSR2 wasn't blocked, if we didn't
686 // change the mask (it shouldn't have been, as it isn't blocked by default
687 // in POSIX).
689 // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose.
690 sigemptyset(&mask0);
691 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1));
692 BPF_ASSERT(!sigismember(&mask1, SIGUSR2));
694 // Try again, and this time we verify that we can block it. This
695 // requires a second call to sigprocmask().
696 sigaddset(&mask0, SIGUSR2);
697 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL));
698 BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2));
699 BPF_ASSERT(sigismember(&mask2, SIGUSR2));
702 BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
703 // An UnsafeTrap() (or for that matter, a Trap()) has to report error
704 // conditions by returning an exit code in the range -1..-4096. This
705 // should happen automatically if using ForwardSyscall(). If the TrapFnc()
706 // uses some other method to make system calls, then it is responsible
707 // for computing the correct return code.
708 // This test verifies that ForwardSyscall() does the correct thing.
710 // The glibc system wrapper will ultimately set errno for us. So, from normal
711 // userspace, all of this should be completely transparent.
712 errno = 0;
713 BPF_ASSERT(close(-1) == -1);
714 BPF_ASSERT(errno == EBADF);
716 // Explicitly avoid the glibc wrapper. This is not normally the way anybody
717 // would make system calls, but it allows us to verify that we don't
718 // accidentally mess with errno, when we shouldn't.
719 errno = 0;
720 struct arch_seccomp_data args = {};
721 args.nr = __NR_close;
722 args.args[0] = -1;
723 BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF);
724 BPF_ASSERT(errno == 0);
727 bool NoOpCallback() {
728 return true;
731 // Test a trap handler that makes use of a broker process to handle open().
733 class InitializedOpenBroker {
734 public:
735 InitializedOpenBroker() : initialized_(false) {
736 std::vector<std::string> allowed_files;
737 allowed_files.push_back("/proc/allowed");
738 allowed_files.push_back("/proc/cpuinfo");
740 broker_process_.reset(
741 new BrokerProcess(EPERM, allowed_files, std::vector<std::string>()));
742 BPF_ASSERT(broker_process() != NULL);
743 BPF_ASSERT(broker_process_->Init(base::Bind(&NoOpCallback)));
745 initialized_ = true;
747 bool initialized() { return initialized_; }
748 class BrokerProcess* broker_process() { return broker_process_.get(); }
750 private:
751 bool initialized_;
752 scoped_ptr<class BrokerProcess> broker_process_;
753 DISALLOW_COPY_AND_ASSIGN(InitializedOpenBroker);
756 intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args,
757 void* aux) {
758 BPF_ASSERT(aux);
759 BrokerProcess* broker_process = static_cast<BrokerProcess*>(aux);
760 switch (args.nr) {
761 case __NR_faccessat: // access() is a wrapper around faccessat() on Android.
762 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
763 return broker_process->Access(reinterpret_cast<const char*>(args.args[1]),
764 static_cast<int>(args.args[2]));
765 #if defined(__NR_access)
766 case __NR_access:
767 return broker_process->Access(reinterpret_cast<const char*>(args.args[0]),
768 static_cast<int>(args.args[1]));
769 #endif
770 #if defined(__NR_open)
771 case __NR_open:
772 return broker_process->Open(reinterpret_cast<const char*>(args.args[0]),
773 static_cast<int>(args.args[1]));
774 #endif
775 case __NR_openat:
776 // We only call open() so if we arrive here, it's because glibc uses
777 // the openat() system call.
778 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
779 return broker_process->Open(reinterpret_cast<const char*>(args.args[1]),
780 static_cast<int>(args.args[2]));
781 default:
782 BPF_ASSERT(false);
783 return -ENOSYS;
787 ErrorCode DenyOpenPolicy(SandboxBPF* sandbox,
788 int sysno,
789 InitializedOpenBroker* iob) {
790 if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
791 return ErrorCode(ENOSYS);
794 switch (sysno) {
795 case __NR_faccessat:
796 #if defined(__NR_access)
797 case __NR_access:
798 #endif
799 #if defined(__NR_open)
800 case __NR_open:
801 #endif
802 case __NR_openat:
803 // We get an InitializedOpenBroker class, but our trap handler wants
804 // the BrokerProcess object.
805 return ErrorCode(
806 sandbox->Trap(BrokerOpenTrapHandler, iob->broker_process()));
807 default:
808 return ErrorCode(ErrorCode::ERR_ALLOWED);
812 // We use an InitializedOpenBroker class so that we can run unsandboxed
813 // code in its constructor, which is the only way to do so in a BPF_TEST.
814 BPF_TEST(SandboxBPF,
815 UseOpenBroker,
816 DenyOpenPolicy,
817 InitializedOpenBroker /* (*BPF_AUX) */) {
818 BPF_ASSERT(BPF_AUX->initialized());
819 BrokerProcess* broker_process = BPF_AUX->broker_process();
820 BPF_ASSERT(broker_process != NULL);
822 // First, use the broker "manually"
823 BPF_ASSERT(broker_process->Open("/proc/denied", O_RDONLY) == -EPERM);
824 BPF_ASSERT(broker_process->Access("/proc/denied", R_OK) == -EPERM);
825 BPF_ASSERT(broker_process->Open("/proc/allowed", O_RDONLY) == -ENOENT);
826 BPF_ASSERT(broker_process->Access("/proc/allowed", R_OK) == -ENOENT);
828 // Now use glibc's open() as an external library would.
829 BPF_ASSERT(open("/proc/denied", O_RDONLY) == -1);
830 BPF_ASSERT(errno == EPERM);
832 BPF_ASSERT(open("/proc/allowed", O_RDONLY) == -1);
833 BPF_ASSERT(errno == ENOENT);
835 // Also test glibc's openat(); some versions of libc use it transparently
836 // instead of open().
837 BPF_ASSERT(openat(AT_FDCWD, "/proc/denied", O_RDONLY) == -1);
838 BPF_ASSERT(errno == EPERM);
840 BPF_ASSERT(openat(AT_FDCWD, "/proc/allowed", O_RDONLY) == -1);
841 BPF_ASSERT(errno == ENOENT);
843 // And test glibc's access().
844 BPF_ASSERT(access("/proc/denied", R_OK) == -1);
845 BPF_ASSERT(errno == EPERM);
847 BPF_ASSERT(access("/proc/allowed", R_OK) == -1);
848 BPF_ASSERT(errno == ENOENT);
850 // This is also whitelisted and does exist.
851 int cpu_info_access = access("/proc/cpuinfo", R_OK);
852 BPF_ASSERT(cpu_info_access == 0);
853 int cpu_info_fd = open("/proc/cpuinfo", O_RDONLY);
854 BPF_ASSERT(cpu_info_fd >= 0);
855 char buf[1024];
856 BPF_ASSERT(read(cpu_info_fd, buf, sizeof(buf)) > 0);
859 // Simple test demonstrating how to use SandboxBPF::Cond()
861 class SimpleCondTestPolicy : public SandboxBPFDSLPolicy {
862 public:
863 SimpleCondTestPolicy() {}
864 virtual ~SimpleCondTestPolicy() {}
866 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
868 private:
869 DISALLOW_COPY_AND_ASSIGN(SimpleCondTestPolicy);
872 ResultExpr SimpleCondTestPolicy::EvaluateSyscall(int sysno) const {
873 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
875 // We deliberately return unusual errno values upon failure, so that we
876 // can uniquely test for these values. In a "real" policy, you would want
877 // to return more traditional values.
878 int flags_argument_position = -1;
879 switch (sysno) {
880 #if defined(__NR_open)
881 case __NR_open:
882 flags_argument_position = 1;
883 #endif
884 case __NR_openat: { // open can be a wrapper for openat(2).
885 if (sysno == __NR_openat)
886 flags_argument_position = 2;
888 // Allow opening files for reading, but don't allow writing.
889 COMPILE_ASSERT(O_RDONLY == 0, O_RDONLY_must_be_all_zero_bits);
890 const Arg<int> flags(flags_argument_position);
891 return If((flags & O_ACCMODE) != 0, Error(EROFS)).Else(Allow());
893 case __NR_prctl: {
894 // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but
895 // disallow everything else.
896 const Arg<int> option(0);
897 return If(option == PR_SET_DUMPABLE || option == PR_GET_DUMPABLE, Allow())
898 .Else(Error(ENOMEM));
900 default:
901 return Allow();
905 BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) {
906 int fd;
907 BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1);
908 BPF_ASSERT(errno == EROFS);
909 BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0);
910 close(fd);
912 int ret;
913 BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0);
914 BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0);
915 BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1);
916 BPF_ASSERT(errno == ENOMEM);
919 // This test exercises the SandboxBPF::Cond() method by building a complex
920 // tree of conditional equality operations. It then makes system calls and
921 // verifies that they return the values that we expected from our BPF
922 // program.
923 class EqualityStressTest {
924 public:
925 EqualityStressTest() {
926 // We want a deterministic test
927 srand(0);
929 // Iterates over system call numbers and builds a random tree of
930 // equality tests.
931 // We are actually constructing a graph of ArgValue objects. This
932 // graph will later be used to a) compute our sandbox policy, and
933 // b) drive the code that verifies the output from the BPF program.
934 COMPILE_ASSERT(
935 kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
936 num_test_cases_must_be_significantly_smaller_than_num_system_calls);
937 for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
938 if (IsReservedSyscall(sysno)) {
939 // Skip reserved system calls. This ensures that our test
940 // framework isn't impacted by the fact that we are overriding
941 // a lot of different system calls.
942 ++end;
943 arg_values_.push_back(NULL);
944 } else {
945 arg_values_.push_back(
946 RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
951 ~EqualityStressTest() {
952 for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
953 iter != arg_values_.end();
954 ++iter) {
955 DeleteArgValue(*iter);
959 ErrorCode Policy(SandboxBPF* sandbox, int sysno) {
960 if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
961 // FIXME: we should really not have to do that in a trivial policy
962 return ErrorCode(ENOSYS);
963 } else if (sysno < 0 || sysno >= (int)arg_values_.size() ||
964 IsReservedSyscall(sysno)) {
965 // We only return ErrorCode values for the system calls that
966 // are part of our test data. Every other system call remains
967 // allowed.
968 return ErrorCode(ErrorCode::ERR_ALLOWED);
969 } else {
970 // ToErrorCode() turns an ArgValue object into an ErrorCode that is
971 // suitable for use by a sandbox policy.
972 return ToErrorCode(sandbox, arg_values_[sysno]);
976 void VerifyFilter() {
977 // Iterate over all system calls. Skip the system calls that have
978 // previously been determined as being reserved.
979 for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) {
980 if (!arg_values_[sysno]) {
981 // Skip reserved system calls.
982 continue;
984 // Verify that system calls return the values that we expect them to
985 // return. This involves passing different combinations of system call
986 // parameters in order to exercise all possible code paths through the
987 // BPF filter program.
988 // We arbitrarily start by setting all six system call arguments to
989 // zero. And we then recursively traverse our tree of ArgValues to
990 // determine the necessary combinations of parameters.
991 intptr_t args[6] = {};
992 Verify(sysno, args, *arg_values_[sysno]);
996 private:
997 struct ArgValue {
998 int argno; // Argument number to inspect.
999 int size; // Number of test cases (must be > 0).
1000 struct Tests {
1001 uint32_t k_value; // Value to compare syscall arg against.
1002 int err; // If non-zero, errno value to return.
1003 struct ArgValue* arg_value; // Otherwise, more args needs inspecting.
1004 }* tests;
1005 int err; // If none of the tests passed, this is what
1006 struct ArgValue* arg_value; // we'll return (this is the "else" branch).
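// As a sketch of the ArgValue encoding (the concrete values here are made up
// purely for illustration): a node with argno == 2, a single test entry
// { k_value == 0x100, err == 7 } and a fallback err == 9 describes the policy
// fragment
//   if (args[2] == 0x100) return -7; else return -9;
// Entries whose err field is zero instead chain to another ArgValue, which
// inspects a different argument number.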
1009 bool IsReservedSyscall(int sysno) {
1010 // There are a handful of system calls that we should never use in our
1011 // test cases. These system calls are needed to allow the test framework
1012 // to run properly.
1013 // If we wanted to write fully generic code, there are more system calls
1014 // that could be listed here, and it is quite difficult to come up with a
1015 // truly comprehensive list. After all, we are deliberately making system
1016 // calls unavailable. In practice, we have a pretty good idea of the system
1017 // calls that will be made by this particular test. So, this small list is
1018 // sufficient. But if anybody copy'n'pasted this code for other uses, they
1019 // would have to review the list.
1020 return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
1021 sysno == __NR_exit_group || sysno == __NR_restart_syscall;
1024 ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
1025 // Create a new ArgValue and fill it with random data. We use a bit mask
1026 // to keep track of the system call parameters that have previously been
1027 // set; this ensures that we won't accidentally define a contradictory
1028 // set of equality tests.
1029 struct ArgValue* arg_value = new ArgValue();
1030 args_mask |= 1 << argno;
1031 arg_value->argno = argno;
1033 // Apply some restrictions on just how complex our tests can be.
1034 // Otherwise, we end up with a BPF program that is too complicated for
1035 // the kernel to load.
1036 int fan_out = kMaxFanOut;
1037 if (remaining_args > 3) {
1038 fan_out = 1;
1039 } else if (remaining_args > 2) {
1040 fan_out = 2;
1043 // Create a couple of different test cases with randomized values that
1044 // we want to use when comparing system call parameter number "argno".
1045 arg_value->size = rand() % fan_out + 1;
1046 arg_value->tests = new ArgValue::Tests[arg_value->size];
1048 uint32_t k_value = rand();
1049 for (int n = 0; n < arg_value->size; ++n) {
1050 // Ensure that we have unique values
1051 k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;
1053 // There are two possible types of nodes. Either this is a leaf node;
1054 // in that case, we have completed all the equality tests that we
1055 // wanted to perform, and we can now compute a random "errno" value that
1056 // we should return. Or this is part of a more complex boolean
1057 // expression; in that case, we have to recursively add tests for some
1058 // of system call parameters that we have not yet included in our
1059 // tests.
1060 arg_value->tests[n].k_value = k_value;
1061 if (!remaining_args || (rand() & 1)) {
1062 arg_value->tests[n].err = (rand() % 1000) + 1;
1063 arg_value->tests[n].arg_value = NULL;
1064 } else {
1065 arg_value->tests[n].err = 0;
1066 arg_value->tests[n].arg_value =
1067 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
1070 // Finally, we have to define what we should return if none of the
1071 // previous equality tests pass. Again, we can either deal with a leaf
1072 // node, or we can randomly add another couple of tests.
1073 if (!remaining_args || (rand() & 1)) {
1074 arg_value->err = (rand() % 1000) + 1;
1075 arg_value->arg_value = NULL;
1076 } else {
1077 arg_value->err = 0;
1078 arg_value->arg_value =
1079 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
1081 // We have now built a new (sub-)tree of ArgValues defining a set of
1082 // boolean expressions for testing random system call arguments against
1083 // random values. Return this tree to our caller.
1084 return arg_value;
1087 int RandomArg(int args_mask) {
1088 // Compute a random system call parameter number.
1089 int argno = rand() % kMaxArgs;
1091 // Make sure that this same parameter number has not previously been
1092 // used. Otherwise, we could end up with a test that is impossible to
1093 // satisfy (e.g. args[0] == 1 && args[0] == 2).
1094 while (args_mask & (1 << argno)) {
1095 argno = (argno + 1) % kMaxArgs;
1097 return argno;
1100 void DeleteArgValue(ArgValue* arg_value) {
1101 // Delete an ArgValue and all of its child nodes. This requires
1102 // recursively descending into the tree.
1103 if (arg_value) {
1104 if (arg_value->size) {
1105 for (int n = 0; n < arg_value->size; ++n) {
1106 if (!arg_value->tests[n].err) {
1107 DeleteArgValue(arg_value->tests[n].arg_value);
1110 delete[] arg_value->tests;
1112 if (!arg_value->err) {
1113 DeleteArgValue(arg_value->arg_value);
1115 delete arg_value;
1119 ErrorCode ToErrorCode(SandboxBPF* sandbox, ArgValue* arg_value) {
1120 // Compute the ErrorCode that should be returned, if none of our
1121 // tests succeed (i.e. the system call parameter doesn't match any
1122 // of the values in arg_value->tests[].k_value).
1123 ErrorCode err;
1124 if (arg_value->err) {
1125 // If this was a leaf node, return the errno value that we expect to
1126 // return from the BPF filter program.
1127 err = ErrorCode(arg_value->err);
1128 } else {
1129 // If this wasn't a leaf node yet, recursively descend into the rest
1130 // of the tree. This will end up adding a few more SandboxBPF::Cond()
1131 // tests to our ErrorCode.
1132 err = ToErrorCode(sandbox, arg_value->arg_value);
1135 // Now, iterate over all the test cases that we want to compare against.
1136 // This builds a chain of SandboxBPF::Cond() tests
1137 // (aka "if ... elif ... elif ... elif ... fi")
1138 for (int n = arg_value->size; n-- > 0;) {
1139 ErrorCode matched;
1140 // Again, we distinguish between leaf nodes and subtrees.
1141 if (arg_value->tests[n].err) {
1142 matched = ErrorCode(arg_value->tests[n].err);
1143 } else {
1144 matched = ToErrorCode(sandbox, arg_value->tests[n].arg_value);
1146 // For now, all of our tests are limited to 32bit.
1147 // We have separate tests that check the behavior of 32bit vs. 64bit
1148 // conditional expressions.
1149 err = sandbox->Cond(arg_value->argno,
1150 ErrorCode::TP_32BIT,
1151 ErrorCode::OP_EQUAL,
1152 arg_value->tests[n].k_value,
1153 matched,
1154 err);
1156 return err;
1159 void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
1160 uint32_t mismatched = 0;
1161 // Iterate over all the k_values in arg_value.tests[] and verify that
1162 // we see the expected return values from system calls, when we pass
1163 // the k_value as a parameter in a system call.
1164 for (int n = arg_value.size; n-- > 0;) {
1165 mismatched += arg_value.tests[n].k_value;
1166 args[arg_value.argno] = arg_value.tests[n].k_value;
1167 if (arg_value.tests[n].err) {
1168 VerifyErrno(sysno, args, arg_value.tests[n].err);
1169 } else {
1170 Verify(sysno, args, *arg_value.tests[n].arg_value);
1173 // Find a k_value that doesn't match any of the k_values in
1174 // arg_value.tests[]. In most cases, the current value of "mismatched"
1175 // would fit this requirement. But on the off-chance that it happens
1176 // to collide, we double-check.
1177 try_again:
1178 for (int n = arg_value.size; n-- > 0;) {
1179 if (mismatched == arg_value.tests[n].k_value) {
1180 ++mismatched;
1181 goto try_again;
1184 // Now verify that we see the expected return value from system calls,
1185 // if we pass a value that doesn't match any of the conditions (i.e. this
1186 // is testing the "else" clause of the conditions).
1187 args[arg_value.argno] = mismatched;
1188 if (arg_value.err) {
1189 VerifyErrno(sysno, args, arg_value.err);
1190 } else {
1191 Verify(sysno, args, *arg_value.arg_value);
1193 // Reset args[arg_value.argno]. This is not technically needed, but it
1194 // makes it easier to reason about the correctness of our tests.
1195 args[arg_value.argno] = 0;
1198 void VerifyErrno(int sysno, intptr_t* args, int err) {
1199 // We installed BPF filters that return different errno values
1200 // based on the system call number and the parameters that we decided
1201 // to pass in. Verify that this condition holds true.
1202 BPF_ASSERT(
1203 Syscall::Call(
1204 sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
1205 -err);
1208 // Vector of ArgValue trees. These trees define all the possible boolean
1209 // expressions that we want to turn into a BPF filter program.
1210 std::vector<ArgValue*> arg_values_;
1212 // Don't increase these values. We are pushing the limits of the maximum
1213 // BPF program that the kernel will allow us to load. If the values are
1214 // increased too much, the test will start failing.
1215 #if defined(__aarch64__)
1216 static const int kNumTestCases = 30;
1217 #else
1218 static const int kNumTestCases = 40;
1219 #endif
1220 static const int kMaxFanOut = 3;
1221 static const int kMaxArgs = 6;
1224 ErrorCode EqualityStressTestPolicy(SandboxBPF* sandbox,
1225 int sysno,
1226 EqualityStressTest* aux) {
1227 DCHECK(aux);
1228 return aux->Policy(sandbox, sysno);
1231 BPF_TEST(SandboxBPF,
1232 EqualityTests,
1233 EqualityStressTestPolicy,
1234 EqualityStressTest /* (*BPF_AUX) */) {
1235 BPF_AUX->VerifyFilter();
1238 class EqualityArgumentWidthPolicy : public SandboxBPFDSLPolicy {
1239 public:
1240 EqualityArgumentWidthPolicy() {}
1241 virtual ~EqualityArgumentWidthPolicy() {}
1243 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
1245 private:
1246 DISALLOW_COPY_AND_ASSIGN(EqualityArgumentWidthPolicy);
1249 ResultExpr EqualityArgumentWidthPolicy::EvaluateSyscall(int sysno) const {
1250 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1251 if (sysno == __NR_uname) {
1252 const Arg<int> option(0);
1253 const Arg<uint32_t> arg32(1);
1254 const Arg<uint64_t> arg64(1);
1255 return Switch(option)
1256 .Case(0, If(arg32 == 0x55555555, Error(1)).Else(Error(2)))
1257 #if __SIZEOF_POINTER__ > 4
1258 .Case(1, If(arg64 == 0x55555555AAAAAAAAULL, Error(1)).Else(Error(2)))
1259 #endif
1260 .Default(Error(3));
1262 return Allow();
1265 BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
1266 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1);
1267 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2);
1268 #if __SIZEOF_POINTER__ > 4
1269 // On 32bit machines, there is no way to pass a 64bit argument through the
1270 // syscall interface. So, we have to skip the part of the test that requires
1271 // 64bit arguments.
1272 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1);
1273 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2);
1274 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2);
1275 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2);
1276 #endif
1279 #if __SIZEOF_POINTER__ > 4
1280 // On 32bit machines, there is no way to pass a 64bit argument through the
1281 // syscall interface. So, we have to skip the part of the test that requires
1282 // 64bit arguments.
1283 BPF_DEATH_TEST_C(SandboxBPF,
1284 EqualityArgumentUnallowed64bit,
1285 DEATH_MESSAGE("Unexpected 64bit argument detected"),
1286 EqualityArgumentWidthPolicy) {
1287 Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL);
1289 #endif
1291 class EqualityWithNegativeArgumentsPolicy : public SandboxBPFDSLPolicy {
1292 public:
1293 EqualityWithNegativeArgumentsPolicy() {}
1294 virtual ~EqualityWithNegativeArgumentsPolicy() {}
1296 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
1297 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1298 if (sysno == __NR_uname) {
1299 // TODO(mdempsky): This currently can't be Arg<int> because then
1300 // 0xFFFFFFFF will be treated as a (signed) int, and then when
1301 // Arg::EqualTo casts it to uint64_t, it will be sign extended.
1302 const Arg<unsigned> arg(0);
1303 return If(arg == 0xFFFFFFFF, Error(1)).Else(Error(2));
1305 return Allow();
1308 private:
1309 DISALLOW_COPY_AND_ASSIGN(EqualityWithNegativeArgumentsPolicy);
1312 BPF_TEST_C(SandboxBPF,
1313 EqualityWithNegativeArguments,
1314 EqualityWithNegativeArgumentsPolicy) {
1315 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1);
1316 BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1);
1317 BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1);
1320 #if __SIZEOF_POINTER__ > 4
1321 BPF_DEATH_TEST_C(SandboxBPF,
1322 EqualityWithNegative64bitArguments,
1323 DEATH_MESSAGE("Unexpected 64bit argument detected"),
1324 EqualityWithNegativeArgumentsPolicy) {
1325 // When expecting a 32bit system call argument, we look at the MSB of the
1326 // 64bit value and allow both "0" and "-1". But the latter is allowed only
1327 // if the LSB was negative. So, this death test should error out.
1328 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
1330 #endif
1332 class AllBitTestPolicy : public SandboxBPFDSLPolicy {
1333 public:
1334 AllBitTestPolicy() {}
1335 virtual ~AllBitTestPolicy() {}
1337 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
1339 private:
1340 static ResultExpr HasAllBits32(uint32_t bits);
1341 static ResultExpr HasAllBits64(uint64_t bits);
1343 DISALLOW_COPY_AND_ASSIGN(AllBitTestPolicy);
1346 ResultExpr AllBitTestPolicy::HasAllBits32(uint32_t bits) {
1347 if (bits == 0) {
1348 return Error(1);
1350 const Arg<uint32_t> arg(1);
1351 return If((arg & bits) == bits, Error(1)).Else(Error(0));
1354 ResultExpr AllBitTestPolicy::HasAllBits64(uint64_t bits) {
1355 if (bits == 0) {
1356 return Error(1);
1358 const Arg<uint64_t> arg(1);
1359 return If((arg & bits) == bits, Error(1)).Else(Error(0));
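// A quick sanity sketch of the "has all bits" predicate in the HasAllBits*()
// helpers above, with arbitrary example values: for bits == 0x3, an argument
// of 0x7 satisfies (0x7 & 0x3) == 0x3 and maps to Error(1), whereas an
// argument of 0x1 gives (0x1 & 0x3) == 0x1 != 0x3 and maps to Error(0). A mask
// of 0 is special-cased to Error(1), since every value trivially contains
// "all" of no bits.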
1362 ResultExpr AllBitTestPolicy::EvaluateSyscall(int sysno) const {
1363 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1364 // Test masked-equality cases that should trigger the "has all bits"
1365 // peephole optimizations. We try to find bitmasks that could conceivably
1366 // touch corner cases.
1367 // For all of these tests, we override uname(). We can make do with
1368 // a single system call number, as we use the first system call argument to
1369 // select the different bit masks that we want to test against.
1370 if (sysno == __NR_uname) {
1371 const Arg<int> option(0);
1372 return Switch(option)
1373 .Case(0, HasAllBits32(0x0))
1374 .Case(1, HasAllBits32(0x1))
1375 .Case(2, HasAllBits32(0x3))
1376 .Case(3, HasAllBits32(0x80000000))
1377 #if __SIZEOF_POINTER__ > 4
1378 .Case(4, HasAllBits64(0x0))
1379 .Case(5, HasAllBits64(0x1))
1380 .Case(6, HasAllBits64(0x3))
1381 .Case(7, HasAllBits64(0x80000000))
1382 .Case(8, HasAllBits64(0x100000000ULL))
1383 .Case(9, HasAllBits64(0x300000000ULL))
1384 .Case(10, HasAllBits64(0x100000001ULL))
1385 #endif
1386 .Default(Kill("Invalid test case number"));
1388 return Allow();
1391 // Define a macro that performs tests using our test policy.
1392 // NOTE: Not all of the arguments in this macro are actually used!
1393 // They are here just to serve as documentation of the conditions
1394 // implemented in the test policy.
1395 // Most notably, "op" and "mask" are unused by the macro. If you want
1396 // to make changes to these values, you will have to edit the
1397 // test policy instead.
1398 #define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
1399 BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value))
1401 // Our uname() system call returns ErrorCode(1) for success and
1402 // ErrorCode(0) for failure. Syscall::Call() turns this into an
1403 // exit code of -1 or 0.
1404 #define EXPECT_FAILURE 0
1405 #define EXPECT_SUCCESS -1
1407 // A couple of our tests behave differently on 32bit and 64bit systems, as
1408 // there is no way for a 32bit system call to pass in a 64bit system call
1409 // argument "arg".
1410 // We expect these tests to succeed on 64bit systems, but to fail on 32bit
1411 // systems.
1412 #define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
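// As an example of how the macro and the policy interact (taking one of the
// test cases below): BITMASK_TEST(1, 1, ALLBITS32, 0x1, EXPECT_SUCCESS)
// expands to
//   BPF_ASSERT(Syscall::Call(__NR_uname, 1, 1) == -1);
// because test case 1 checks HasAllBits32(0x1), which returns Error(1) for a
// matching argument, and Syscall::Call() reports that as -1.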
1413 BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) {
1414 // 32bit test: all of 0x0 (should always be true)
1415 BITMASK_TEST( 0, 0, ALLBITS32, 0, EXPECT_SUCCESS);
1416 BITMASK_TEST( 0, 1, ALLBITS32, 0, EXPECT_SUCCESS);
1417 BITMASK_TEST( 0, 3, ALLBITS32, 0, EXPECT_SUCCESS);
1418 BITMASK_TEST( 0, 0xFFFFFFFFU, ALLBITS32, 0, EXPECT_SUCCESS);
1419 BITMASK_TEST( 0, -1LL, ALLBITS32, 0, EXPECT_SUCCESS);
1421 // 32bit test: all of 0x1
1422 BITMASK_TEST( 1, 0, ALLBITS32, 0x1, EXPECT_FAILURE);
1423 BITMASK_TEST( 1, 1, ALLBITS32, 0x1, EXPECT_SUCCESS);
1424 BITMASK_TEST( 1, 2, ALLBITS32, 0x1, EXPECT_FAILURE);
1425 BITMASK_TEST( 1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS);
1427 // 32bit test: all of 0x3
1428 BITMASK_TEST( 2, 0, ALLBITS32, 0x3, EXPECT_FAILURE);
1429 BITMASK_TEST( 2, 1, ALLBITS32, 0x3, EXPECT_FAILURE);
1430 BITMASK_TEST( 2, 2, ALLBITS32, 0x3, EXPECT_FAILURE);
1431 BITMASK_TEST( 2, 3, ALLBITS32, 0x3, EXPECT_SUCCESS);
1432 BITMASK_TEST( 2, 7, ALLBITS32, 0x3, EXPECT_SUCCESS);
1434 // 32bit test: all of 0x80000000
1435 BITMASK_TEST( 3, 0, ALLBITS32, 0x80000000, EXPECT_FAILURE);
1436 BITMASK_TEST( 3, 0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE);
1437 BITMASK_TEST( 3, 0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1438 BITMASK_TEST( 3, 0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1439 BITMASK_TEST( 3, -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1441 #if __SIZEOF_POINTER__ > 4
1442 // 64bit test: all of 0x0 (should always be true)
1443 BITMASK_TEST( 4, 0, ALLBITS64, 0, EXPECT_SUCCESS);
1444 BITMASK_TEST( 4, 1, ALLBITS64, 0, EXPECT_SUCCESS);
1445 BITMASK_TEST( 4, 3, ALLBITS64, 0, EXPECT_SUCCESS);
1446 BITMASK_TEST( 4, 0xFFFFFFFFU, ALLBITS64, 0, EXPECT_SUCCESS);
1447 BITMASK_TEST( 4, 0x100000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1448 BITMASK_TEST( 4, 0x300000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1449 BITMASK_TEST( 4,0x8000000000000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1450 BITMASK_TEST( 4, -1LL, ALLBITS64, 0, EXPECT_SUCCESS);
1452 // 64bit test: all of 0x1
1453 BITMASK_TEST( 5, 0, ALLBITS64, 1, EXPECT_FAILURE);
1454 BITMASK_TEST( 5, 1, ALLBITS64, 1, EXPECT_SUCCESS);
1455 BITMASK_TEST( 5, 2, ALLBITS64, 1, EXPECT_FAILURE);
1456 BITMASK_TEST( 5, 3, ALLBITS64, 1, EXPECT_SUCCESS);
1457 BITMASK_TEST( 5, 0x100000000LL, ALLBITS64, 1, EXPECT_FAILURE);
1458 BITMASK_TEST( 5, 0x100000001LL, ALLBITS64, 1, EXPECT_SUCCESS);
1459 BITMASK_TEST( 5, 0x100000002LL, ALLBITS64, 1, EXPECT_FAILURE);
1460 BITMASK_TEST( 5, 0x100000003LL, ALLBITS64, 1, EXPECT_SUCCESS);
1462 // 64bit test: all of 0x3
1463 BITMASK_TEST( 6, 0, ALLBITS64, 3, EXPECT_FAILURE);
1464 BITMASK_TEST( 6, 1, ALLBITS64, 3, EXPECT_FAILURE);
1465 BITMASK_TEST( 6, 2, ALLBITS64, 3, EXPECT_FAILURE);
1466 BITMASK_TEST( 6, 3, ALLBITS64, 3, EXPECT_SUCCESS);
1467 BITMASK_TEST( 6, 7, ALLBITS64, 3, EXPECT_SUCCESS);
1468 BITMASK_TEST( 6, 0x100000000LL, ALLBITS64, 3, EXPECT_FAILURE);
1469 BITMASK_TEST( 6, 0x100000001LL, ALLBITS64, 3, EXPECT_FAILURE);
1470 BITMASK_TEST( 6, 0x100000002LL, ALLBITS64, 3, EXPECT_FAILURE);
1471 BITMASK_TEST( 6, 0x100000003LL, ALLBITS64, 3, EXPECT_SUCCESS);
1472 BITMASK_TEST( 6, 0x100000007LL, ALLBITS64, 3, EXPECT_SUCCESS);
1474 // 64bit test: all of 0x80000000
1475 BITMASK_TEST( 7, 0, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1476 BITMASK_TEST( 7, 0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1477 BITMASK_TEST( 7, 0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1478 BITMASK_TEST( 7, 0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1479 BITMASK_TEST( 7, -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1480 BITMASK_TEST( 7, 0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1481 BITMASK_TEST( 7, 0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1482 BITMASK_TEST( 7, 0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1483 BITMASK_TEST( 7, 0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1484 BITMASK_TEST( 7, -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1486 // 64bit test: all of 0x100000000
1487 BITMASK_TEST( 8, 0x000000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1488 BITMASK_TEST( 8, 0x100000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1489 BITMASK_TEST( 8, 0x200000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1490 BITMASK_TEST( 8, 0x300000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1491 BITMASK_TEST( 8, 0x000000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1492 BITMASK_TEST( 8, 0x100000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1493 BITMASK_TEST( 8, 0x200000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1494 BITMASK_TEST( 8, 0x300000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1496 // 64bit test: all of 0x300000000
1497 BITMASK_TEST( 9, 0x000000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1498 BITMASK_TEST( 9, 0x100000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1499 BITMASK_TEST( 9, 0x200000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1500 BITMASK_TEST( 9, 0x300000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1501 BITMASK_TEST( 9, 0x700000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1502 BITMASK_TEST( 9, 0x000000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1503 BITMASK_TEST( 9, 0x100000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1504 BITMASK_TEST( 9, 0x200000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1505 BITMASK_TEST( 9, 0x300000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1506 BITMASK_TEST( 9, 0x700000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1508 // 64bit test: all of 0x100000001
1509 BITMASK_TEST(10, 0x000000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1510 BITMASK_TEST(10, 0x000000001LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1511 BITMASK_TEST(10, 0x100000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1512 BITMASK_TEST(10, 0x100000001LL, ALLBITS64,0x100000001, EXPT64_SUCCESS);
1513 BITMASK_TEST(10, 0xFFFFFFFFU, ALLBITS64,0x100000001, EXPECT_FAILURE);
1514 BITMASK_TEST(10, -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS);
1515 #endif
1518 class AnyBitTestPolicy : public SandboxBPFDSLPolicy {
1519 public:
1520 AnyBitTestPolicy() {}
1521 virtual ~AnyBitTestPolicy() {}
1523 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
1525 private:
1526 static ResultExpr HasAnyBits32(uint32_t);
1527 static ResultExpr HasAnyBits64(uint64_t);
1529 DISALLOW_COPY_AND_ASSIGN(AnyBitTestPolicy);
1532 ResultExpr AnyBitTestPolicy::HasAnyBits32(uint32_t bits) {
1533 if (bits == 0) {
1534 return Error(0);
1536 const Arg<uint32_t> arg(1);
1537 return If((arg & bits) != 0, Error(1)).Else(Error(0));
1540 ResultExpr AnyBitTestPolicy::HasAnyBits64(uint64_t bits) {
1541 if (bits == 0) {
1542 return Error(0);
1544 const Arg<uint64_t> arg(1);
1545 return If((arg & bits) != 0, Error(1)).Else(Error(0));
1548 ResultExpr AnyBitTestPolicy::EvaluateSyscall(int sysno) const {
1549 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1550 // Test masked-equality cases that should trigger the "has any bits"
1551 // peephole optimizations. We try to find bitmasks that could conceivably
1552 // touch corner cases.
1553 // For all of these tests, we override uname(). We can make do with
1554 // a single system call number, as we use the first system call argument to
1555 // select the different bit masks that we want to test against.
1556 if (sysno == __NR_uname) {
1557 const Arg<int> option(0);
1558 return Switch(option)
1559 .Case(0, HasAnyBits32(0x0))
1560 .Case(1, HasAnyBits32(0x1))
1561 .Case(2, HasAnyBits32(0x3))
1562 .Case(3, HasAnyBits32(0x80000000))
1563 #if __SIZEOF_POINTER__ > 4
1564 .Case(4, HasAnyBits64(0x0))
1565 .Case(5, HasAnyBits64(0x1))
1566 .Case(6, HasAnyBits64(0x3))
1567 .Case(7, HasAnyBits64(0x80000000))
1568 .Case(8, HasAnyBits64(0x100000000ULL))
1569 .Case(9, HasAnyBits64(0x300000000ULL))
1570 .Case(10, HasAnyBits64(0x100000001ULL))
1571 #endif
1572 .Default(Kill("Invalid test case number"));
1574 return Allow();
1577 BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) {
1578 // 32bit test: any of 0x0 (should always be false)
1579 BITMASK_TEST( 0, 0, ANYBITS32, 0x0, EXPECT_FAILURE);
1580 BITMASK_TEST( 0, 1, ANYBITS32, 0x0, EXPECT_FAILURE);
1581 BITMASK_TEST( 0, 3, ANYBITS32, 0x0, EXPECT_FAILURE);
1582 BITMASK_TEST( 0, 0xFFFFFFFFU, ANYBITS32, 0x0, EXPECT_FAILURE);
1583 BITMASK_TEST( 0, -1LL, ANYBITS32, 0x0, EXPECT_FAILURE);
1585 // 32bit test: any of 0x1
1586 BITMASK_TEST( 1, 0, ANYBITS32, 0x1, EXPECT_FAILURE);
1587 BITMASK_TEST( 1, 1, ANYBITS32, 0x1, EXPECT_SUCCESS);
1588 BITMASK_TEST( 1, 2, ANYBITS32, 0x1, EXPECT_FAILURE);
1589 BITMASK_TEST( 1, 3, ANYBITS32, 0x1, EXPECT_SUCCESS);
1591 // 32bit test: any of 0x3
1592 BITMASK_TEST( 2, 0, ANYBITS32, 0x3, EXPECT_FAILURE);
1593 BITMASK_TEST( 2, 1, ANYBITS32, 0x3, EXPECT_SUCCESS);
1594 BITMASK_TEST( 2, 2, ANYBITS32, 0x3, EXPECT_SUCCESS);
1595 BITMASK_TEST( 2, 3, ANYBITS32, 0x3, EXPECT_SUCCESS);
1596 BITMASK_TEST( 2, 7, ANYBITS32, 0x3, EXPECT_SUCCESS);
1598 // 32bit test: any of 0x80000000
1599 BITMASK_TEST( 3, 0, ANYBITS32, 0x80000000, EXPECT_FAILURE);
1600 BITMASK_TEST( 3, 0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE);
1601 BITMASK_TEST( 3, 0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1602 BITMASK_TEST( 3, 0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1603 BITMASK_TEST( 3, -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1605 #if __SIZEOF_POINTER__ > 4
1606 // 64bit test: any of 0x0 (should always be false)
1607 BITMASK_TEST( 4, 0, ANYBITS64, 0x0, EXPECT_FAILURE);
1608 BITMASK_TEST( 4, 1, ANYBITS64, 0x0, EXPECT_FAILURE);
1609 BITMASK_TEST( 4, 3, ANYBITS64, 0x0, EXPECT_FAILURE);
1610 BITMASK_TEST( 4, 0xFFFFFFFFU, ANYBITS64, 0x0, EXPECT_FAILURE);
1611 BITMASK_TEST( 4, 0x100000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1612 BITMASK_TEST( 4, 0x300000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1613 BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1614 BITMASK_TEST( 4, -1LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1616 // 64bit test: any of 0x1
1617 BITMASK_TEST( 5, 0, ANYBITS64, 0x1, EXPECT_FAILURE);
1618 BITMASK_TEST( 5, 1, ANYBITS64, 0x1, EXPECT_SUCCESS);
1619 BITMASK_TEST( 5, 2, ANYBITS64, 0x1, EXPECT_FAILURE);
1620 BITMASK_TEST( 5, 3, ANYBITS64, 0x1, EXPECT_SUCCESS);
1621 BITMASK_TEST( 5, 0x100000001LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
1622 BITMASK_TEST( 5, 0x100000000LL, ANYBITS64, 0x1, EXPECT_FAILURE);
1623 BITMASK_TEST( 5, 0x100000002LL, ANYBITS64, 0x1, EXPECT_FAILURE);
1624 BITMASK_TEST( 5, 0x100000003LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
1626 // 64bit test: any of 0x3
1627 BITMASK_TEST( 6, 0, ANYBITS64, 0x3, EXPECT_FAILURE);
1628 BITMASK_TEST( 6, 1, ANYBITS64, 0x3, EXPECT_SUCCESS);
1629 BITMASK_TEST( 6, 2, ANYBITS64, 0x3, EXPECT_SUCCESS);
1630 BITMASK_TEST( 6, 3, ANYBITS64, 0x3, EXPECT_SUCCESS);
1631 BITMASK_TEST( 6, 7, ANYBITS64, 0x3, EXPECT_SUCCESS);
1632 BITMASK_TEST( 6, 0x100000000LL, ANYBITS64, 0x3, EXPECT_FAILURE);
1633 BITMASK_TEST( 6, 0x100000001LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1634 BITMASK_TEST( 6, 0x100000002LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1635 BITMASK_TEST( 6, 0x100000003LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1636 BITMASK_TEST( 6, 0x100000007LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1638 // 64bit test: any of 0x80000000
1639 BITMASK_TEST( 7, 0, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1640 BITMASK_TEST( 7, 0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1641 BITMASK_TEST( 7, 0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1642 BITMASK_TEST( 7, 0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1643 BITMASK_TEST( 7, -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1644 BITMASK_TEST( 7, 0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1645 BITMASK_TEST( 7, 0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1646 BITMASK_TEST( 7, 0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1647 BITMASK_TEST( 7, 0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1648 BITMASK_TEST( 7, -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1650 // 64bit test: any of 0x100000000
1651 BITMASK_TEST( 8, 0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1652 BITMASK_TEST( 8, 0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1653 BITMASK_TEST( 8, 0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1654 BITMASK_TEST( 8, 0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1655 BITMASK_TEST( 8, 0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1656 BITMASK_TEST( 8, 0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1657 BITMASK_TEST( 8, 0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1658 BITMASK_TEST( 8, 0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1660 // 64bit test: any of 0x300000000
1661 BITMASK_TEST( 9, 0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
1662 BITMASK_TEST( 9, 0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1663 BITMASK_TEST( 9, 0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1664 BITMASK_TEST( 9, 0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1665 BITMASK_TEST( 9, 0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1666 BITMASK_TEST( 9, 0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
1667 BITMASK_TEST( 9, 0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1668 BITMASK_TEST( 9, 0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1669 BITMASK_TEST( 9, 0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1670 BITMASK_TEST( 9, 0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1672 // 64bit test: any of 0x100000001
1673 BITMASK_TEST( 10, 0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE);
1674 BITMASK_TEST( 10, 0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1675 BITMASK_TEST( 10, 0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS);
1676 BITMASK_TEST( 10, 0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1677 BITMASK_TEST( 10, 0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1678 BITMASK_TEST( 10, -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1679 #endif
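// Illustrative sketch only: the "(arg & bits) != 0" idiom exercised by the
// AnyBitTests above, used the way a realistic policy might, namely to refuse
// any mmap() that requests executable pages. This class is hypothetical and
// unused by the tests; kIllustrativeProtExec mirrors Linux's PROT_EXEC value
// so that no extra header is needed, and some 32-bit ABIs would use
// __NR_mmap2 instead of __NR_mmap.
#if defined(__NR_mmap)
class DenyExecutableMappingsPolicy : public SandboxBPFDSLPolicy {
 public:
  DenyExecutableMappingsPolicy() {}
  virtual ~DenyExecutableMappingsPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno == __NR_mmap) {
      const uint32_t kIllustrativeProtExec = 0x4;  // Linux PROT_EXEC.
      const Arg<uint32_t> prot(2);  // Third mmap() argument holds PROT_* bits.
      return If((prot & kIllustrativeProtExec) != 0, Error(EPERM))
          .Else(Allow());
    }
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DenyExecutableMappingsPolicy);
};
#endif  // defined(__NR_mmap)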
1682 class MaskedEqualTestPolicy : public SandboxBPFDSLPolicy {
1683 public:
1684 MaskedEqualTestPolicy() {}
1685 virtual ~MaskedEqualTestPolicy() {}
1687 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
1689 private:
1690 static ResultExpr MaskedEqual32(uint32_t mask, uint32_t value);
1691 static ResultExpr MaskedEqual64(uint64_t mask, uint64_t value);
1693 DISALLOW_COPY_AND_ASSIGN(MaskedEqualTestPolicy);
1696 ResultExpr MaskedEqualTestPolicy::MaskedEqual32(uint32_t mask, uint32_t value) {
1697 const Arg<uint32_t> arg(1);
1698 return If((arg & mask) == value, Error(1)).Else(Error(0));
1701 ResultExpr MaskedEqualTestPolicy::MaskedEqual64(uint64_t mask, uint64_t value) {
1702 const Arg<uint64_t> arg(1);
1703 return If((arg & mask) == value, Error(1)).Else(Error(0));
1706 ResultExpr MaskedEqualTestPolicy::EvaluateSyscall(int sysno) const {
1707 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1709 if (sysno == __NR_uname) {
1710 const Arg<int> option(0);
1711 return Switch(option)
1712 .Case(0, MaskedEqual32(0x00ff00ff, 0x005500aa))
1713 #if __SIZEOF_POINTER__ > 4
1714 .Case(1, MaskedEqual64(0x00ff00ff00000000, 0x005500aa00000000))
1715 .Case(2, MaskedEqual64(0x00ff00ff00ff00ff, 0x005500aa005500aa))
1716 #endif
1717 .Default(Kill("Invalid test case number"));
1720 return Allow();
1723 #define MASKEQ_TEST(rulenum, arg, expected_result) \
1724 BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result))
1726 BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) {
1727 // Allowed: 0x__55__aa
1728 MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE);
1729 MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE);
1730 MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE);
1731 MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE);
1732 MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE);
1733 MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS);
1734 MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE);
1735 MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE);
1736 MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS);
1737 MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS);
1738 MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS);
1739 MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS);
1741 #if __SIZEOF_POINTER__ > 4
1742 // Allowed: 0x__55__aa________
1743 MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE);
1744 MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE);
1745 MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE);
1746 MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE);
1747 MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE);
1748 MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE);
1749 MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE);
1750 MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS);
1751 MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE);
1752 MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE);
1753 MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS);
1754 MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS);
1755 MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS);
1756 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
1757 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
1758 MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS);
1760 // Allowed: 0x__55__aa__55__aa
1761 MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE);
1762 MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE);
1763 MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE);
1764 MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE);
1765 MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE);
1766 MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE);
1767 MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE);
1768 MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE);
1769 MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE);
1770 MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS);
1771 MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE);
1772 MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE);
1773 MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE);
1774 MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE);
1775 MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS);
1776 MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS);
1777 MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS);
1778 #endif
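// Illustrative sketch only: outside of tests, masked equality is typically
// used to accept a value modulo some "don't care" bits. The hypothetical
// helper below is unused here; it would accept socket(2) only for SOCK_STREAM
// sockets while ignoring the SOCK_CLOEXEC and SOCK_NONBLOCK modifier flags.
// Note that i386 multiplexes socket calls through __NR_socketcall instead.
ResultExpr IllustrativeSocketTypeIsStream() {
  const Arg<uint32_t> type(1);  // Second socket() argument: type plus flags.
  const uint32_t kFlagBits = SOCK_CLOEXEC | SOCK_NONBLOCK;
  return If((type & ~kFlagBits) == SOCK_STREAM, Allow()).Else(Error(EPERM));
}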
1781 intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) {
1782 if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) {
1783 // We expect to get called for an attempt to fork(). No need to log that
1784 // call. But if we ever get called for anything else, we want to verbosely
1785 // print as much information as possible.
1786 const char* msg = (const char*)aux;
1787 printf(
1788 "Clone() was called with unexpected arguments\n"
1789 " nr: %d\n"
1790 " 1: 0x%llX\n"
1791 " 2: 0x%llX\n"
1792 " 3: 0x%llX\n"
1793 " 4: 0x%llX\n"
1794 " 5: 0x%llX\n"
1795 " 6: 0x%llX\n"
1796 "%s\n",
1797 args.nr,
1798 (long long)args.args[0],
1799 (long long)args.args[1],
1800 (long long)args.args[2],
1801 (long long)args.args[3],
1802 (long long)args.args[4],
1803 (long long)args.args[5],
1804 msg);
1806 return -EPERM;
1809 class PthreadPolicyEquality : public SandboxBPFDSLPolicy {
1810 public:
1811 PthreadPolicyEquality() {}
1812 virtual ~PthreadPolicyEquality() {}
1814 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
1816 private:
1817 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyEquality);
1820 ResultExpr PthreadPolicyEquality::EvaluateSyscall(int sysno) const {
1821 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1822 // This policy allows creating threads with pthread_create(). But it
1823 // doesn't allow any other uses of clone(). Most notably, it does not
1824 // allow callers to implement fork() or vfork() by passing suitable flags
1825 // to the clone() system call.
1826 if (sysno == __NR_clone) {
1827 // We have seen two different valid combinations of flags. Glibc
1828 // uses the more modern flags, sets the TLS from the call to clone(), and
1829 // uses futexes to monitor threads. Android's C run-time library doesn't
1830 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
1831 // More recent versions of Android don't set CLONE_DETACHED anymore, so
1832 // the last case accounts for that.
1833 // The following policy is very strict. It only allows the exact masks
1834 // that we have seen in known implementations. It is probably somewhat
1835 // stricter than what we would want to do.
1836 const uint64_t kGlibcCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
1837 CLONE_SIGHAND | CLONE_THREAD |
1838 CLONE_SYSVSEM | CLONE_SETTLS |
1839 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
1840 const uint64_t kBaseAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
1841 CLONE_SIGHAND | CLONE_THREAD |
1842 CLONE_SYSVSEM;
1843 const Arg<unsigned long> flags(0);
1844 return If(flags == kGlibcCloneMask ||
1845 flags == (kBaseAndroidCloneMask | CLONE_DETACHED) ||
1846 flags == kBaseAndroidCloneMask,
1847 Allow()).Else(Trap(PthreadTrapHandler, "Unknown mask"));
1850 return Allow();
1853 class PthreadPolicyBitMask : public SandboxBPFDSLPolicy {
1854 public:
1855 PthreadPolicyBitMask() {}
1856 virtual ~PthreadPolicyBitMask() {}
1858 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE;
1860 private:
1861 static BoolExpr HasAnyBits(const Arg<unsigned long>& arg, unsigned long bits);
1862 static BoolExpr HasAllBits(const Arg<unsigned long>& arg, unsigned long bits);
1864 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyBitMask);
1867 BoolExpr PthreadPolicyBitMask::HasAnyBits(const Arg<unsigned long>& arg,
1868 unsigned long bits) {
1869 return (arg & bits) != 0;
1872 BoolExpr PthreadPolicyBitMask::HasAllBits(const Arg<unsigned long>& arg,
1873 unsigned long bits) {
1874 return (arg & bits) == bits;
1877 ResultExpr PthreadPolicyBitMask::EvaluateSyscall(int sysno) const {
1878 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1879 // This policy allows creating threads with pthread_create(). But it
1880 // doesn't allow any other uses of clone(). Most notably, it does not
1881 // allow callers to implement fork() or vfork() by passing suitable flags
1882 // to the clone() system call.
1883 if (sysno == __NR_clone) {
1884 // We have seen two different valid combinations of flags. Glibc
1885 // uses the more modern flags, sets the TLS from the call to clone(), and
1886 // uses futexes to monitor threads. Android's C run-time library doesn't
1887 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
1888 // The following policy allows for either combination of flags, but it
1889 // is generally a little more conservative than strictly necessary. We
1890 // err on the side of being safe rather than sorry.
1891 // Very noticeably though, we disallow fork() (which is often just a
1892 // wrapper around clone()).
1893 const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES |
1894 CLONE_SIGHAND | CLONE_THREAD |
1895 CLONE_SYSVSEM;
1896 const unsigned long kFutexFlags =
1897 CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
1898 const unsigned long kNoopFlags = CLONE_DETACHED;
1899 const unsigned long kKnownFlags =
1900 kMandatoryFlags | kFutexFlags | kNoopFlags;
1902 const Arg<unsigned long> flags(0);
1903 return If(HasAnyBits(flags, ~kKnownFlags),
1904 Trap(PthreadTrapHandler, "Unexpected CLONE_XXX flag found"))
1905 .ElseIf(!HasAllBits(flags, kMandatoryFlags),
1906 Trap(PthreadTrapHandler,
1907 "Missing mandatory CLONE_XXX flags "
1908 "when creating new thread"))
1909 .ElseIf(
1910 !HasAllBits(flags, kFutexFlags) && HasAnyBits(flags, kFutexFlags),
1911 Trap(PthreadTrapHandler,
1912 "Must set either all or none of the TLS and futex bits in "
1913 "call to clone()"))
1914 .Else(Allow());
1917 return Allow();
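// Illustrative helper, not used by the tests: the same three checks that
// PthreadPolicyBitMask expresses in bpf_dsl, written as plain C++ so the
// decision table is easy to read. A glibc-style pthread_create() mask passes
// all three checks, while glibc's fork() flags
// (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD) already fail the
// unknown-flags check and would reach the Trap() handler.
bool IllustrativeWouldAllowClone(unsigned long flags) {
  const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES |
                                        CLONE_SIGHAND | CLONE_THREAD |
                                        CLONE_SYSVSEM;
  const unsigned long kFutexFlags =
      CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
  const unsigned long kNoopFlags = CLONE_DETACHED;
  const unsigned long kKnownFlags = kMandatoryFlags | kFutexFlags | kNoopFlags;
  if (flags & ~kKnownFlags)
    return false;  // Unexpected CLONE_XXX flag (or exit signal bits).
  if ((flags & kMandatoryFlags) != kMandatoryFlags)
    return false;  // Missing mandatory thread-creation flags.
  const unsigned long futex_bits = flags & kFutexFlags;
  if (futex_bits != 0 && futex_bits != kFutexFlags)
    return false;  // TLS/futex bits must be set all-or-nothing.
  return true;  // Equivalent to the policy's Allow() branch.
}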
1920 static void* ThreadFnc(void* arg) {
1921 ++*reinterpret_cast<int*>(arg);
1922 Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
1923 return NULL;
1926 static void PthreadTest() {
1927 // Attempt to start a joinable thread. This should succeed.
1928 pthread_t thread;
1929 int thread_ran = 0;
1930 BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran));
1931 BPF_ASSERT(!pthread_join(thread, NULL));
1932 BPF_ASSERT(thread_ran);
1934 // Attempt to start a detached thread. This should succeed.
1935 thread_ran = 0;
1936 pthread_attr_t attr;
1937 BPF_ASSERT(!pthread_attr_init(&attr));
1938 BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
1939 BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
1940 BPF_ASSERT(!pthread_attr_destroy(&attr));
1941 while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
1942 -EINTR) {
1944 BPF_ASSERT(thread_ran);
1946 // Attempt to fork() a process using clone(). This should fail. We use the
1947 // same flags that glibc uses when calling fork(). But we don't actually
1948 // try calling the fork() implementation in the C run-time library, as
1949 // run-time libraries other than glibc might call __NR_fork instead of
1950 // __NR_clone, and that would introduce a bogus test failure.
1951 int pid;
1952 BPF_ASSERT(Syscall::Call(__NR_clone,
1953 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
1954 0,
1955 0,
1956 &pid) == -EPERM);
1959 BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) {
1960 PthreadTest();
1963 BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) {
1964 PthreadTest();
1967 // libc might not define these even though the kernel supports them.
1968 #ifndef PTRACE_O_TRACESECCOMP
1969 #define PTRACE_O_TRACESECCOMP 0x00000080
1970 #endif
1972 #ifdef PTRACE_EVENT_SECCOMP
1973 #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
1974 #else
1975 // When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they
1976 // changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by
1977 // PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If
1978 // PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both
1979 // values here.
1980 #define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8)
1981 #endif
1983 #if defined(__arm__)
1984 #ifndef PTRACE_SET_SYSCALL
1985 #define PTRACE_SET_SYSCALL 23
1986 #endif
1987 #endif
1989 #if defined(__aarch64__)
1990 #ifndef PTRACE_GETREGS
1991 #define PTRACE_GETREGS 12
1992 #endif
1993 #endif
1995 #if defined(__aarch64__)
1996 #ifndef PTRACE_SETREGS
1997 #define PTRACE_SETREGS 13
1998 #endif
1999 #endif
2001 // Changes the syscall to run for a child being sandboxed using seccomp-bpf with
2002 // PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped on
2003 // PTRACE_EVENT_SECCOMP.
2005 // regs should contain the current set of registers of the child, obtained using
2006 // PTRACE_GETREGS.
2008 // Depending on the architecture, this may modify regs, so the caller is
2009 // responsible for committing these changes using PTRACE_SETREGS.
2010 long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) {
2011 #if defined(__arm__)
2012 // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the
2013 // libc ptrace call as the request parameter is an enum, and
2014 // PTRACE_SET_SYSCALL may not be in the enum.
2015 return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number);
2016 #endif
2018 SECCOMP_PT_SYSCALL(*regs) = syscall_number;
2019 return 0;
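// Illustrative-only sketch (error handling omitted, nothing calls this): the
// typical GETREGS -> SetSyscall() -> SETREGS sequence a tracer performs while
// the tracee is stopped on PTRACE_EVENT_SECCOMP. The SeccompRetTrace test
// below performs the same sequence with BPF_ASSERTs around every step.
void IllustrativeRewriteToExit(pid_t pid, int exit_code) {
  regs_struct regs;
  ptrace(PTRACE_GETREGS, pid, NULL, &regs);  // Snapshot the tracee's registers.
  SetSyscall(pid, &regs, __NR_exit);         // Run exit() instead of the
                                             // original system call.
  SECCOMP_PT_PARM1(regs) = exit_code;        // exit() status for the tracee.
  ptrace(PTRACE_SETREGS, pid, NULL, &regs);  // Commit the modified registers.
  ptrace(PTRACE_CONT, pid, NULL, NULL);      // Let the tracee resume.
}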
2022 const uint16_t kTraceData = 0xcc;
2024 class TraceAllPolicy : public SandboxBPFDSLPolicy {
2025 public:
2026 TraceAllPolicy() {}
2027 virtual ~TraceAllPolicy() {}
2029 virtual ResultExpr EvaluateSyscall(int system_call_number) const OVERRIDE {
2030 return Trace(kTraceData);
2033 private:
2034 DISALLOW_COPY_AND_ASSIGN(TraceAllPolicy);
2037 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) {
2038 if (SandboxBPF::SupportsSeccompSandbox(-1) !=
2039 sandbox::SandboxBPF::STATUS_AVAILABLE) {
2040 return;
2043 // This test is disabled on arm due to a kernel bug.
2044 // See https://code.google.com/p/chromium/issues/detail?id=383977
2045 #if defined(__arm__) || defined(__aarch64__)
2046 printf("This test is currently disabled on ARM32/64 due to a kernel bug.");
2047 return;
2048 #endif
2050 #if defined(__mips__)
2051 // TODO: Figure out how to handle indirect syscalls with the required
2052 // specificity in this test, then enable it.
2053 printf("This test is currently disabled on MIPS.");
2054 return;
2055 #endif
2057 pid_t pid = fork();
2058 BPF_ASSERT_NE(-1, pid);
2059 if (pid == 0) {
2060 pid_t my_pid = getpid();
2061 BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL));
2062 BPF_ASSERT_EQ(0, raise(SIGSTOP));
2063 SandboxBPF sandbox;
2064 sandbox.SetSandboxPolicy(new TraceAllPolicy);
2065 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
2067 // getpid is allowed.
2068 BPF_ASSERT_EQ(my_pid, syscall(__NR_getpid));
2070 // write to stdout is skipped and returns a fake value.
2071 BPF_ASSERT_EQ(kExpectedReturnValue,
2072 syscall(__NR_write, STDOUT_FILENO, "A", 1));
2074 // kill is rewritten to exit(kExpectedReturnValue).
2075 syscall(__NR_kill, my_pid, SIGKILL);
2077 // Should not be reached.
2078 BPF_ASSERT(false);
2081 int status;
2082 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1);
2083 BPF_ASSERT(WIFSTOPPED(status));
2085 BPF_ASSERT_NE(-1,
2086 ptrace(PTRACE_SETOPTIONS,
2087 pid,
2088 NULL,
2089 reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP)));
2090 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2091 while (true) {
2092 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1);
2093 if (WIFEXITED(status) || WIFSIGNALED(status)) {
2094 BPF_ASSERT(WIFEXITED(status));
2095 BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status));
2096 break;
2099 if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
2100 !IS_SECCOMP_EVENT(status)) {
2101 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2102 continue;
2105 unsigned long data;
2106 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data));
2107 BPF_ASSERT_EQ(kTraceData, data);
2109 regs_struct regs;
2110 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, &regs));
2111 switch (SECCOMP_PT_SYSCALL(regs)) {
2112 case __NR_write:
2113 // Skip writes to stdout, make it return kExpectedReturnValue. Allow
2114 // writes to stderr so that BPF_ASSERT messages show up.
2115 if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) {
2116 BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, -1));
2117 SECCOMP_PT_RESULT(regs) = kExpectedReturnValue;
2118 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
2120 break;
2122 case __NR_kill:
2123 // Rewrite to exit(kExpectedReturnValue).
2124 BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, __NR_exit));
2125 SECCOMP_PT_PARM1(regs) = kExpectedReturnValue;
2126 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
2127 break;
2129 default:
2130 // Allow all other syscalls.
2131 break;
2134 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2138 // Android does not expose pread64 nor pwrite64.
2139 #if !defined(OS_ANDROID)
2141 bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) {
2142 while (count > 0) {
2143 const ssize_t transfered =
2144 HANDLE_EINTR(pwrite64(fd, buffer, count, offset));
2145 if (transfered <= 0 || static_cast<size_t>(transfered) > count) {
2146 return false;
2148 count -= transfered;
2149 buffer += transfered;
2150 offset += transfered;
2152 return true;
2155 bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) {
2156 while (count > 0) {
2157 const ssize_t transfered = HANDLE_EINTR(pread64(fd, buffer, count, offset));
2158 if (transfered <= 0 || static_cast<size_t>(transfered) > count) {
2159 return false;
2161 count -= transfered;
2162 buffer += transfered;
2163 offset += transfered;
2165 return true;
2168 bool pread_64_was_forwarded = false;
2170 class TrapPread64Policy : public SandboxBPFDSLPolicy {
2171 public:
2172 TrapPread64Policy() {}
2173 virtual ~TrapPread64Policy() {}
2175 virtual ResultExpr EvaluateSyscall(int system_call_number) const OVERRIDE {
2176 // Set the global environment for unsafe traps once.
2177 if (system_call_number == MIN_SYSCALL) {
2178 EnableUnsafeTraps();
2181 if (system_call_number == __NR_pread64) {
2182 return UnsafeTrap(ForwardPreadHandler, NULL);
2184 return Allow();
2187 private:
2188 static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args,
2189 void* aux) {
2190 BPF_ASSERT(args.nr == __NR_pread64);
2191 pread_64_was_forwarded = true;
2193 return SandboxBPF::ForwardSyscall(args);
2196 DISALLOW_COPY_AND_ASSIGN(TrapPread64Policy);
2199 // pread(2) takes a 64-bit offset. On 32-bit systems, it is split between
2200 // two arguments. In this test, we make sure that ForwardSyscall() can
2201 // forward it properly.
2202 BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) {
2203 ScopedTemporaryFile temp_file;
2204 const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
2205 const char kTestString[] = "This is a test!";
2206 BPF_ASSERT(FullPwrite64(
2207 temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset));
2209 char read_test_string[sizeof(kTestString)] = {0};
2210 BPF_ASSERT(FullPread64(temp_file.fd(),
2211 read_test_string,
2212 sizeof(read_test_string),
2213 kLargeOffset));
2214 BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString)));
2215 BPF_ASSERT(pread_64_was_forwarded);
2218 #endif // !defined(OS_ANDROID)
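// Background for the Pread64 test above, as an illustrative sketch only: on
// 32-bit ABIs the 64-bit pread64()/pwrite64() offset is passed in two 32-bit
// syscall argument slots, which is what ForwardSyscall() must preserve. The
// helper below is hypothetical and unused; the actual slot positions,
// low/high ordering and any alignment padding are architecture-specific.
struct IllustrativeSplitOffset64 {
  uint32_t low;
  uint32_t high;
};

IllustrativeSplitOffset64 IllustrativeSplitOffset(uint64_t offset) {
  IllustrativeSplitOffset64 split;
  split.low = static_cast<uint32_t>(offset & 0xFFFFFFFFu);  // Lower 32 bits.
  split.high = static_cast<uint32_t>(offset >> 32);         // Upper 32 bits.
  return split;
}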
2220 void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) {
2221 base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr);
2223 // Wait for the main thread to signal that the filter has been applied.
2224 if (!event->IsSignaled()) {
2225 event->Wait();
2228 BPF_ASSERT(event->IsSignaled());
2230 BlacklistNanosleepPolicy::AssertNanosleepFails();
2232 return NULL;
2235 SANDBOX_TEST(SandboxBPF, Tsync) {
2236 if (SandboxBPF::SupportsSeccompThreadFilterSynchronization() !=
2237 SandboxBPF::STATUS_AVAILABLE) {
2238 return;
2241 base::WaitableEvent event(true, false);
2243 // Create a thread on which to invoke the blocked syscall.
2244 pthread_t thread;
2245 BPF_ASSERT_EQ(
2246 0, pthread_create(&thread, NULL, &TsyncApplyToTwoThreadsFunc, &event));
2248 // Test that nanosleep succeeds.
2249 const struct timespec ts = {0, 0};
2250 BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
2252 // Engage the sandbox.
2253 SandboxBPF sandbox;
2254 sandbox.SetSandboxPolicy(new BlacklistNanosleepPolicy());
2255 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));
2257 // This thread should have the filter applied as well.
2258 BlacklistNanosleepPolicy::AssertNanosleepFails();
2260 // Signal the condition to invoke the system call.
2261 event.Signal();
2263 // Wait for the thread to finish.
2264 BPF_ASSERT_EQ(0, pthread_join(thread, NULL));
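// Illustrative note on what the Tsync test above relies on: seccomp(2) with
// SECCOMP_FILTER_FLAG_TSYNC installs a filter on every thread of the calling
// process atomically, which is what StartSandbox(PROCESS_MULTI_THREADED)
// builds on. The sketch below is hypothetical and unused; the constants shown
// are the upstream <linux/seccomp.h> values, __NR_seccomp is assumed to be
// provided by the syscall headers in use, and a real caller would pass a
// populated struct sock_fprog after setting no_new_privs.
#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC 1
#endif

#if defined(__NR_seccomp)
long IllustrativeInstallFilterOnAllThreads(const void* prog) {
  // With TSYNC, the kernel reports the ID of a thread that could not be
  // synchronized when installation fails.
  return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
                 SECCOMP_FILTER_FLAG_TSYNC, prog);
}
#endif  // defined(__NR_seccomp)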
2267 class AllowAllPolicy : public SandboxBPFDSLPolicy {
2268 public:
2269 AllowAllPolicy() {}
2270 virtual ~AllowAllPolicy() {}
2272 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
2273 return Allow();
2276 private:
2277 DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy);
2280 SANDBOX_DEATH_TEST(
2281 SandboxBPF,
2282 StartMultiThreadedAsSingleThreaded,
2283 DEATH_MESSAGE("Cannot start sandbox; process is already multi-threaded")) {
2284 base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded");
2285 BPF_ASSERT(thread.Start());
2287 SandboxBPF sandbox;
2288 sandbox.SetSandboxPolicy(new AllowAllPolicy());
2289 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
2292 // http://crbug.com/407357
2293 #if !defined(THREAD_SANITIZER)
2294 SANDBOX_DEATH_TEST(
2295 SandboxBPF,
2296 StartSingleThreadedAsMultiThreaded,
2297 DEATH_MESSAGE(
2298 "Cannot start sandbox; process may be single-threaded when "
2299 "reported as not")) {
2300 SandboxBPF sandbox;
2301 sandbox.SetSandboxPolicy(new AllowAllPolicy());
2302 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));
2304 #endif // !defined(THREAD_SANITIZER)
2306 // A stub handler for the UnsafeTrap. Never called.
2307 intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) {
2308 return -1;
2311 class UnsafeTrapWithCondPolicy : public SandboxBPFDSLPolicy {
2312 public:
2313 UnsafeTrapWithCondPolicy() {}
2314 virtual ~UnsafeTrapWithCondPolicy() {}
2316 virtual ResultExpr EvaluateSyscall(int sysno) const OVERRIDE {
2317 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
2318 setenv(kSandboxDebuggingEnv, "t", 0);
2319 Die::SuppressInfoMessages(true);
2321 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
2322 return Allow();
2324 switch (sysno) {
2325 case __NR_uname: {
2326 const Arg<uint32_t> arg(0);
2327 return If(arg == 0, Allow()).Else(Error(EPERM));
2329 case __NR_setgid: {
2330 const Arg<uint32_t> arg(0);
2331 return Switch(arg)
2332 .Case(100, Error(ENOMEM))
2333 .Case(200, Error(ENOSYS))
2334 .Default(Error(EPERM));
2336 case __NR_close:
2337 case __NR_exit_group:
2338 case __NR_write:
2339 return Allow();
2340 case __NR_getppid:
2341 return UnsafeTrap(NoOpHandler, NULL);
2342 default:
2343 return Error(EPERM);
2347 private:
2348 DISALLOW_COPY_AND_ASSIGN(UnsafeTrapWithCondPolicy);
2351 BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) {
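// With option 0, the policy returns Allow(), so the real uname() runs and
// fails with EFAULT because the buffer pointer is NULL; with option 1, the
// conditional rule rejects the call with EPERM.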
2352 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0));
2353 BPF_ASSERT_EQ(EFAULT, errno);
2355 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1));
2356 BPF_ASSERT_EQ(EPERM, errno);
2358 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100));
2359 BPF_ASSERT_EQ(ENOMEM, errno);
2361 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200));
2362 BPF_ASSERT_EQ(ENOSYS, errno);
2364 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300));
2365 BPF_ASSERT_EQ(EPERM, errno);
2368 } // namespace
2370 } // namespace bpf_dsl
2371 } // namespace sandbox