[chromium-blink-merge.git] / sandbox / linux / bpf_dsl / bpf_dsl_more_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <pthread.h>
10 #include <sched.h>
11 #include <signal.h>
12 #include <sys/prctl.h>
13 #include <sys/ptrace.h>
14 #include <sys/syscall.h>
15 #include <sys/time.h>
16 #include <sys/types.h>
17 #include <sys/utsname.h>
18 #include <unistd.h>
19 #include <sys/socket.h>
21 #if defined(ANDROID)
22 // Work-around for buggy headers in Android's NDK
23 #define __user
24 #endif
25 #include <linux/futex.h>
27 #include "base/bind.h"
28 #include "base/logging.h"
29 #include "base/macros.h"
30 #include "base/memory/scoped_ptr.h"
31 #include "base/posix/eintr_wrapper.h"
32 #include "base/synchronization/waitable_event.h"
33 #include "base/threading/thread.h"
34 #include "build/build_config.h"
35 #include "sandbox/linux/bpf_dsl/policy.h"
36 #include "sandbox/linux/seccomp-bpf/bpf_tests.h"
37 #include "sandbox/linux/seccomp-bpf/die.h"
38 #include "sandbox/linux/seccomp-bpf/errorcode.h"
39 #include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
40 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
41 #include "sandbox/linux/seccomp-bpf/syscall.h"
42 #include "sandbox/linux/seccomp-bpf/trap.h"
43 #include "sandbox/linux/services/linux_syscalls.h"
44 #include "sandbox/linux/services/syscall_wrappers.h"
45 #include "sandbox/linux/syscall_broker/broker_process.h"
46 #include "sandbox/linux/tests/scoped_temporary_file.h"
47 #include "sandbox/linux/tests/unit_tests.h"
48 #include "testing/gtest/include/gtest/gtest.h"
50 // Workaround for Android's prctl.h file.
51 #ifndef PR_GET_ENDIAN
52 #define PR_GET_ENDIAN 19
53 #endif
54 #ifndef PR_CAPBSET_READ
55 #define PR_CAPBSET_READ 23
56 #define PR_CAPBSET_DROP 24
57 #endif
59 namespace sandbox {
60 namespace bpf_dsl {
62 namespace {
64 const int kExpectedReturnValue = 42;
65 const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
67 // Set the global environment to allow the use of UnsafeTrap() policies.
68 void EnableUnsafeTraps() {
69 // The use of UnsafeTrap() causes us to print a warning message. This is
70 // generally desirable, but it results in the unittest failing, as it doesn't
71 // expect any messages on "stderr". So, temporarily disable messages. The
72 // BPF_TEST() is guaranteed to turn messages back on, after the policy
73 // function has completed.
74 setenv(kSandboxDebuggingEnv, "t", 0);
75 Die::SuppressInfoMessages(true);
78 // This test should execute regardless of whether we have kernel support. So,
79 // we make it a TEST() instead of a BPF_TEST().
80 TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupports)) {
81 // We check that we don't crash, but it's ok if the kernel doesn't
82 // support it.
83 bool seccomp_bpf_supported =
84 SandboxBPF::SupportsSeccompSandbox(-1) == SandboxBPF::STATUS_AVAILABLE;
85 // We want to log whether or not seccomp BPF is actually supported
86 // since actual test coverage depends on it.
87 RecordProperty("SeccompBPFSupported",
88 seccomp_bpf_supported ? "true." : "false.");
89 std::cout << "Seccomp BPF supported: "
90 << (seccomp_bpf_supported ? "true." : "false.") << "\n";
91 RecordProperty("PointerSize", sizeof(void*));
92 std::cout << "Pointer size: " << sizeof(void*) << "\n";
95 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupportsTwice)) {
96 SandboxBPF::SupportsSeccompSandbox(-1);
97 SandboxBPF::SupportsSeccompSandbox(-1);
100 // BPF_TEST handles a lot of the boiler-plate code around setting up a
101 // policy and optionally passing data between the caller, the policy and
102 // any Trap() handlers. This is great for writing short and concise tests,
103 // and it keeps us from accidentally forgetting any of the crucial steps in
104 // setting up the sandbox. But it wouldn't hurt to have at least one test
105 // that explicitly walks through all these steps.
107 intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) {
108 BPF_ASSERT(aux);
109 int* counter = static_cast<int*>(aux);
110 return (*counter)++;
113 class VerboseAPITestingPolicy : public Policy {
114 public:
115 explicit VerboseAPITestingPolicy(int* counter_ptr)
116 : counter_ptr_(counter_ptr) {}
117 ~VerboseAPITestingPolicy() override {}
119 ResultExpr EvaluateSyscall(int sysno) const override {
120 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
121 if (sysno == __NR_uname) {
122 return Trap(IncreaseCounter, counter_ptr_);
124 return Allow();
127 private:
128 int* counter_ptr_;
130 DISALLOW_COPY_AND_ASSIGN(VerboseAPITestingPolicy);
133 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) {
134 if (SandboxBPF::SupportsSeccompSandbox(-1) ==
135 sandbox::SandboxBPF::STATUS_AVAILABLE) {
136 static int counter = 0;
138 SandboxBPF sandbox;
139 sandbox.SetSandboxPolicy(new VerboseAPITestingPolicy(&counter));
140 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
142 BPF_ASSERT_EQ(0, counter);
143 BPF_ASSERT_EQ(0, syscall(__NR_uname, 0));
144 BPF_ASSERT_EQ(1, counter);
145 BPF_ASSERT_EQ(1, syscall(__NR_uname, 0));
146 BPF_ASSERT_EQ(2, counter);
150 // A simple blacklist test
152 class BlacklistNanosleepPolicy : public Policy {
153 public:
154 BlacklistNanosleepPolicy() {}
155 ~BlacklistNanosleepPolicy() override {}
157 ResultExpr EvaluateSyscall(int sysno) const override {
158 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
159 switch (sysno) {
160 case __NR_nanosleep:
161 return Error(EACCES);
162 default:
163 return Allow();
167 static void AssertNanosleepFails() {
168 const struct timespec ts = {0, 0};
169 errno = 0;
170 BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
171 BPF_ASSERT_EQ(EACCES, errno);
174 private:
175 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepPolicy);
178 BPF_TEST_C(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) {
179 BlacklistNanosleepPolicy::AssertNanosleepFails();
182 // Now do a simple whitelist test
184 class WhitelistGetpidPolicy : public Policy {
185 public:
186 WhitelistGetpidPolicy() {}
187 ~WhitelistGetpidPolicy() override {}
189 ResultExpr EvaluateSyscall(int sysno) const override {
190 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
191 switch (sysno) {
192 case __NR_getpid:
193 case __NR_exit_group:
194 return Allow();
195 default:
196 return Error(ENOMEM);
200 private:
201 DISALLOW_COPY_AND_ASSIGN(WhitelistGetpidPolicy);
204 BPF_TEST_C(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) {
205 // getpid() should be allowed
206 errno = 0;
207 BPF_ASSERT(sys_getpid() > 0);
208 BPF_ASSERT(errno == 0);
210 // getpgid() should be denied
211 BPF_ASSERT(getpgid(0) == -1);
212 BPF_ASSERT(errno == ENOMEM);
215 // A simple blacklist policy, with a SIGSYS handler
216 intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) {
217 // We also check that the auxiliary data is correct
218 SANDBOX_ASSERT(aux);
219 *(static_cast<int*>(aux)) = kExpectedReturnValue;
220 return -ENOMEM;
223 class BlacklistNanosleepTrapPolicy : public Policy {
224 public:
225 explicit BlacklistNanosleepTrapPolicy(int* aux) : aux_(aux) {}
226 ~BlacklistNanosleepTrapPolicy() override {}
228 ResultExpr EvaluateSyscall(int sysno) const override {
229 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
230 switch (sysno) {
231 case __NR_nanosleep:
232 return Trap(EnomemHandler, aux_);
233 default:
234 return Allow();
238 private:
239 int* aux_;
241 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepTrapPolicy);
244 BPF_TEST(SandboxBPF,
245 BasicBlacklistWithSigsys,
246 BlacklistNanosleepTrapPolicy,
247 int /* (*BPF_AUX) */) {
248 // getpid() should work properly
249 errno = 0;
250 BPF_ASSERT(sys_getpid() > 0);
251 BPF_ASSERT(errno == 0);
253 // Our auxiliary data should be reset by the signal handler
254 *BPF_AUX = -1;
255 const struct timespec ts = {0, 0};
256 BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
257 BPF_ASSERT(errno == ENOMEM);
259 // We expect the signal handler to modify AuxData
260 BPF_ASSERT(*BPF_AUX == kExpectedReturnValue);
263 // A simple test that verifies we can return arbitrary errno values.
265 class ErrnoTestPolicy : public Policy {
266 public:
267 ErrnoTestPolicy() {}
268 ~ErrnoTestPolicy() override {}
270 ResultExpr EvaluateSyscall(int sysno) const override;
272 private:
273 DISALLOW_COPY_AND_ASSIGN(ErrnoTestPolicy);
276 ResultExpr ErrnoTestPolicy::EvaluateSyscall(int sysno) const {
277 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
278 switch (sysno) {
279 case __NR_dup3: // dup2() is a wrapper of dup3() on Android
280 #if defined(__NR_dup2)
281 case __NR_dup2:
282 #endif
283 // Pretend that dup2() worked, but don't actually do anything.
284 return Error(0);
285 case __NR_setuid:
286 #if defined(__NR_setuid32)
287 case __NR_setuid32:
288 #endif
289 // Return errno = 1.
290 return Error(1);
291 case __NR_setgid:
292 #if defined(__NR_setgid32)
293 case __NR_setgid32:
294 #endif
295 // Return maximum errno value (typically 4095).
296 return Error(ErrorCode::ERR_MAX_ERRNO);
297 case __NR_uname:
298 // Return errno = 42;
299 return Error(42);
300 default:
301 return Allow();
305 BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) {
306 // Verify that dup2() returns success, but doesn't actually run.
307 int fds[4];
308 BPF_ASSERT(pipe(fds) == 0);
309 BPF_ASSERT(pipe(fds + 2) == 0);
310 BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
311 char buf[1] = {};
312 BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
313 BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
314 BPF_ASSERT(read(fds[0], buf, 1) == 1);
316 // If dup2() executed, we will read \xAA, but if dup2() has been turned
317 // into a no-op by our policy, then we will read \x55.
318 BPF_ASSERT(buf[0] == '\x55');
320 // Verify that we can return the minimum and maximum errno values.
321 errno = 0;
322 BPF_ASSERT(setuid(0) == -1);
323 BPF_ASSERT(errno == 1);
325 // On Android, errno is only supported up to 255, otherwise errno
326 // processing is skipped.
327 // We work around this (crbug.com/181647).
328 if (sandbox::IsAndroid() && setgid(0) != -1) {
329 errno = 0;
330 BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO);
331 BPF_ASSERT(errno == 0);
332 } else {
333 errno = 0;
334 BPF_ASSERT(setgid(0) == -1);
335 BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO);
338 // Finally, test an errno in between the minimum and maximum.
339 errno = 0;
340 struct utsname uts_buf;
341 BPF_ASSERT(uname(&uts_buf) == -1);
342 BPF_ASSERT(errno == 42);
345 // Testing the stacking of two sandboxes
347 class StackingPolicyPartOne : public Policy {
348 public:
349 StackingPolicyPartOne() {}
350 ~StackingPolicyPartOne() override {}
352 ResultExpr EvaluateSyscall(int sysno) const override {
353 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
354 switch (sysno) {
355 case __NR_getppid: {
356 const Arg<int> arg(0);
357 return If(arg == 0, Allow()).Else(Error(EPERM));
359 default:
360 return Allow();
364 private:
365 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartOne);
368 class StackingPolicyPartTwo : public Policy {
369 public:
370 StackingPolicyPartTwo() {}
371 ~StackingPolicyPartTwo() override {}
373 ResultExpr EvaluateSyscall(int sysno) const override {
374 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
375 switch (sysno) {
376 case __NR_getppid: {
377 const Arg<int> arg(0);
378 return If(arg == 0, Error(EINVAL)).Else(Allow());
380 default:
381 return Allow();
385 private:
386 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartTwo);
389 BPF_TEST_C(SandboxBPF, StackingPolicy, StackingPolicyPartOne) {
390 errno = 0;
391 BPF_ASSERT(syscall(__NR_getppid, 0) > 0);
392 BPF_ASSERT(errno == 0);
394 BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
395 BPF_ASSERT(errno == EPERM);
397 // Stack a second sandbox with its own policy. Verify that we can further
398 // restrict filters, but we cannot relax existing filters.
399 SandboxBPF sandbox;
400 sandbox.SetSandboxPolicy(new StackingPolicyPartTwo());
401 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
403 errno = 0;
404 BPF_ASSERT(syscall(__NR_getppid, 0) == -1);
405 BPF_ASSERT(errno == EINVAL);
407 BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
408 BPF_ASSERT(errno == EPERM);
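// Summary of the two blocks above (added for clarity, restating what the
// asserts check): once both filters are installed, every system call sees
// the more restrictive of the two decisions. getppid(0) is allowed by part
// one but now fails with part two's EINVAL, while getppid(1) still fails
// with part one's EPERM.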
411 // A more complex, but synthetic policy. This tests the correctness of the BPF
412 // program by iterating through all syscalls and checking for an errno that
413 // depends on the syscall number. Unlike the Verifier, this exercises the BPF
414 // interpreter in the kernel.
416 // We try to make sure we exercise optimizations in the BPF compiler. We give
417 // the compiler an opportunity to coalesce syscalls with contiguous numbers,
418 // and we also make sure that disjoint sets can return the
419 // same errno.
420 int SysnoToRandomErrno(int sysno) {
421 // Small contiguous sets of four system calls return an errno equal to the
422 // index of that set + 1 (so that we never return a zero errno).
423 return ((sysno & ~3) >> 2) % 29 + 1;
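// Worked example of the mapping above (added for illustration): with this
// formula, sysnos 0..3 map to errno 1, sysnos 4..7 map to errno 2, and the
// "% 29" wrap-around sends sysnos 116..119 back to errno 1, so disjoint
// syscall ranges end up sharing an errno, which is exactly the property the
// comment above asks the BPF compiler to cope with.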
426 class SyntheticPolicy : public Policy {
427 public:
428 SyntheticPolicy() {}
429 ~SyntheticPolicy() override {}
431 ResultExpr EvaluateSyscall(int sysno) const override {
432 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
433 if (sysno == __NR_exit_group || sysno == __NR_write) {
434 // exit_group() is special, we really need it to work.
435 // write() is needed for BPF_ASSERT() to report a useful error message.
436 return Allow();
438 return Error(SysnoToRandomErrno(sysno));
441 private:
442 DISALLOW_COPY_AND_ASSIGN(SyntheticPolicy);
445 BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) {
446 // Ensure that kExpectedReturnValue + syscall_number + 1 does not overflow
447 // an int.
448 BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
449 static_cast<int>(MAX_PUBLIC_SYSCALL));
451 for (int syscall_number = static_cast<int>(MIN_SYSCALL);
452 syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
453 ++syscall_number) {
454 if (syscall_number == __NR_exit_group || syscall_number == __NR_write) {
455 // exit_group() is special
456 continue;
458 errno = 0;
459 BPF_ASSERT(syscall(syscall_number) == -1);
460 BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number));
464 #if defined(__arm__)
465 // A simple policy that tests whether ARM private system calls are supported
466 // by our BPF compiler and by the BPF interpreter in the kernel.
468 // For ARM private system calls, return an errno equal to their offset from
469 // MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno).
470 int ArmPrivateSysnoToErrno(int sysno) {
471 if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) &&
472 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
473 return (sysno - MIN_PRIVATE_SYSCALL) + 1;
474 } else {
475 return ENOSYS;
479 class ArmPrivatePolicy : public Policy {
480 public:
481 ArmPrivatePolicy() {}
482 ~ArmPrivatePolicy() override {}
484 ResultExpr EvaluateSyscall(int sysno) const override {
485 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
486 // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual
487 // ARM private system calls.
488 if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) &&
489 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
490 return Error(ArmPrivateSysnoToErrno(sysno));
492 return Allow();
495 private:
496 DISALLOW_COPY_AND_ASSIGN(ArmPrivatePolicy);
499 BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) {
500 for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
501 syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
502 ++syscall_number) {
503 errno = 0;
504 BPF_ASSERT(syscall(syscall_number) == -1);
505 BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
508 #endif // defined(__arm__)
510 intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
511 // Count all invocations of our callback function.
512 ++*reinterpret_cast<int*>(aux);
514 // Verify that within the callback function all filtering is temporarily
515 // disabled.
516 BPF_ASSERT(sys_getpid() > 1);
518 // Verify that we can now call the underlying system call without causing
519 // infinite recursion.
520 return SandboxBPF::ForwardSyscall(args);
523 class GreyListedPolicy : public Policy {
524 public:
525 explicit GreyListedPolicy(int* aux) : aux_(aux) {
526 // Set the global environment for unsafe traps once.
527 EnableUnsafeTraps();
529 ~GreyListedPolicy() override {}
531 ResultExpr EvaluateSyscall(int sysno) const override {
532 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
533 // Some system calls must always be allowed, if our policy wants to make
534 // use of UnsafeTrap()
535 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) {
536 return Allow();
537 } else if (sysno == __NR_getpid) {
538 // Disallow getpid()
539 return Error(EPERM);
540 } else {
541 // Allow (and count) all other system calls.
542 return UnsafeTrap(CountSyscalls, aux_);
546 private:
547 int* aux_;
549 DISALLOW_COPY_AND_ASSIGN(GreyListedPolicy);
552 BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) {
553 BPF_ASSERT(sys_getpid() == -1);
554 BPF_ASSERT(errno == EPERM);
555 BPF_ASSERT(*BPF_AUX == 0);
556 BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
557 BPF_ASSERT(*BPF_AUX == 2);
558 char name[17] = {};
559 BPF_ASSERT(!syscall(__NR_prctl,
560 PR_GET_NAME,
561 name,
562 (void*)NULL,
563 (void*)NULL,
564 (void*)NULL));
565 BPF_ASSERT(*BPF_AUX == 3);
566 BPF_ASSERT(*name);
569 SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) {
570 // Disabling warning messages that could confuse our test framework.
571 setenv(kSandboxDebuggingEnv, "t", 0);
572 Die::SuppressInfoMessages(true);
574 unsetenv(kSandboxDebuggingEnv);
575 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
576 setenv(kSandboxDebuggingEnv, "", 1);
577 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
578 setenv(kSandboxDebuggingEnv, "t", 1);
579 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true);
582 intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) {
583 if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) {
584 // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always
585 // return an error. But our handler allows this call.
586 return 0;
587 } else {
588 return SandboxBPF::ForwardSyscall(args);
592 class PrctlPolicy : public Policy {
593 public:
594 PrctlPolicy() {}
595 ~PrctlPolicy() override {}
597 ResultExpr EvaluateSyscall(int sysno) const override {
598 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
599 setenv(kSandboxDebuggingEnv, "t", 0);
600 Die::SuppressInfoMessages(true);
602 if (sysno == __NR_prctl) {
603 // Handle prctl() inside an UnsafeTrap()
604 return UnsafeTrap(PrctlHandler, NULL);
607 // Allow all other system calls.
608 return Allow();
611 private:
612 DISALLOW_COPY_AND_ASSIGN(PrctlPolicy);
615 BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) {
616 // This call should never be allowed. But our policy will intercept it and
617 // let it pass successfully.
618 BPF_ASSERT(
619 !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));
621 // Verify that the call will fail, if it makes it all the way to the kernel.
622 BPF_ASSERT(
623 prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);
625 // And verify that other uses of prctl() work just fine.
626 char name[17] = {};
627 BPF_ASSERT(!syscall(__NR_prctl,
628 PR_GET_NAME,
629 name,
630 (void*)NULL,
631 (void*)NULL,
632 (void*)NULL));
633 BPF_ASSERT(*name);
635 // Finally, verify that system calls other than prctl() are completely
636 // unaffected by our policy.
637 struct utsname uts = {};
638 BPF_ASSERT(!uname(&uts));
639 BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
642 intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
643 return SandboxBPF::ForwardSyscall(args);
646 class RedirectAllSyscallsPolicy : public Policy {
647 public:
648 RedirectAllSyscallsPolicy() {}
649 ~RedirectAllSyscallsPolicy() override {}
651 ResultExpr EvaluateSyscall(int sysno) const override;
653 private:
654 DISALLOW_COPY_AND_ASSIGN(RedirectAllSyscallsPolicy);
657 ResultExpr RedirectAllSyscallsPolicy::EvaluateSyscall(int sysno) const {
658 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
659 setenv(kSandboxDebuggingEnv, "t", 0);
660 Die::SuppressInfoMessages(true);
662 // Some system calls must always be allowed, if our policy wants to make
663 // use of UnsafeTrap()
664 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
665 return Allow();
666 return UnsafeTrap(AllowRedirectedSyscall, NULL);
669 int bus_handler_fd_ = -1;
671 void SigBusHandler(int, siginfo_t* info, void* void_context) {
672 BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
675 BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) {
676 // We use the SIGBUS bit in the signal mask as a thread-local boolean
677 // value in the implementation of UnsafeTrap(). This is obviously a bit
678 // of a hack that could conceivably interfere with code that uses SIGBUS
679 // in more traditional ways. This test verifies that basic functionality
680 // of SIGBUS is not impacted, but it is certainly possible to construct
681 // more complex uses of signals where our use of the SIGBUS mask is not
682 // 100% transparent. This is expected behavior.
683 int fds[2];
684 BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
685 bus_handler_fd_ = fds[1];
686 struct sigaction sa = {};
687 sa.sa_sigaction = SigBusHandler;
688 sa.sa_flags = SA_SIGINFO;
689 BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
690 raise(SIGBUS);
691 char c = '\000';
692 BPF_ASSERT(read(fds[0], &c, 1) == 1);
693 BPF_ASSERT(close(fds[0]) == 0);
694 BPF_ASSERT(close(fds[1]) == 0);
695 BPF_ASSERT(c == 0x55);
698 BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) {
699 // Signal masks are potentially tricky to handle. For instance, if we
700 // ever tried to update them from inside a Trap() or UnsafeTrap() handler,
701 // the call to sigreturn() at the end of the signal handler would undo
702 // all of our efforts. So, it makes sense to test that sigprocmask()
703 // works, even if we have a policy in place that makes use of UnsafeTrap().
704 // In practice, this works because we force sigprocmask() to be handled
705 // entirely in the kernel.
706 sigset_t mask0, mask1, mask2;
708 // Call sigprocmask() to verify that SIGUSR2 wasn't blocked, if we didn't
709 // change the mask (it shouldn't have been, as it isn't blocked by default
710 // in POSIX).
712 // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose.
713 sigemptyset(&mask0);
714 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1));
715 BPF_ASSERT(!sigismember(&mask1, SIGUSR2));
717 // Try again, and this time we verify that we can block it. This
718 // requires a second call to sigprocmask().
719 sigaddset(&mask0, SIGUSR2);
720 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL));
721 BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2));
722 BPF_ASSERT(sigismember(&mask2, SIGUSR2));
725 BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
726 // An UnsafeTrap() (or for that matter, a Trap()) has to report error
727 // conditions by returning an exit code in the range -1..-4096. This
728 // should happen automatically if using ForwardSyscall(). If the TrapFnc()
729 // uses some other method to make system calls, then it is responsible
730 // for computing the correct return code.
731 // This test verifies that ForwardSyscall() does the correct thing.
733 // The glibc system wrapper will ultimately set errno for us. So, from normal
734 // userspace, all of this should be completely transparent.
735 errno = 0;
736 BPF_ASSERT(close(-1) == -1);
737 BPF_ASSERT(errno == EBADF);
739 // Explicitly avoid the glibc wrapper. This is not normally the way anybody
740 // would make system calls, but it allows us to verify that we don't
741 // accidentally mess with errno, when we shouldn't.
742 errno = 0;
743 struct arch_seccomp_data args = {};
744 args.nr = __NR_close;
745 args.args[0] = -1;
746 BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF);
747 BPF_ASSERT(errno == 0);
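// Illustrative sketch only, not part of the original test: the conventional
// way a raw kernel-style result such as the -EBADF above gets folded back
// into the errno/-1 form exposed by the glibc wrappers. The bound matches
// ErrorCode::ERR_MAX_ERRNO (typically 4095). The helper is unused here and
// exists purely to document the convention.
intptr_t RawSyscallResultToErrno(intptr_t rv) {
  if (rv < 0 && rv >= -static_cast<intptr_t>(ErrorCode::ERR_MAX_ERRNO)) {
    errno = -rv;  // e.g. -EBADF becomes errno = EBADF, and -1 is returned.
    return -1;
  }
  return rv;
}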
750 bool NoOpCallback() {
751 return true;
754 // Test a trap handler that makes use of a broker process to open().
756 class InitializedOpenBroker {
757 public:
758 InitializedOpenBroker() : initialized_(false) {
759 std::vector<std::string> allowed_files;
760 allowed_files.push_back("/proc/allowed");
761 allowed_files.push_back("/proc/cpuinfo");
763 broker_process_.reset(new syscall_broker::BrokerProcess(
764 EPERM, allowed_files, std::vector<std::string>()));
765 BPF_ASSERT(broker_process() != NULL);
766 BPF_ASSERT(broker_process_->Init(base::Bind(&NoOpCallback)));
768 initialized_ = true;
770 bool initialized() { return initialized_; }
771 class syscall_broker::BrokerProcess* broker_process() {
772 return broker_process_.get();
775 private:
776 bool initialized_;
777 scoped_ptr<class syscall_broker::BrokerProcess> broker_process_;
778 DISALLOW_COPY_AND_ASSIGN(InitializedOpenBroker);
781 intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args,
782 void* aux) {
783 BPF_ASSERT(aux);
784 syscall_broker::BrokerProcess* broker_process =
785 static_cast<syscall_broker::BrokerProcess*>(aux);
786 switch (args.nr) {
787 case __NR_faccessat: // access() is a wrapper of faccessat() on Android
788 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
789 return broker_process->Access(reinterpret_cast<const char*>(args.args[1]),
790 static_cast<int>(args.args[2]));
791 #if defined(__NR_access)
792 case __NR_access:
793 return broker_process->Access(reinterpret_cast<const char*>(args.args[0]),
794 static_cast<int>(args.args[1]));
795 #endif
796 #if defined(__NR_open)
797 case __NR_open:
798 return broker_process->Open(reinterpret_cast<const char*>(args.args[0]),
799 static_cast<int>(args.args[1]));
800 #endif
801 case __NR_openat:
802 // We only call open() so if we arrive here, it's because glibc uses
803 // the openat() system call.
804 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
805 return broker_process->Open(reinterpret_cast<const char*>(args.args[1]),
806 static_cast<int>(args.args[2]));
807 default:
808 BPF_ASSERT(false);
809 return -ENOSYS;
813 class DenyOpenPolicy : public Policy {
814 public:
815 explicit DenyOpenPolicy(InitializedOpenBroker* iob) : iob_(iob) {}
816 ~DenyOpenPolicy() override {}
818 ResultExpr EvaluateSyscall(int sysno) const override {
819 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
821 switch (sysno) {
822 case __NR_faccessat:
823 #if defined(__NR_access)
824 case __NR_access:
825 #endif
826 #if defined(__NR_open)
827 case __NR_open:
828 #endif
829 case __NR_openat:
830 // We get an InitializedOpenBroker class, but our trap handler wants
831 // the syscall_broker::BrokerProcess object.
832 return Trap(BrokerOpenTrapHandler, iob_->broker_process());
833 default:
834 return Allow();
838 private:
839 InitializedOpenBroker* iob_;
841 DISALLOW_COPY_AND_ASSIGN(DenyOpenPolicy);
844 // We use an InitializedOpenBroker class, so that we can run unsandboxed
845 // code in its constructor, which is the only way to do so in a BPF_TEST.
846 BPF_TEST(SandboxBPF,
847 UseOpenBroker,
848 DenyOpenPolicy,
849 InitializedOpenBroker /* (*BPF_AUX) */) {
850 BPF_ASSERT(BPF_AUX->initialized());
851 syscall_broker::BrokerProcess* broker_process = BPF_AUX->broker_process();
852 BPF_ASSERT(broker_process != NULL);
854 // First, use the broker "manually"
855 BPF_ASSERT(broker_process->Open("/proc/denied", O_RDONLY) == -EPERM);
856 BPF_ASSERT(broker_process->Access("/proc/denied", R_OK) == -EPERM);
857 BPF_ASSERT(broker_process->Open("/proc/allowed", O_RDONLY) == -ENOENT);
858 BPF_ASSERT(broker_process->Access("/proc/allowed", R_OK) == -ENOENT);
860 // Now use glibc's open() as an external library would.
861 BPF_ASSERT(open("/proc/denied", O_RDONLY) == -1);
862 BPF_ASSERT(errno == EPERM);
864 BPF_ASSERT(open("/proc/allowed", O_RDONLY) == -1);
865 BPF_ASSERT(errno == ENOENT);
867 // Also test glibc's openat(); some versions of libc use it transparently
868 // instead of open().
869 BPF_ASSERT(openat(AT_FDCWD, "/proc/denied", O_RDONLY) == -1);
870 BPF_ASSERT(errno == EPERM);
872 BPF_ASSERT(openat(AT_FDCWD, "/proc/allowed", O_RDONLY) == -1);
873 BPF_ASSERT(errno == ENOENT);
875 // And test glibc's access().
876 BPF_ASSERT(access("/proc/denied", R_OK) == -1);
877 BPF_ASSERT(errno == EPERM);
879 BPF_ASSERT(access("/proc/allowed", R_OK) == -1);
880 BPF_ASSERT(errno == ENOENT);
882 // This is also whitelisted and does exist.
883 int cpu_info_access = access("/proc/cpuinfo", R_OK);
884 BPF_ASSERT(cpu_info_access == 0);
885 int cpu_info_fd = open("/proc/cpuinfo", O_RDONLY);
886 BPF_ASSERT(cpu_info_fd >= 0);
887 char buf[1024];
888 BPF_ASSERT(read(cpu_info_fd, buf, sizeof(buf)) > 0);
891 // Simple test demonstrating how to use SandboxBPF::Cond()
893 class SimpleCondTestPolicy : public Policy {
894 public:
895 SimpleCondTestPolicy() {}
896 ~SimpleCondTestPolicy() override {}
898 ResultExpr EvaluateSyscall(int sysno) const override;
900 private:
901 DISALLOW_COPY_AND_ASSIGN(SimpleCondTestPolicy);
904 ResultExpr SimpleCondTestPolicy::EvaluateSyscall(int sysno) const {
905 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
907 // We deliberately return unusual errno values upon failure, so that we
908 // can uniquely test for these values. In a "real" policy, you would want
909 // to return more traditional values.
910 int flags_argument_position = -1;
911 switch (sysno) {
912 #if defined(__NR_open)
913 case __NR_open:
914 flags_argument_position = 1;
915 #endif
916 case __NR_openat: { // open can be a wrapper for openat(2).
917 if (sysno == __NR_openat)
918 flags_argument_position = 2;
920 // Allow opening files for reading, but don't allow writing.
921 COMPILE_ASSERT(O_RDONLY == 0, O_RDONLY_must_be_all_zero_bits);
922 const Arg<int> flags(flags_argument_position);
923 return If((flags & O_ACCMODE) != 0, Error(EROFS)).Else(Allow());
925 case __NR_prctl: {
926 // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but
927 // disallow everything else.
928 const Arg<int> option(0);
929 return If(option == PR_SET_DUMPABLE || option == PR_GET_DUMPABLE, Allow())
930 .Else(Error(ENOMEM));
932 default:
933 return Allow();
937 BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) {
938 int fd;
939 BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1);
940 BPF_ASSERT(errno == EROFS);
941 BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0);
942 close(fd);
944 int ret;
945 BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0);
946 BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0);
947 BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1);
948 BPF_ASSERT(errno == ENOMEM);
951 // This test exercises the SandboxBPF::Cond() method by building a complex
952 // tree of conditional equality operations. It then makes system calls and
953 // verifies that they return the values that we expected from our BPF
954 // program.
955 class EqualityStressTest {
956 public:
957 EqualityStressTest() {
958 // We want a deterministic test
959 srand(0);
961 // Iterates over system call numbers and builds a random tree of
962 // equality tests.
963 // We are actually constructing a graph of ArgValue objects. This
964 // graph will later be used to a) compute our sandbox policy, and
965 // b) drive the code that verifies the output from the BPF program.
966 COMPILE_ASSERT(
967 kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
968 num_test_cases_must_be_significantly_smaller_than_num_system_calls);
969 for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
970 if (IsReservedSyscall(sysno)) {
971 // Skip reserved system calls. This ensures that our test framework
972 // isn't impacted by the fact that we are overriding
973 // a lot of different system calls.
974 ++end;
975 arg_values_.push_back(NULL);
976 } else {
977 arg_values_.push_back(
978 RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
983 ~EqualityStressTest() {
984 for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
985 iter != arg_values_.end();
986 ++iter) {
987 DeleteArgValue(*iter);
991 ResultExpr Policy(int sysno) {
992 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
993 if (sysno < 0 || sysno >= (int)arg_values_.size() ||
994 IsReservedSyscall(sysno)) {
995 // We only return ErrorCode values for the system calls that
996 // are part of our test data. Every other system call remains
997 // allowed.
998 return Allow();
999 } else {
1000 // ToErrorCode() turns an ArgValue object into an ErrorCode that is
1001 // suitable for use by a sandbox policy.
1002 return ToErrorCode(arg_values_[sysno]);
1006 void VerifyFilter() {
1007 // Iterate over all system calls. Skip the system calls that have
1008 // previously been determined as being reserved.
1009 for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) {
1010 if (!arg_values_[sysno]) {
1011 // Skip reserved system calls.
1012 continue;
1014 // Verify that system calls return the values that we expect them to
1015 // return. This involves passing different combinations of system call
1016 // parameters in order to exercise all possible code paths through the
1017 // BPF filter program.
1018 // We arbitrarily start by setting all six system call arguments to
1019 // zero. We then recursively traverse our tree of ArgValues to
1020 // determine the necessary combinations of parameters.
1021 intptr_t args[6] = {};
1022 Verify(sysno, args, *arg_values_[sysno]);
1026 private:
1027 struct ArgValue {
1028 int argno; // Argument number to inspect.
1029 int size; // Number of test cases (must be > 0).
1030 struct Tests {
1031 uint32_t k_value; // Value to compare syscall arg against.
1032 int err; // If non-zero, errno value to return.
1033 struct ArgValue* arg_value; // Otherwise, more args needs inspecting.
1034 }* tests;
1035 int err; // If none of the tests passed, this is what
1036 struct ArgValue* arg_value; // we'll return (this is the "else" branch).
1039 bool IsReservedSyscall(int sysno) {
1040 // There are a handful of system calls that we should never use in our
1041 // test cases. These system calls are needed to allow the test framework
1042 // to run properly.
1043 // If we wanted to write fully generic code, there are more system calls
1044 // that could be listed here, and it is quite difficult to come up with a
1045 // truly comprehensive list. After all, we are deliberately making system
1046 // calls unavailable. In practice, we have a pretty good idea of the system
1047 // calls that will be made by this particular test. So, this small list is
1048 // sufficient. But if anybody copy'n'pasted this code for other uses, they
1049 // would have to review the list.
1050 return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
1051 sysno == __NR_exit_group || sysno == __NR_restart_syscall;
1054 ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
1055 // Create a new ArgValue and fill it with random data. We use a bit mask
1056 // to keep track of the system call parameters that have previously been
1057 // set; this ensures that we won't accidentally define a contradictory
1058 // set of equality tests.
1059 struct ArgValue* arg_value = new ArgValue();
1060 args_mask |= 1 << argno;
1061 arg_value->argno = argno;
1063 // Apply some restrictions on just how complex our tests can be.
1064 // Otherwise, we end up with a BPF program that is too complicated for
1065 // the kernel to load.
1066 int fan_out = kMaxFanOut;
1067 if (remaining_args > 3) {
1068 fan_out = 1;
1069 } else if (remaining_args > 2) {
1070 fan_out = 2;
1073 // Create a couple of different test cases with randomized values that
1074 // we want to use when comparing system call parameter number "argno".
1075 arg_value->size = rand() % fan_out + 1;
1076 arg_value->tests = new ArgValue::Tests[arg_value->size];
1078 uint32_t k_value = rand();
1079 for (int n = 0; n < arg_value->size; ++n) {
1080 // Ensure that we have unique values
1081 k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;
1083 // There are two possible types of nodes. Either this is a leaf node;
1084 // in that case, we have completed all the equality tests that we
1085 // wanted to perform, and we can now compute a random "errno" value that
1086 // we should return. Or this is part of a more complex boolean
1087 // expression; in that case, we have to recursively add tests for some
1088 // of the system call parameters that we have not yet included in our
1089 // tests.
1090 arg_value->tests[n].k_value = k_value;
1091 if (!remaining_args || (rand() & 1)) {
1092 arg_value->tests[n].err = (rand() % 1000) + 1;
1093 arg_value->tests[n].arg_value = NULL;
1094 } else {
1095 arg_value->tests[n].err = 0;
1096 arg_value->tests[n].arg_value =
1097 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
1100 // Finally, we have to define what we should return if none of the
1101 // previous equality tests pass. Again, we can either deal with a leaf
1102 // node, or we can randomly add another couple of tests.
1103 if (!remaining_args || (rand() & 1)) {
1104 arg_value->err = (rand() % 1000) + 1;
1105 arg_value->arg_value = NULL;
1106 } else {
1107 arg_value->err = 0;
1108 arg_value->arg_value =
1109 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
1111 // We have now built a new (sub-)tree of ArgValues defining a set of
1112 // boolean expressions for testing random system call arguments against
1113 // random values. Return this tree to our caller.
1114 return arg_value;
1117 int RandomArg(int args_mask) {
1118 // Compute a random system call parameter number.
1119 int argno = rand() % kMaxArgs;
1121 // Make sure that this same parameter number has not previously been
1122 // used. Otherwise, we could end up with a test that is impossible to
1123 // satisfy (e.g. args[0] == 1 && args[0] == 2).
1124 while (args_mask & (1 << argno)) {
1125 argno = (argno + 1) % kMaxArgs;
1127 return argno;
1130 void DeleteArgValue(ArgValue* arg_value) {
1131 // Delete an ArgValue and all of its child nodes. This requires
1132 // recursively descending into the tree.
1133 if (arg_value) {
1134 if (arg_value->size) {
1135 for (int n = 0; n < arg_value->size; ++n) {
1136 if (!arg_value->tests[n].err) {
1137 DeleteArgValue(arg_value->tests[n].arg_value);
1140 delete[] arg_value->tests;
1142 if (!arg_value->err) {
1143 DeleteArgValue(arg_value->arg_value);
1145 delete arg_value;
1149 ResultExpr ToErrorCode(ArgValue* arg_value) {
1150 // Compute the ResultExpr that should be returned, if none of our
1151 // tests succeed (i.e. the system call parameter doesn't match any
1152 // of the values in arg_value->tests[].k_value).
1153 ResultExpr err;
1154 if (arg_value->err) {
1155 // If this was a leaf node, return the errno value that we expect to
1156 // return from the BPF filter program.
1157 err = Error(arg_value->err);
1158 } else {
1159 // If this wasn't a leaf node yet, recursively descend into the rest
1160 // of the tree. This will end up adding a few more SandboxBPF::Cond()
1161 // tests to our ErrorCode.
1162 err = ToErrorCode(arg_value->arg_value);
1165 // Now, iterate over all the test cases that we want to compare against.
1166 // This builds a chain of SandboxBPF::Cond() tests
1167 // (aka "if ... elif ... elif ... elif ... fi")
1168 for (int n = arg_value->size; n-- > 0;) {
1169 ResultExpr matched;
1170 // Again, we distinguish between leaf nodes and subtrees.
1171 if (arg_value->tests[n].err) {
1172 matched = Error(arg_value->tests[n].err);
1173 } else {
1174 matched = ToErrorCode(arg_value->tests[n].arg_value);
1176 // For now, all of our tests are limited to 32bit.
1177 // We have separate tests that check the behavior of 32bit vs. 64bit
1178 // conditional expressions.
1179 const Arg<uint32_t> arg(arg_value->argno);
1180 err = If(arg == arg_value->tests[n].k_value, matched).Else(err);
1182 return err;
1185 void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
1186 uint32_t mismatched = 0;
1187 // Iterate over all the k_values in arg_value.tests[] and verify that
1188 // we see the expected return values from system calls, when we pass
1189 // the k_value as a parameter in a system call.
1190 for (int n = arg_value.size; n-- > 0;) {
1191 mismatched += arg_value.tests[n].k_value;
1192 args[arg_value.argno] = arg_value.tests[n].k_value;
1193 if (arg_value.tests[n].err) {
1194 VerifyErrno(sysno, args, arg_value.tests[n].err);
1195 } else {
1196 Verify(sysno, args, *arg_value.tests[n].arg_value);
1199 // Find a k_value that doesn't match any of the k_values in
1200 // arg_value.tests[]. In most cases, the current value of "mismatched"
1201 // would fit this requirement. But on the off-chance that it happens
1202 // to collide, we double-check.
1203 try_again:
1204 for (int n = arg_value.size; n-- > 0;) {
1205 if (mismatched == arg_value.tests[n].k_value) {
1206 ++mismatched;
1207 goto try_again;
1210 // Now verify that we see the expected return value from system calls,
1211 // if we pass a value that doesn't match any of the conditions (i.e. this
1212 // is testing the "else" clause of the conditions).
1213 args[arg_value.argno] = mismatched;
1214 if (arg_value.err) {
1215 VerifyErrno(sysno, args, arg_value.err);
1216 } else {
1217 Verify(sysno, args, *arg_value.arg_value);
1219 // Reset args[arg_value.argno]. This is not technically needed, but it
1220 // makes it easier to reason about the correctness of our tests.
1221 args[arg_value.argno] = 0;
1224 void VerifyErrno(int sysno, intptr_t* args, int err) {
1225 // We installed BPF filters that return different errno values
1226 // based on the system call number and the parameters that we decided
1227 // to pass in. Verify that this condition holds true.
1228 BPF_ASSERT(
1229 Syscall::Call(
1230 sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
1231 -err);
1234 // Vector of ArgValue trees. These trees define all the possible boolean
1235 // expressions that we want to turn into a BPF filter program.
1236 std::vector<ArgValue*> arg_values_;
1238 // Don't increase these values. We are pushing the limits of the maximum
1239 // BPF program that the kernel will allow us to load. If the values are
1240 // increased too much, the test will start failing.
1241 #if defined(__aarch64__)
1242 static const int kNumTestCases = 30;
1243 #else
1244 static const int kNumTestCases = 40;
1245 #endif
1246 static const int kMaxFanOut = 3;
1247 static const int kMaxArgs = 6;
1250 class EqualityStressTestPolicy : public Policy {
1251 public:
1252 explicit EqualityStressTestPolicy(EqualityStressTest* aux) : aux_(aux) {}
1253 ~EqualityStressTestPolicy() override {}
1255 ResultExpr EvaluateSyscall(int sysno) const override {
1256 return aux_->Policy(sysno);
1259 private:
1260 EqualityStressTest* aux_;
1262 DISALLOW_COPY_AND_ASSIGN(EqualityStressTestPolicy);
1265 BPF_TEST(SandboxBPF,
1266 EqualityTests,
1267 EqualityStressTestPolicy,
1268 EqualityStressTest /* (*BPF_AUX) */) {
1269 BPF_AUX->VerifyFilter();
1272 class EqualityArgumentWidthPolicy : public Policy {
1273 public:
1274 EqualityArgumentWidthPolicy() {}
1275 ~EqualityArgumentWidthPolicy() override {}
1277 ResultExpr EvaluateSyscall(int sysno) const override;
1279 private:
1280 DISALLOW_COPY_AND_ASSIGN(EqualityArgumentWidthPolicy);
1283 ResultExpr EqualityArgumentWidthPolicy::EvaluateSyscall(int sysno) const {
1284 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1285 if (sysno == __NR_uname) {
1286 const Arg<int> option(0);
1287 const Arg<uint32_t> arg32(1);
1288 const Arg<uint64_t> arg64(1);
1289 return Switch(option)
1290 .Case(0, If(arg32 == 0x55555555, Error(1)).Else(Error(2)))
1291 #if __SIZEOF_POINTER__ > 4
1292 .Case(1, If(arg64 == 0x55555555AAAAAAAAULL, Error(1)).Else(Error(2)))
1293 #endif
1294 .Default(Error(3));
1296 return Allow();
1299 BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
1300 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1);
1301 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2);
1302 #if __SIZEOF_POINTER__ > 4
1303 // On 32bit machines, there is no way to pass a 64bit argument through the
1304 // syscall interface. So, we have to skip the part of the test that requires
1305 // 64bit arguments.
1306 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1);
1307 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2);
1308 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2);
1309 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2);
1310 #endif
1313 #if __SIZEOF_POINTER__ > 4
1314 // On 32bit machines, there is no way to pass a 64bit argument through the
1315 // syscall interface. So, we have to skip the part of the test that requires
1316 // 64bit arguments.
1317 BPF_DEATH_TEST_C(SandboxBPF,
1318 EqualityArgumentUnallowed64bit,
1319 DEATH_MESSAGE("Unexpected 64bit argument detected"),
1320 EqualityArgumentWidthPolicy) {
1321 Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL);
1323 #endif
1325 class EqualityWithNegativeArgumentsPolicy : public Policy {
1326 public:
1327 EqualityWithNegativeArgumentsPolicy() {}
1328 ~EqualityWithNegativeArgumentsPolicy() override {}
1330 ResultExpr EvaluateSyscall(int sysno) const override {
1331 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1332 if (sysno == __NR_uname) {
1333 // TODO(mdempsky): This currently can't be Arg<int> because then
1334 // 0xFFFFFFFF will be treated as a (signed) int, and then when
1335 // Arg::EqualTo casts it to uint64_t, it will be sign extended.
1336 const Arg<unsigned> arg(0);
1337 return If(arg == 0xFFFFFFFF, Error(1)).Else(Error(2));
1339 return Allow();
1342 private:
1343 DISALLOW_COPY_AND_ASSIGN(EqualityWithNegativeArgumentsPolicy);
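// Worked example of the sign-extension issue described in the TODO above
// (added for illustration): with Arg<int>, the comparison constant
// 0xFFFFFFFF is first narrowed to the signed int -1, and widening -1 to
// uint64_t sign-extends it to 0xFFFFFFFFFFFFFFFF. With Arg<unsigned>, the
// constant widens to 0x00000000FFFFFFFF, which is the 32-bit comparison
// this policy actually wants.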
1346 BPF_TEST_C(SandboxBPF,
1347 EqualityWithNegativeArguments,
1348 EqualityWithNegativeArgumentsPolicy) {
1349 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1);
1350 BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1);
1351 BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1);
1354 #if __SIZEOF_POINTER__ > 4
1355 BPF_DEATH_TEST_C(SandboxBPF,
1356 EqualityWithNegative64bitArguments,
1357 DEATH_MESSAGE("Unexpected 64bit argument detected"),
1358 EqualityWithNegativeArgumentsPolicy) {
1359 // When expecting a 32bit system call argument, we look at the MSB of the
1360 // 64bit value and allow both "0" and "-1". But the latter is allowed only
1361 // iff the LSB was negative. So, this death test should error out.
1362 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
1364 #endif
1366 class AllBitTestPolicy : public Policy {
1367 public:
1368 AllBitTestPolicy() {}
1369 ~AllBitTestPolicy() override {}
1371 ResultExpr EvaluateSyscall(int sysno) const override;
1373 private:
1374 static ResultExpr HasAllBits32(uint32_t bits);
1375 static ResultExpr HasAllBits64(uint64_t bits);
1377 DISALLOW_COPY_AND_ASSIGN(AllBitTestPolicy);
1380 ResultExpr AllBitTestPolicy::HasAllBits32(uint32_t bits) {
1381 if (bits == 0) {
1382 return Error(1);
1384 const Arg<uint32_t> arg(1);
1385 return If((arg & bits) == bits, Error(1)).Else(Error(0));
1388 ResultExpr AllBitTestPolicy::HasAllBits64(uint64_t bits) {
1389 if (bits == 0) {
1390 return Error(1);
1392 const Arg<uint64_t> arg(1);
1393 return If((arg & bits) == bits, Error(1)).Else(Error(0));
1396 ResultExpr AllBitTestPolicy::EvaluateSyscall(int sysno) const {
1397 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1398 // Test masked-equality cases that should trigger the "has all bits"
1399 // peephole optimizations. We try to find bitmasks that could conceivably
1400 // touch corner cases.
1401 // For all of these tests, we override uname(). We can make do with
1402 // a single system call number, as we use the first system call argument to
1403 // select the different bit masks that we want to test against.
1404 if (sysno == __NR_uname) {
1405 const Arg<int> option(0);
1406 return Switch(option)
1407 .Case(0, HasAllBits32(0x0))
1408 .Case(1, HasAllBits32(0x1))
1409 .Case(2, HasAllBits32(0x3))
1410 .Case(3, HasAllBits32(0x80000000))
1411 #if __SIZEOF_POINTER__ > 4
1412 .Case(4, HasAllBits64(0x0))
1413 .Case(5, HasAllBits64(0x1))
1414 .Case(6, HasAllBits64(0x3))
1415 .Case(7, HasAllBits64(0x80000000))
1416 .Case(8, HasAllBits64(0x100000000ULL))
1417 .Case(9, HasAllBits64(0x300000000ULL))
1418 .Case(10, HasAllBits64(0x100000001ULL))
1419 #endif
1420 .Default(Kill("Invalid test case number"));
1422 return Allow();
1425 // Define a macro that performs tests using our test policy.
1426 // NOTE: Not all of the arguments in this macro are actually used!
1427 // They are here just to serve as documentation of the conditions
1428 // implemented in the test policy.
1429 // Most notably, "op" and "mask" are unused by the macro. If you want
1430 // to make changes to these values, you will have to edit the
1431 // test policy instead.
1432 #define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
1433 BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value))
1435 // Our uname() system call returns ErrorCode(1) for success and
1436 // ErrorCode(0) for failure. Syscall::Call() turns this into an
1437 // exit code of -1 or 0.
1438 #define EXPECT_FAILURE 0
1439 #define EXPECT_SUCCESS -1
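// For example (illustrative only), the first assertion below,
//   BITMASK_TEST( 0, 0, ALLBITS32, 0, EXPECT_SUCCESS);
// expands to
//   BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0) == -1);
// i.e. test case 0 of AllBitTestPolicy (HasAllBits32(0x0)) applied to the
// argument 0, which the policy maps to Error(1) and which Syscall::Call()
// therefore reports as -1.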
1441 // A couple of our tests behave differently on 32bit and 64bit systems, as
1442 // there is no way for a 32bit system call to pass in a 64bit system call
1443 // argument "arg".
1444 // We expect these tests to succeed on 64bit systems, but to fail on 32bit
1445 // systems.
1446 #define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
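// Concrete illustration (added for clarity): on a 32-bit build, an argument
// such as 0x100000000LL is truncated to 0 before the system call is made, so
// a 64-bit mask like 0x100000000 can never be fully present and test case 8
// below yields EXPECT_FAILURE; on a 64-bit build the full value survives and
// the same case yields EXPECT_SUCCESS. EXPT64_SUCCESS encodes exactly this
// difference.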
1447 BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) {
1448 // 32bit test: all of 0x0 (should always be true)
1449 BITMASK_TEST( 0, 0, ALLBITS32, 0, EXPECT_SUCCESS);
1450 BITMASK_TEST( 0, 1, ALLBITS32, 0, EXPECT_SUCCESS);
1451 BITMASK_TEST( 0, 3, ALLBITS32, 0, EXPECT_SUCCESS);
1452 BITMASK_TEST( 0, 0xFFFFFFFFU, ALLBITS32, 0, EXPECT_SUCCESS);
1453 BITMASK_TEST( 0, -1LL, ALLBITS32, 0, EXPECT_SUCCESS);
1455 // 32bit test: all of 0x1
1456 BITMASK_TEST( 1, 0, ALLBITS32, 0x1, EXPECT_FAILURE);
1457 BITMASK_TEST( 1, 1, ALLBITS32, 0x1, EXPECT_SUCCESS);
1458 BITMASK_TEST( 1, 2, ALLBITS32, 0x1, EXPECT_FAILURE);
1459 BITMASK_TEST( 1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS);
1461 // 32bit test: all of 0x3
1462 BITMASK_TEST( 2, 0, ALLBITS32, 0x3, EXPECT_FAILURE);
1463 BITMASK_TEST( 2, 1, ALLBITS32, 0x3, EXPECT_FAILURE);
1464 BITMASK_TEST( 2, 2, ALLBITS32, 0x3, EXPECT_FAILURE);
1465 BITMASK_TEST( 2, 3, ALLBITS32, 0x3, EXPECT_SUCCESS);
1466 BITMASK_TEST( 2, 7, ALLBITS32, 0x3, EXPECT_SUCCESS);
1468 // 32bit test: all of 0x80000000
1469 BITMASK_TEST( 3, 0, ALLBITS32, 0x80000000, EXPECT_FAILURE);
1470 BITMASK_TEST( 3, 0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE);
1471 BITMASK_TEST( 3, 0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1472 BITMASK_TEST( 3, 0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1473 BITMASK_TEST( 3, -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1475 #if __SIZEOF_POINTER__ > 4
1476 // 64bit test: all of 0x0 (should always be true)
1477 BITMASK_TEST( 4, 0, ALLBITS64, 0, EXPECT_SUCCESS);
1478 BITMASK_TEST( 4, 1, ALLBITS64, 0, EXPECT_SUCCESS);
1479 BITMASK_TEST( 4, 3, ALLBITS64, 0, EXPECT_SUCCESS);
1480 BITMASK_TEST( 4, 0xFFFFFFFFU, ALLBITS64, 0, EXPECT_SUCCESS);
1481 BITMASK_TEST( 4, 0x100000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1482 BITMASK_TEST( 4, 0x300000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1483 BITMASK_TEST( 4,0x8000000000000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1484 BITMASK_TEST( 4, -1LL, ALLBITS64, 0, EXPECT_SUCCESS);
1486 // 64bit test: all of 0x1
1487 BITMASK_TEST( 5, 0, ALLBITS64, 1, EXPECT_FAILURE);
1488 BITMASK_TEST( 5, 1, ALLBITS64, 1, EXPECT_SUCCESS);
1489 BITMASK_TEST( 5, 2, ALLBITS64, 1, EXPECT_FAILURE);
1490 BITMASK_TEST( 5, 3, ALLBITS64, 1, EXPECT_SUCCESS);
1491 BITMASK_TEST( 5, 0x100000000LL, ALLBITS64, 1, EXPECT_FAILURE);
1492 BITMASK_TEST( 5, 0x100000001LL, ALLBITS64, 1, EXPECT_SUCCESS);
1493 BITMASK_TEST( 5, 0x100000002LL, ALLBITS64, 1, EXPECT_FAILURE);
1494 BITMASK_TEST( 5, 0x100000003LL, ALLBITS64, 1, EXPECT_SUCCESS);
1496 // 64bit test: all of 0x3
1497 BITMASK_TEST( 6, 0, ALLBITS64, 3, EXPECT_FAILURE);
1498 BITMASK_TEST( 6, 1, ALLBITS64, 3, EXPECT_FAILURE);
1499 BITMASK_TEST( 6, 2, ALLBITS64, 3, EXPECT_FAILURE);
1500 BITMASK_TEST( 6, 3, ALLBITS64, 3, EXPECT_SUCCESS);
1501 BITMASK_TEST( 6, 7, ALLBITS64, 3, EXPECT_SUCCESS);
1502 BITMASK_TEST( 6, 0x100000000LL, ALLBITS64, 3, EXPECT_FAILURE);
1503 BITMASK_TEST( 6, 0x100000001LL, ALLBITS64, 3, EXPECT_FAILURE);
1504 BITMASK_TEST( 6, 0x100000002LL, ALLBITS64, 3, EXPECT_FAILURE);
1505 BITMASK_TEST( 6, 0x100000003LL, ALLBITS64, 3, EXPECT_SUCCESS);
1506 BITMASK_TEST( 6, 0x100000007LL, ALLBITS64, 3, EXPECT_SUCCESS);
1508 // 64bit test: all of 0x80000000
1509 BITMASK_TEST( 7, 0, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1510 BITMASK_TEST( 7, 0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1511 BITMASK_TEST( 7, 0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1512 BITMASK_TEST( 7, 0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1513 BITMASK_TEST( 7, -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1514 BITMASK_TEST( 7, 0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1515 BITMASK_TEST( 7, 0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1516 BITMASK_TEST( 7, 0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1517 BITMASK_TEST( 7, 0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1518 BITMASK_TEST( 7, -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1520 // 64bit test: all of 0x100000000
1521 BITMASK_TEST( 8, 0x000000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1522 BITMASK_TEST( 8, 0x100000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1523 BITMASK_TEST( 8, 0x200000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1524 BITMASK_TEST( 8, 0x300000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1525 BITMASK_TEST( 8, 0x000000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1526 BITMASK_TEST( 8, 0x100000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1527 BITMASK_TEST( 8, 0x200000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1528 BITMASK_TEST( 8, 0x300000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1530 // 64bit test: all of 0x300000000
1531 BITMASK_TEST( 9, 0x000000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1532 BITMASK_TEST( 9, 0x100000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1533 BITMASK_TEST( 9, 0x200000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1534 BITMASK_TEST( 9, 0x300000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1535 BITMASK_TEST( 9, 0x700000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1536 BITMASK_TEST( 9, 0x000000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1537 BITMASK_TEST( 9, 0x100000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1538 BITMASK_TEST( 9, 0x200000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1539 BITMASK_TEST( 9, 0x300000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1540 BITMASK_TEST( 9, 0x700000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1542 // 64bit test: all of 0x100000001
1543 BITMASK_TEST(10, 0x000000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1544 BITMASK_TEST(10, 0x000000001LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1545 BITMASK_TEST(10, 0x100000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1546 BITMASK_TEST(10, 0x100000001LL, ALLBITS64,0x100000001, EXPT64_SUCCESS);
1547 BITMASK_TEST(10, 0xFFFFFFFFU, ALLBITS64,0x100000001, EXPECT_FAILURE);
1548 BITMASK_TEST(10, -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS);
1549 #endif
1552 class AnyBitTestPolicy : public Policy {
1553 public:
1554 AnyBitTestPolicy() {}
1555 ~AnyBitTestPolicy() override {}
1557 ResultExpr EvaluateSyscall(int sysno) const override;
1559 private:
1560 static ResultExpr HasAnyBits32(uint32_t);
1561 static ResultExpr HasAnyBits64(uint64_t);
1563 DISALLOW_COPY_AND_ASSIGN(AnyBitTestPolicy);
1566 ResultExpr AnyBitTestPolicy::HasAnyBits32(uint32_t bits) {
1567 if (bits == 0) {
1568 return Error(0);
1570 const Arg<uint32_t> arg(1);
1571 return If((arg & bits) != 0, Error(1)).Else(Error(0));
1574 ResultExpr AnyBitTestPolicy::HasAnyBits64(uint64_t bits) {
1575 if (bits == 0) {
1576 return Error(0);
1578 const Arg<uint64_t> arg(1);
1579 return If((arg & bits) != 0, Error(1)).Else(Error(0));
1582 ResultExpr AnyBitTestPolicy::EvaluateSyscall(int sysno) const {
1583 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1584 // Test masked-equality cases that should trigger the "has any bits"
1585 // peephole optimizations. We try to find bitmasks that could conceivably
1586 // touch corner cases.
1587 // For all of these tests, we override uname(). We can make do with
1588 // a single system call number, as we use the first system call argument to
1589 // select the different bit masks that we want to test against.
1590 if (sysno == __NR_uname) {
1591 const Arg<int> option(0);
1592 return Switch(option)
1593 .Case(0, HasAnyBits32(0x0))
1594 .Case(1, HasAnyBits32(0x1))
1595 .Case(2, HasAnyBits32(0x3))
1596 .Case(3, HasAnyBits32(0x80000000))
1597 #if __SIZEOF_POINTER__ > 4
1598 .Case(4, HasAnyBits64(0x0))
1599 .Case(5, HasAnyBits64(0x1))
1600 .Case(6, HasAnyBits64(0x3))
1601 .Case(7, HasAnyBits64(0x80000000))
1602 .Case(8, HasAnyBits64(0x100000000ULL))
1603 .Case(9, HasAnyBits64(0x300000000ULL))
1604 .Case(10, HasAnyBits64(0x100000001ULL))
1605 #endif
1606 .Default(Kill("Invalid test case number"));
1608 return Allow();
1611 BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) {
1612 // 32bit test: any of 0x0 (should always be false)
1613 BITMASK_TEST( 0, 0, ANYBITS32, 0x0, EXPECT_FAILURE);
1614 BITMASK_TEST( 0, 1, ANYBITS32, 0x0, EXPECT_FAILURE);
1615 BITMASK_TEST( 0, 3, ANYBITS32, 0x0, EXPECT_FAILURE);
1616 BITMASK_TEST( 0, 0xFFFFFFFFU, ANYBITS32, 0x0, EXPECT_FAILURE);
1617 BITMASK_TEST( 0, -1LL, ANYBITS32, 0x0, EXPECT_FAILURE);
1619 // 32bit test: any of 0x1
1620 BITMASK_TEST( 1, 0, ANYBITS32, 0x1, EXPECT_FAILURE);
1621 BITMASK_TEST( 1, 1, ANYBITS32, 0x1, EXPECT_SUCCESS);
1622 BITMASK_TEST( 1, 2, ANYBITS32, 0x1, EXPECT_FAILURE);
1623 BITMASK_TEST( 1, 3, ANYBITS32, 0x1, EXPECT_SUCCESS);
1625 // 32bit test: any of 0x3
1626 BITMASK_TEST( 2, 0, ANYBITS32, 0x3, EXPECT_FAILURE);
1627 BITMASK_TEST( 2, 1, ANYBITS32, 0x3, EXPECT_SUCCESS);
1628 BITMASK_TEST( 2, 2, ANYBITS32, 0x3, EXPECT_SUCCESS);
1629 BITMASK_TEST( 2, 3, ANYBITS32, 0x3, EXPECT_SUCCESS);
1630 BITMASK_TEST( 2, 7, ANYBITS32, 0x3, EXPECT_SUCCESS);
1632 // 32bit test: any of 0x80000000
1633 BITMASK_TEST( 3, 0, ANYBITS32, 0x80000000, EXPECT_FAILURE);
1634 BITMASK_TEST( 3, 0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE);
1635 BITMASK_TEST( 3, 0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1636 BITMASK_TEST( 3, 0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1637 BITMASK_TEST( 3, -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1639 #if __SIZEOF_POINTER__ > 4
1640 // 64bit test: any of 0x0 (should always be false)
1641 BITMASK_TEST( 4, 0, ANYBITS64, 0x0, EXPECT_FAILURE);
1642 BITMASK_TEST( 4, 1, ANYBITS64, 0x0, EXPECT_FAILURE);
1643 BITMASK_TEST( 4, 3, ANYBITS64, 0x0, EXPECT_FAILURE);
1644 BITMASK_TEST( 4, 0xFFFFFFFFU, ANYBITS64, 0x0, EXPECT_FAILURE);
1645 BITMASK_TEST( 4, 0x100000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1646 BITMASK_TEST( 4, 0x300000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1647 BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1648 BITMASK_TEST( 4, -1LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1650 // 64bit test: any of 0x1
1651 BITMASK_TEST( 5, 0, ANYBITS64, 0x1, EXPECT_FAILURE);
1652 BITMASK_TEST( 5, 1, ANYBITS64, 0x1, EXPECT_SUCCESS);
1653 BITMASK_TEST( 5, 2, ANYBITS64, 0x1, EXPECT_FAILURE);
1654 BITMASK_TEST( 5, 3, ANYBITS64, 0x1, EXPECT_SUCCESS);
1655 BITMASK_TEST( 5, 0x100000001LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
1656 BITMASK_TEST( 5, 0x100000000LL, ANYBITS64, 0x1, EXPECT_FAILURE);
1657 BITMASK_TEST( 5, 0x100000002LL, ANYBITS64, 0x1, EXPECT_FAILURE);
1658 BITMASK_TEST( 5, 0x100000003LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
1660 // 64bit test: any of 0x3
1661 BITMASK_TEST( 6, 0, ANYBITS64, 0x3, EXPECT_FAILURE);
1662 BITMASK_TEST( 6, 1, ANYBITS64, 0x3, EXPECT_SUCCESS);
1663 BITMASK_TEST( 6, 2, ANYBITS64, 0x3, EXPECT_SUCCESS);
1664 BITMASK_TEST( 6, 3, ANYBITS64, 0x3, EXPECT_SUCCESS);
1665 BITMASK_TEST( 6, 7, ANYBITS64, 0x3, EXPECT_SUCCESS);
1666 BITMASK_TEST( 6, 0x100000000LL, ANYBITS64, 0x3, EXPECT_FAILURE);
1667 BITMASK_TEST( 6, 0x100000001LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1668 BITMASK_TEST( 6, 0x100000002LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1669 BITMASK_TEST( 6, 0x100000003LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1670 BITMASK_TEST( 6, 0x100000007LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1672 // 64bit test: any of 0x80000000
1673 BITMASK_TEST( 7, 0, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1674 BITMASK_TEST( 7, 0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1675 BITMASK_TEST( 7, 0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1676 BITMASK_TEST( 7, 0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1677 BITMASK_TEST( 7, -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1678 BITMASK_TEST( 7, 0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1679 BITMASK_TEST( 7, 0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1680 BITMASK_TEST( 7, 0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1681 BITMASK_TEST( 7, 0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1682 BITMASK_TEST( 7, -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1684 // 64bit test: any of 0x100000000
1685 BITMASK_TEST( 8, 0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1686 BITMASK_TEST( 8, 0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1687 BITMASK_TEST( 8, 0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1688 BITMASK_TEST( 8, 0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1689 BITMASK_TEST( 8, 0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1690 BITMASK_TEST( 8, 0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1691 BITMASK_TEST( 8, 0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1692 BITMASK_TEST( 8, 0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1694 // 64bit test: any of 0x300000000
1695 BITMASK_TEST( 9, 0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
1696 BITMASK_TEST( 9, 0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1697 BITMASK_TEST( 9, 0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1698 BITMASK_TEST( 9, 0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1699 BITMASK_TEST( 9, 0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1700 BITMASK_TEST( 9, 0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
1701 BITMASK_TEST( 9, 0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1702 BITMASK_TEST( 9, 0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1703 BITMASK_TEST( 9, 0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1704 BITMASK_TEST( 9, 0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1706 // 64bit test: any of 0x100000001
1707 BITMASK_TEST( 10, 0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE);
1708 BITMASK_TEST( 10, 0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1709 BITMASK_TEST( 10, 0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS);
1710 BITMASK_TEST( 10, 0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1711 BITMASK_TEST( 10, 0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1712 BITMASK_TEST( 10, -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1713 #endif
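// Note (illustrative sketch, not exercised by the tests in this file): in
// bpf_dsl terms, the two bitmask idioms probed above are just two BoolExpr
// forms over a system call argument, where "mask" stands in for whatever
// constant a policy tests against:
//
//   const Arg<uint64_t> arg(1);
//   BoolExpr has_any_bits = (arg & mask) != 0;     // "any of the bits in mask"
//   BoolExpr has_all_bits = (arg & mask) == mask;  // "all of the bits in mask"
//
// The policy compiler lowers both to masked comparisons; the 32-bit/64-bit
// cases above probe the corner cases of that lowering.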
1716 class MaskedEqualTestPolicy : public Policy {
1717 public:
1718 MaskedEqualTestPolicy() {}
1719 ~MaskedEqualTestPolicy() override {}
1721 ResultExpr EvaluateSyscall(int sysno) const override;
1723 private:
1724 static ResultExpr MaskedEqual32(uint32_t mask, uint32_t value);
1725 static ResultExpr MaskedEqual64(uint64_t mask, uint64_t value);
1727 DISALLOW_COPY_AND_ASSIGN(MaskedEqualTestPolicy);
1730 ResultExpr MaskedEqualTestPolicy::MaskedEqual32(uint32_t mask, uint32_t value) {
1731 const Arg<uint32_t> arg(1);
1732 return If((arg & mask) == value, Error(1)).Else(Error(0));
1735 ResultExpr MaskedEqualTestPolicy::MaskedEqual64(uint64_t mask, uint64_t value) {
1736 const Arg<uint64_t> arg(1);
1737 return If((arg & mask) == value, Error(1)).Else(Error(0));
1740 ResultExpr MaskedEqualTestPolicy::EvaluateSyscall(int sysno) const {
1741 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1743 if (sysno == __NR_uname) {
1744 const Arg<int> option(0);
1745 return Switch(option)
1746 .Case(0, MaskedEqual32(0x00ff00ff, 0x005500aa))
1747 #if __SIZEOF_POINTER__ > 4
1748 .Case(1, MaskedEqual64(0x00ff00ff00000000, 0x005500aa00000000))
1749 .Case(2, MaskedEqual64(0x00ff00ff00ff00ff, 0x005500aa005500aa))
1750 #endif
1751 .Default(Kill("Invalid test case number"));
1754 return Allow();
1757 #define MASKEQ_TEST(rulenum, arg, expected_result) \
1758 BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result))
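// In MASKEQ_TEST, "rulenum" selects the Switch() case in MaskedEqualTestPolicy
// (i.e. which mask/value pair is active), "arg" is the value whose masked bits
// are compared, and "expected_result" is checked against the raw return value
// of the uname() call, which the policy encodes through its Error() results.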
1760 BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) {
1761 // Allowed: 0x__55__aa
1762 MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE);
1763 MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE);
1764 MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE);
1765 MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE);
1766 MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE);
1767 MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS);
1768 MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE);
1769 MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE);
1770 MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS);
1771 MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS);
1772 MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS);
1773 MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS);
1775 #if __SIZEOF_POINTER__ > 4
1776 // Allowed: 0x__55__aa________
1777 MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE);
1778 MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE);
1779 MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE);
1780 MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE);
1781 MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE);
1782 MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE);
1783 MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE);
1784 MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS);
1785 MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE);
1786 MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE);
1787 MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS);
1788 MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS);
1789 MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS);
1790 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
1792 MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS);
1794 // Allowed: 0x__55__aa__55__aa
1795 MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE);
1796 MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE);
1797 MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE);
1798 MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE);
1799 MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE);
1800 MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE);
1801 MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE);
1802 MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE);
1803 MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE);
1804 MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS);
1805 MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE);
1806 MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE);
1807 MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE);
1808 MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE);
1809 MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS);
1810 MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS);
1811 MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS);
1812 #endif
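// A masked-equality condition like the ones tested above is also the building
// block for real argument filters. A minimal sketch (hypothetical example, not
// used by any test in this file; kExampleMask and kExampleValue are
// placeholders for whatever bits a policy wants to pin down):
//
//   ResultExpr RestrictByMask() {
//     const Arg<unsigned long> arg(0);
//     return If((arg & kExampleMask) == kExampleValue, Allow())
//         .Else(Error(EPERM));
//   }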
1815 intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) {
1816 if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) {
1817 // We expect to get called for an attempt to fork(). No need to log that
1818 // call. But if we ever get called for anything else, we want to verbosely
1819 // print as much information as possible.
1820 const char* msg = (const char*)aux;
1821 printf(
1822 "Clone() was called with unexpected arguments\n"
1823 " nr: %d\n"
1824 " 1: 0x%llX\n"
1825 " 2: 0x%llX\n"
1826 " 3: 0x%llX\n"
1827 " 4: 0x%llX\n"
1828 " 5: 0x%llX\n"
1829 " 6: 0x%llX\n"
1830 "%s\n",
1831 args.nr,
1832 (long long)args.args[0],
1833 (long long)args.args[1],
1834 (long long)args.args[2],
1835 (long long)args.args[3],
1836 (long long)args.args[4],
1837 (long long)args.args[5],
1838 msg);
1840 return -EPERM;
1843 class PthreadPolicyEquality : public Policy {
1844 public:
1845 PthreadPolicyEquality() {}
1846 ~PthreadPolicyEquality() override {}
1848 ResultExpr EvaluateSyscall(int sysno) const override;
1850 private:
1851 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyEquality);
1854 ResultExpr PthreadPolicyEquality::EvaluateSyscall(int sysno) const {
1855 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1856 // This policy allows creating threads with pthread_create(). But it
1857 // doesn't allow any other uses of clone(). Most notably, it does not
1858 // allow callers to implement fork() or vfork() by passing suitable flags
1859 // to the clone() system call.
1860 if (sysno == __NR_clone) {
1861 // We have seen three different valid combinations of flags. Glibc
1862 // uses the more modern flags, sets the TLS from the call to clone(), and
1863 // uses futexes to monitor threads. Android's C run-time library doesn't
1864 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED flag.
1865 // More recent versions of Android don't set CLONE_DETACHED anymore, so
1866 // the third case accounts for that.
1867 // The following policy is very strict. It only allows the exact masks
1868 // that we have seen in known implementations. It is probably somewhat
1869 // stricter than what we would want to do.
1870 const uint64_t kGlibcCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
1871 CLONE_SIGHAND | CLONE_THREAD |
1872 CLONE_SYSVSEM | CLONE_SETTLS |
1873 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
1874 const uint64_t kBaseAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
1875 CLONE_SIGHAND | CLONE_THREAD |
1876 CLONE_SYSVSEM;
1877 const Arg<unsigned long> flags(0);
1878 return If(flags == kGlibcCloneMask ||
1879 flags == (kBaseAndroidCloneMask | CLONE_DETACHED) ||
1880 flags == kBaseAndroidCloneMask,
1881 Allow()).Else(Trap(PthreadTrapHandler, "Unknown mask"));
1884 return Allow();
1887 class PthreadPolicyBitMask : public Policy {
1888 public:
1889 PthreadPolicyBitMask() {}
1890 ~PthreadPolicyBitMask() override {}
1892 ResultExpr EvaluateSyscall(int sysno) const override;
1894 private:
1895 static BoolExpr HasAnyBits(const Arg<unsigned long>& arg, unsigned long bits);
1896 static BoolExpr HasAllBits(const Arg<unsigned long>& arg, unsigned long bits);
1898 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyBitMask);
1901 BoolExpr PthreadPolicyBitMask::HasAnyBits(const Arg<unsigned long>& arg,
1902 unsigned long bits) {
1903 return (arg & bits) != 0;
1906 BoolExpr PthreadPolicyBitMask::HasAllBits(const Arg<unsigned long>& arg,
1907 unsigned long bits) {
1908 return (arg & bits) == bits;
1911 ResultExpr PthreadPolicyBitMask::EvaluateSyscall(int sysno) const {
1912 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1913 // This policy allows creating threads with pthread_create(). But it
1914 // doesn't allow any other uses of clone(). Most notably, it does not
1915 // allow callers to implement fork() or vfork() by passing suitable flags
1916 // to the clone() system call.
1917 if (sysno == __NR_clone) {
1918 // We have seen two different valid combinations of flags. Glibc
1919 // uses the more modern flags, sets the TLS from the call to clone(), and
1920 // uses futexes to monitor threads. Android's C run-time library doesn't
1921 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED flag.
1922 // The following policy allows for either combination of flags, but it
1923 // is generally a little more conservative than strictly necessary. We
1924 // err on the side of being safe rather than sorry.
1925 // Very noticeably though, we disallow fork() (which is often just a
1926 // wrapper around clone()).
1927 const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES |
1928 CLONE_SIGHAND | CLONE_THREAD |
1929 CLONE_SYSVSEM;
1930 const unsigned long kFutexFlags =
1931 CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
1932 const unsigned long kNoopFlags = CLONE_DETACHED;
1933 const unsigned long kKnownFlags =
1934 kMandatoryFlags | kFutexFlags | kNoopFlags;
1936 const Arg<unsigned long> flags(0);
1937 return If(HasAnyBits(flags, ~kKnownFlags),
1938 Trap(PthreadTrapHandler, "Unexpected CLONE_XXX flag found"))
1939 .ElseIf(!HasAllBits(flags, kMandatoryFlags),
1940 Trap(PthreadTrapHandler,
1941 "Missing mandatory CLONE_XXX flags "
1942 "when creating new thread"))
1943 .ElseIf(
1944 !HasAllBits(flags, kFutexFlags) && HasAnyBits(flags, kFutexFlags),
1945 Trap(PthreadTrapHandler,
1946 "Must set either all or none of the TLS and futex bits in "
1947 "call to clone()"))
1948 .Else(Allow());
1951 return Allow();
1954 static void* ThreadFnc(void* arg) {
1955 ++*reinterpret_cast<int*>(arg);
1956 Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
1957 return NULL;
1960 static void PthreadTest() {
1961 // Attempt to start a joinable thread. This should succeed.
1962 pthread_t thread;
1963 int thread_ran = 0;
1964 BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran));
1965 BPF_ASSERT(!pthread_join(thread, NULL));
1966 BPF_ASSERT(thread_ran);
1968 // Attempt to start a detached thread. This should succeed.
1969 thread_ran = 0;
1970 pthread_attr_t attr;
1971 BPF_ASSERT(!pthread_attr_init(&attr));
1972 BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
1973 BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
1974 BPF_ASSERT(!pthread_attr_destroy(&attr));
1975 while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
1976 -EINTR) {
1977 }
1978 BPF_ASSERT(thread_ran);
1980 // Attempt to fork() a process using clone(). This should fail. We use the
1981 // same flags that glibc uses when calling fork(). But we don't actually
1982 // try calling the fork() implementation in the C run-time library, as
1983 // run-time libraries other than glibc might call __NR_fork instead of
1984 // __NR_clone, and that would introduce a bogus test failure.
1985 int pid;
1986 BPF_ASSERT(Syscall::Call(__NR_clone,
1987 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
1988 0,
1989 0,
1990 &pid) == -EPERM);
1993 BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) {
1994 PthreadTest();
1997 BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) {
1998 PthreadTest();
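// The BoolExpr helpers used by PthreadPolicyBitMask compose like ordinary
// boolean expressions, so similar checks can be folded into one condition.
// A sketch (equivalent in spirit only, not what the policy above does
// verbatim):
//
//   const Arg<unsigned long> flags(0);
//   return If(!HasAnyBits(flags, ~kKnownFlags) &&
//                 HasAllBits(flags, kMandatoryFlags),
//             Allow())
//       .Else(Trap(PthreadTrapHandler, "Unexpected clone() flags"));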
2001 // libc might not define these even though the kernel supports them.
2002 #ifndef PTRACE_O_TRACESECCOMP
2003 #define PTRACE_O_TRACESECCOMP 0x00000080
2004 #endif
2006 #ifdef PTRACE_EVENT_SECCOMP
2007 #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
2008 #else
2009 // When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they
2010 // changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by
2011 // PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If
2012 // PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both
2013 // values here.
2014 #define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8)
2015 #endif
2017 #if defined(__arm__)
2018 #ifndef PTRACE_SET_SYSCALL
2019 #define PTRACE_SET_SYSCALL 23
2020 #endif
2021 #endif
2023 #if defined(__aarch64__)
2024 #ifndef PTRACE_GETREGS
2025 #define PTRACE_GETREGS 12
2026 #endif
2027 #endif
2029 #if defined(__aarch64__)
2030 #ifndef PTRACE_SETREGS
2031 #define PTRACE_SETREGS 13
2032 #endif
2033 #endif
2035 // Changes the syscall to run for a child being sandboxed using seccomp-bpf with
2036 // PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped on
2037 // PTRACE_EVENT_SECCOMP.
2039 // regs should contain the current set of registers of the child, obtained using
2040 // PTRACE_GETREGS.
2042 // Depending on the architecture, this may modify regs, so the caller is
2043 // responsible for committing these changes using PTRACE_SETREGS.
2044 long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) {
2045 #if defined(__arm__)
2046 // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the
2047 // libc ptrace call as the request parameter is an enum, and
2048 // PTRACE_SET_SYSCALL may not be in the enum.
2049 return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number);
2050 #endif
2052 SECCOMP_PT_SYSCALL(*regs) = syscall_number;
2053 return 0;
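// Typical usage from the tracer, mirroring the SeccompRetTrace test below
// (sketch; "some_result" is a placeholder):
//
//   regs_struct regs;
//   ptrace(PTRACE_GETREGS, pid, NULL, &regs);
//   SetSyscall(pid, &regs, -1);             // or a replacement syscall number
//   SECCOMP_PT_RESULT(regs) = some_result;  // optional: fake a return value
//   ptrace(PTRACE_SETREGS, pid, NULL, &regs);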
2056 const uint16_t kTraceData = 0xcc;
2058 class TraceAllPolicy : public Policy {
2059 public:
2060 TraceAllPolicy() {}
2061 ~TraceAllPolicy() override {}
2063 ResultExpr EvaluateSyscall(int system_call_number) const override {
2064 return Trace(kTraceData);
2067 private:
2068 DISALLOW_COPY_AND_ASSIGN(TraceAllPolicy);
2071 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) {
2072 if (SandboxBPF::SupportsSeccompSandbox(-1) !=
2073 sandbox::SandboxBPF::STATUS_AVAILABLE) {
2074 return;
2077 // This test is disabled on arm due to a kernel bug.
2078 // See https://code.google.com/p/chromium/issues/detail?id=383977
2079 #if defined(__arm__) || defined(__aarch64__)
2080 printf("This test is currently disabled on ARM32/64 due to a kernel bug.");
2081 return;
2082 #endif
2084 #if defined(__mips__)
2085 // TODO: Figure out how to support specificity of handling indirect syscalls
2086 // in this test and enable it.
2087 printf("This test is currently disabled on MIPS.");
2088 return;
2089 #endif
2091 pid_t pid = fork();
2092 BPF_ASSERT_NE(-1, pid);
2093 if (pid == 0) {
2094 pid_t my_pid = getpid();
2095 BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL));
2096 BPF_ASSERT_EQ(0, raise(SIGSTOP));
2097 SandboxBPF sandbox;
2098 sandbox.SetSandboxPolicy(new TraceAllPolicy);
2099 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
2101 // getpid is allowed.
2102 BPF_ASSERT_EQ(my_pid, sys_getpid());
2104 // write to stdout is skipped and returns a fake value.
2105 BPF_ASSERT_EQ(kExpectedReturnValue,
2106 syscall(__NR_write, STDOUT_FILENO, "A", 1));
2108 // kill is rewritten to exit(kExpectedReturnValue).
2109 syscall(__NR_kill, my_pid, SIGKILL);
2111 // Should not be reached.
2112 BPF_ASSERT(false);
2115 int status;
2116 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1);
2117 BPF_ASSERT(WIFSTOPPED(status));
2119 BPF_ASSERT_NE(-1,
2120 ptrace(PTRACE_SETOPTIONS,
2121 pid,
2122 NULL,
2123 reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP)));
2124 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2125 while (true) {
2126 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1);
2127 if (WIFEXITED(status) || WIFSIGNALED(status)) {
2128 BPF_ASSERT(WIFEXITED(status));
2129 BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status));
2130 break;
2133 if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
2134 !IS_SECCOMP_EVENT(status)) {
2135 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2136 continue;
2139 unsigned long data;
2140 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data));
2141 BPF_ASSERT_EQ(kTraceData, data);
2143 regs_struct regs;
2144 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, &regs));
2145 switch (SECCOMP_PT_SYSCALL(regs)) {
2146 case __NR_write:
2147 // Skip writes to stdout, make it return kExpectedReturnValue. Allow
2148 // writes to stderr so that BPF_ASSERT messages show up.
2149 if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) {
2150 BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, -1));
2151 SECCOMP_PT_RESULT(regs) = kExpectedReturnValue;
2152 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
2154 break;
2156 case __NR_kill:
2157 // Rewrite to exit(kExpectedReturnValue).
2158 BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, __NR_exit));
2159 SECCOMP_PT_PARM1(regs) = kExpectedReturnValue;
2160 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
2161 break;
2163 default:
2164 // Allow all other syscalls.
2165 break;
2168 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2172 // Android does not expose pread64 or pwrite64.
2173 #if !defined(OS_ANDROID)
2175 bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) {
2176 while (count > 0) {
2177 const ssize_t transferred =
2178 HANDLE_EINTR(pwrite64(fd, buffer, count, offset));
2179 if (transferred <= 0 || static_cast<size_t>(transferred) > count) {
2180 return false;
2182 count -= transferred;
2183 buffer += transferred;
2184 offset += transferred;
2186 return true;
2189 bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) {
2190 while (count > 0) {
2191 const ssize_t transferred = HANDLE_EINTR(pread64(fd, buffer, count, offset));
2192 if (transferred <= 0 || static_cast<size_t>(transferred) > count) {
2193 return false;
2195 count -= transferred;
2196 buffer += transferred;
2197 offset += transferred;
2199 return true;
2202 bool pread_64_was_forwarded = false;
2204 class TrapPread64Policy : public Policy {
2205 public:
2206 TrapPread64Policy() {}
2207 ~TrapPread64Policy() override {}
2209 ResultExpr EvaluateSyscall(int system_call_number) const override {
2210 // Set the global environment for unsafe traps once.
2211 if (system_call_number == MIN_SYSCALL) {
2212 EnableUnsafeTraps();
2215 if (system_call_number == __NR_pread64) {
2216 return UnsafeTrap(ForwardPreadHandler, NULL);
2218 return Allow();
2221 private:
2222 static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args,
2223 void* aux) {
2224 BPF_ASSERT(args.nr == __NR_pread64);
2225 pread_64_was_forwarded = true;
2227 return SandboxBPF::ForwardSyscall(args);
2230 DISALLOW_COPY_AND_ASSIGN(TrapPread64Policy);
2233 // pread(2) takes a 64-bit offset. On 32-bit systems, it is split across two
2234 // system call arguments. In this test, we make sure that ForwardSyscall()
2235 // forwards it properly.
2236 BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) {
2237 ScopedTemporaryFile temp_file;
2238 const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
2239 const char kTestString[] = "This is a test!";
2240 BPF_ASSERT(FullPwrite64(
2241 temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset));
2243 char read_test_string[sizeof(kTestString)] = {0};
2244 BPF_ASSERT(FullPread64(temp_file.fd(),
2245 read_test_string,
2246 sizeof(read_test_string),
2247 kLargeOffset));
2248 BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString)));
2249 BPF_ASSERT(pread_64_was_forwarded);
2252 #endif // !defined(OS_ANDROID)
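// On a 32-bit ABI, the 64-bit offset used above occupies two system call
// argument slots (the exact slot order and any alignment padding are
// architecture specific). Conceptually the split is just (sketch):
//
//   const uint64_t offset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
//   const uint32_t offset_low = static_cast<uint32_t>(offset);
//   const uint32_t offset_high = static_cast<uint32_t>(offset >> 32);
//
// ForwardSyscall() has to pass both halves through unchanged for the
// kLargeOffset round-trip above to succeed.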
2254 void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) {
2255 base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr);
2257 // Wait for the main thread to signal that the filter has been applied.
2258 if (!event->IsSignaled()) {
2259 event->Wait();
2262 BPF_ASSERT(event->IsSignaled());
2264 BlacklistNanosleepPolicy::AssertNanosleepFails();
2266 return NULL;
2269 SANDBOX_TEST(SandboxBPF, Tsync) {
2270 if (SandboxBPF::SupportsSeccompThreadFilterSynchronization() !=
2271 SandboxBPF::STATUS_AVAILABLE) {
2272 return;
2275 base::WaitableEvent event(true, false);
2277 // Create a thread on which to invoke the blocked syscall.
2278 pthread_t thread;
2279 BPF_ASSERT_EQ(
2280 0, pthread_create(&thread, NULL, &TsyncApplyToTwoThreadsFunc, &event));
2282 // Test that nanosleep succeeds before the sandbox is engaged.
2283 const struct timespec ts = {0, 0};
2284 BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
2286 // Engage the sandbox.
2287 SandboxBPF sandbox;
2288 sandbox.SetSandboxPolicy(new BlacklistNanosleepPolicy());
2289 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));
2291 // This thread should have the filter applied as well.
2292 BlacklistNanosleepPolicy::AssertNanosleepFails();
2294 // Signal the condition to invoke the system call.
2295 event.Signal();
2297 // Wait for the thread to finish.
2298 BPF_ASSERT_EQ(0, pthread_join(thread, NULL));
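// The kernel feature exercised here is seccomp(2) with
// SECCOMP_FILTER_FLAG_TSYNC, which atomically applies a filter to every thread
// of the calling process. Roughly (sketch of the underlying call, where "prog"
// is the compiled struct sock_fprog; this is not how SandboxBPF is invoked in
// this test):
//
//   syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
//           SECCOMP_FILTER_FLAG_TSYNC, &prog);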
2301 class AllowAllPolicy : public Policy {
2302 public:
2303 AllowAllPolicy() {}
2304 ~AllowAllPolicy() override {}
2306 ResultExpr EvaluateSyscall(int sysno) const override { return Allow(); }
2308 private:
2309 DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy);
2312 SANDBOX_DEATH_TEST(
2313 SandboxBPF,
2314 StartMultiThreadedAsSingleThreaded,
2315 DEATH_MESSAGE("Cannot start sandbox; process is already multi-threaded")) {
2316 base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded");
2317 BPF_ASSERT(thread.Start());
2319 SandboxBPF sandbox;
2320 sandbox.SetSandboxPolicy(new AllowAllPolicy());
2321 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
2324 // http://crbug.com/407357
2325 #if !defined(THREAD_SANITIZER)
2326 SANDBOX_DEATH_TEST(
2327 SandboxBPF,
2328 StartSingleThreadedAsMultiThreaded,
2329 DEATH_MESSAGE(
2330 "Cannot start sandbox; process may be single-threaded when "
2331 "reported as not")) {
2332 SandboxBPF sandbox;
2333 sandbox.SetSandboxPolicy(new AllowAllPolicy());
2334 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));
2336 #endif // !defined(THREAD_SANITIZER)
2338 // A stub handler for the UnsafeTrap. Never called.
2339 intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) {
2340 return -1;
2343 class UnsafeTrapWithCondPolicy : public Policy {
2344 public:
2345 UnsafeTrapWithCondPolicy() {}
2346 ~UnsafeTrapWithCondPolicy() override {}
2348 ResultExpr EvaluateSyscall(int sysno) const override {
2349 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
2350 setenv(kSandboxDebuggingEnv, "t", 0);
2351 Die::SuppressInfoMessages(true);
2353 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
2354 return Allow();
2356 switch (sysno) {
2357 case __NR_uname: {
2358 const Arg<uint32_t> arg(0);
2359 return If(arg == 0, Allow()).Else(Error(EPERM));
2361 case __NR_setgid: {
2362 const Arg<uint32_t> arg(0);
2363 return Switch(arg)
2364 .Case(100, Error(ENOMEM))
2365 .Case(200, Error(ENOSYS))
2366 .Default(Error(EPERM));
2368 case __NR_close:
2369 case __NR_exit_group:
2370 case __NR_write:
2371 return Allow();
2372 case __NR_getppid:
2373 return UnsafeTrap(NoOpHandler, NULL);
2374 default:
2375 return Error(EPERM);
2379 private:
2380 DISALLOW_COPY_AND_ASSIGN(UnsafeTrapWithCondPolicy);
2383 BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) {
2384 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0));
2385 BPF_ASSERT_EQ(EFAULT, errno);
2387 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1));
2388 BPF_ASSERT_EQ(EPERM, errno);
2390 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100));
2391 BPF_ASSERT_EQ(ENOMEM, errno);
2393 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200));
2394 BPF_ASSERT_EQ(ENOSYS, errno);
2396 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300));
2397 BPF_ASSERT_EQ(EPERM, errno);
2400 } // namespace
2402 } // namespace bpf_dsl
2403 } // namespace sandbox