// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>
#include <sys/socket.h>

#if defined(ANDROID)
// Work-around for buggy headers in Android's NDK
#define __user
#endif
#include <linux/futex.h>

#include "base/bind.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "base/posix/eintr_wrapper.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/threading/thread.h"
#include "build/build_config.h"
#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
#include "sandbox/linux/bpf_dsl/linux_syscall_ranges.h"
#include "sandbox/linux/bpf_dsl/policy.h"
#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
#include "sandbox/linux/seccomp-bpf/bpf_tests.h"
#include "sandbox/linux/seccomp-bpf/die.h"
#include "sandbox/linux/seccomp-bpf/errorcode.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/syscall.h"
#include "sandbox/linux/seccomp-bpf/trap.h"
#include "sandbox/linux/services/syscall_wrappers.h"
#include "sandbox/linux/services/thread_helpers.h"
#include "sandbox/linux/system_headers/linux_syscalls.h"
#include "sandbox/linux/tests/scoped_temporary_file.h"
#include "sandbox/linux/tests/unit_tests.h"
#include "testing/gtest/include/gtest/gtest.h"

// Workaround for Android's prctl.h file.
#ifndef PR_GET_ENDIAN
#define PR_GET_ENDIAN 19
#endif
#ifndef PR_CAPBSET_READ
#define PR_CAPBSET_READ 23
#define PR_CAPBSET_DROP 24
#endif
namespace sandbox {
namespace bpf_dsl {

namespace {

const int kExpectedReturnValue = 42;
const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";

// Set the global environment to allow the use of UnsafeTrap() policies.
void EnableUnsafeTraps() {
  // The use of UnsafeTrap() causes us to print a warning message. This is
  // generally desirable, but it results in the unittest failing, as it doesn't
  // expect any messages on "stderr". So, temporarily disable messages. The
  // BPF_TEST() is guaranteed to turn messages back on after the policy
  // function has completed.
  setenv(kSandboxDebuggingEnv, "t", 0);
  Die::SuppressInfoMessages(true);
}

// BPF_TEST does a lot of the boiler-plate code around setting up a
// policy and optionally passing data between the caller, the policy, and
// any Trap() handlers. This is great for writing short and concise tests,
// and it helps us avoid accidentally forgetting any of the crucial steps
// in setting up the sandbox. But it wouldn't hurt to have at least one test
// that explicitly walks through all these steps.

intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) {
  BPF_ASSERT(aux);
  int* counter = static_cast<int*>(aux);
  return (*counter)++;
}
class VerboseAPITestingPolicy : public Policy {
 public:
  explicit VerboseAPITestingPolicy(int* counter_ptr)
      : counter_ptr_(counter_ptr) {}
  ~VerboseAPITestingPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno == __NR_uname) {
      return Trap(IncreaseCounter, counter_ptr_);
    }
    return Allow();
  }

 private:
  int* counter_ptr_;

  DISALLOW_COPY_AND_ASSIGN(VerboseAPITestingPolicy);
};

SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) {
  if (SandboxBPF::SupportsSeccompSandbox(
          SandboxBPF::SeccompLevel::SINGLE_THREADED)) {
    static int counter = 0;

    SandboxBPF sandbox(new VerboseAPITestingPolicy(&counter));
    BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED));

    BPF_ASSERT_EQ(0, counter);
    BPF_ASSERT_EQ(0, syscall(__NR_uname, 0));
    BPF_ASSERT_EQ(1, counter);
    BPF_ASSERT_EQ(1, syscall(__NR_uname, 0));
    BPF_ASSERT_EQ(2, counter);
  }
}
// A simple blacklist test

class BlacklistNanosleepPolicy : public Policy {
 public:
  BlacklistNanosleepPolicy() {}
  ~BlacklistNanosleepPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_nanosleep:
        return Error(EACCES);
      default:
        return Allow();
    }
  }

  static void AssertNanosleepFails() {
    const struct timespec ts = {0, 0};
    errno = 0;
    BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
    BPF_ASSERT_EQ(EACCES, errno);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepPolicy);
};

BPF_TEST_C(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) {
  BlacklistNanosleepPolicy::AssertNanosleepFails();
}

BPF_TEST_C(SandboxBPF, UseVsyscall, BlacklistNanosleepPolicy) {
  time_t current_time;
  // time() is implemented as a vsyscall. With an older glibc, with
  // vsyscall=emulate and some versions of the seccomp BPF patch
  // we may get SIGKILL-ed. Detect this!
  BPF_ASSERT_NE(static_cast<time_t>(-1), time(&current_time));
}
// Now do a simple whitelist test

class WhitelistGetpidPolicy : public Policy {
 public:
  WhitelistGetpidPolicy() {}
  ~WhitelistGetpidPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_getpid:
      case __NR_exit_group:
        return Allow();
      default:
        return Error(ENOMEM);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(WhitelistGetpidPolicy);
};

BPF_TEST_C(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) {
  // getpid() should be allowed
  errno = 0;
  BPF_ASSERT(sys_getpid() > 0);
  BPF_ASSERT(errno == 0);

  // getpgid() should be denied
  BPF_ASSERT(getpgid(0) == -1);
  BPF_ASSERT(errno == ENOMEM);
}
// A simple blacklist policy, with a SIGSYS handler
intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) {
  // We also check that the auxiliary data is correct
  SANDBOX_ASSERT(aux);
  *(static_cast<int*>(aux)) = kExpectedReturnValue;
  return -ENOMEM;
}

class BlacklistNanosleepTrapPolicy : public Policy {
 public:
  explicit BlacklistNanosleepTrapPolicy(int* aux) : aux_(aux) {}
  ~BlacklistNanosleepTrapPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_nanosleep:
        return Trap(EnomemHandler, aux_);
      default:
        return Allow();
    }
  }

 private:
  int* aux_;

  DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepTrapPolicy);
};

BPF_TEST(SandboxBPF,
         BasicBlacklistWithSigsys,
         BlacklistNanosleepTrapPolicy,
         int /* (*BPF_AUX) */) {
  // getpid() should work properly
  errno = 0;
  BPF_ASSERT(sys_getpid() > 0);
  BPF_ASSERT(errno == 0);

  // Our auxiliary data should be reset by the signal handler.
  *BPF_AUX = -1;
  const struct timespec ts = {0, 0};
  BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
  BPF_ASSERT(errno == ENOMEM);

  // We expect the signal handler to modify AuxData
  BPF_ASSERT(*BPF_AUX == kExpectedReturnValue);
}
// A simple test that verifies we can return arbitrary errno values.

class ErrnoTestPolicy : public Policy {
 public:
  ErrnoTestPolicy() {}
  ~ErrnoTestPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(ErrnoTestPolicy);
};

ResultExpr ErrnoTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  switch (sysno) {
    case __NR_dup3:  // dup2 is a wrapper of dup3 in android
#if defined(__NR_dup2)
    case __NR_dup2:
#endif
      // Pretend that dup2() worked, but don't actually do anything.
      return Error(0);
    case __NR_setuid:
#if defined(__NR_setuid32)
    case __NR_setuid32:
#endif
      // Return errno = 1.
      return Error(1);
    case __NR_setgid:
#if defined(__NR_setgid32)
    case __NR_setgid32:
#endif
      // Return maximum errno value (typically 4095).
      return Error(ErrorCode::ERR_MAX_ERRNO);
    case __NR_uname:
      // Return errno = 42;
      return Error(42);
    default:
      return Allow();
  }
}

BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) {
  // Verify that dup2() returns success, but doesn't actually run.
  int fds[4];
  BPF_ASSERT(pipe(fds) == 0);
  BPF_ASSERT(pipe(fds + 2) == 0);
  BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
  char buf[1] = {};
  BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
  BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
  BPF_ASSERT(read(fds[0], buf, 1) == 1);

  // If dup2() executed, we will read \xAA, but if dup2() has been turned
  // into a no-op by our policy, then we will read \x55.
  BPF_ASSERT(buf[0] == '\x55');

  // Verify that we can return the minimum and maximum errno values.
  errno = 0;
  BPF_ASSERT(setuid(0) == -1);
  BPF_ASSERT(errno == 1);

  // On Android, errno is only supported up to 255, otherwise errno
  // processing is skipped.
  // We work around this (crbug.com/181647).
  if (sandbox::IsAndroid() && setgid(0) != -1) {
    errno = 0;
    BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO);
    BPF_ASSERT(errno == 0);
  } else {
    errno = 0;
    BPF_ASSERT(setgid(0) == -1);
    BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO);
  }

  // Finally, test an errno in between the minimum and maximum.
  errno = 0;
  struct utsname uts_buf;
  BPF_ASSERT(uname(&uts_buf) == -1);
  BPF_ASSERT(errno == 42);
}
// Testing the stacking of two sandboxes

class StackingPolicyPartOne : public Policy {
 public:
  StackingPolicyPartOne() {}
  ~StackingPolicyPartOne() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_getppid: {
        const Arg<int> arg(0);
        return If(arg == 0, Allow()).Else(Error(EPERM));
      }
      default:
        return Allow();
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartOne);
};

class StackingPolicyPartTwo : public Policy {
 public:
  StackingPolicyPartTwo() {}
  ~StackingPolicyPartTwo() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    switch (sysno) {
      case __NR_getppid: {
        const Arg<int> arg(0);
        return If(arg == 0, Error(EINVAL)).Else(Allow());
      }
      default:
        return Allow();
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartTwo);
};

BPF_TEST_C(SandboxBPF, StackingPolicy, StackingPolicyPartOne) {
  errno = 0;
  BPF_ASSERT(syscall(__NR_getppid, 0) > 0);
  BPF_ASSERT(errno == 0);

  BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
  BPF_ASSERT(errno == EPERM);

  // Stack a second sandbox with its own policy. Verify that we can further
  // restrict filters, but we cannot relax existing filters.
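  // (When several seccomp-bpf filters are installed, the kernel evaluates all
  // of them for every system call and acts on the most restrictive result, so
  // stacking can only tighten the policy.)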
  SandboxBPF sandbox(new StackingPolicyPartTwo());
  BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED));

  errno = 0;
  BPF_ASSERT(syscall(__NR_getppid, 0) == -1);
  BPF_ASSERT(errno == EINVAL);

  BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
  BPF_ASSERT(errno == EPERM);
}
// A more complex, but synthetic policy. This tests the correctness of the BPF
// program by iterating through all syscalls and checking for an errno that
// depends on the syscall number. Unlike the Verifier, this exercises the BPF
// interpreter in the kernel.

// We try to make sure we exercise optimizations in the BPF compiler. We make
// sure that the compiler can have an opportunity to coalesce syscalls with
// contiguous numbers and we also make sure that disjoint sets can return the
// same errno.
int SysnoToRandomErrno(int sysno) {
  // Small contiguous sets of four system calls return an errno equal to the
  // index of that set + 1 (so that we never return a NUL errno).
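  // For example, syscalls 0..3 map to errno 1 and syscalls 4..7 map to
  // errno 2; the mapping wraps after 29 sets, so syscalls 116..119 map
  // back to errno 1.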
  return ((sysno & ~3) >> 2) % 29 + 1;
}

class SyntheticPolicy : public Policy {
 public:
  SyntheticPolicy() {}
  ~SyntheticPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno == __NR_exit_group || sysno == __NR_write) {
      // exit_group() is special, we really need it to work.
      // write() is needed for BPF_ASSERT() to report a useful error message.
      return Allow();
    }
    return Error(SysnoToRandomErrno(sysno));
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(SyntheticPolicy);
};

BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) {
  // Ensure that kExpectedReturnValue + syscallnumber + 1 does not overflow
  // an int.
  BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
             static_cast<int>(MAX_PUBLIC_SYSCALL));

  for (int syscall_number = static_cast<int>(MIN_SYSCALL);
       syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
       ++syscall_number) {
    if (syscall_number == __NR_exit_group || syscall_number == __NR_write) {
      // exit_group() is special
      continue;
    }
    errno = 0;
    BPF_ASSERT(syscall(syscall_number) == -1);
    BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number));
  }
}
#if defined(__arm__)
// A simple policy that tests whether ARM private system calls are supported
// by our BPF compiler and by the BPF interpreter in the kernel.

// For ARM private system calls, return an errno equal to their offset from
// MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno).
int ArmPrivateSysnoToErrno(int sysno) {
  if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) &&
      sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
    return (sysno - MIN_PRIVATE_SYSCALL) + 1;
  } else {
    return ENOSYS;
  }
}

class ArmPrivatePolicy : public Policy {
 public:
  ArmPrivatePolicy() {}
  ~ArmPrivatePolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual
    // ARM private system calls.
    if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) &&
        sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
      return Error(ArmPrivateSysnoToErrno(sysno));
    }
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ArmPrivatePolicy);
};

BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) {
  for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
       syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
       ++syscall_number) {
    errno = 0;
    BPF_ASSERT(syscall(syscall_number) == -1);
    BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
  }
}
#endif  // defined(__arm__)
intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
  // Count all invocations of our callback function.
  ++*reinterpret_cast<int*>(aux);

  // Verify that within the callback function all filtering is temporarily
  // disabled.
  BPF_ASSERT(sys_getpid() > 1);

  // Verify that we can now call the underlying system call without causing
  // infinite recursion.
  return SandboxBPF::ForwardSyscall(args);
}

class GreyListedPolicy : public Policy {
 public:
  explicit GreyListedPolicy(int* aux) : aux_(aux) {
    // Set the global environment for unsafe traps once.
    EnableUnsafeTraps();
  }
  ~GreyListedPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    // Some system calls must always be allowed, if our policy wants to make
    // use of UnsafeTrap()
    if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) {
      return Allow();
    } else if (sysno == __NR_getpid) {
      // Disallow getpid()
      return Error(EPERM);
    } else {
      // Allow (and count) all other system calls.
      return UnsafeTrap(CountSyscalls, aux_);
    }
  }

 private:
  int* aux_;

  DISALLOW_COPY_AND_ASSIGN(GreyListedPolicy);
};

BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) {
  BPF_ASSERT(sys_getpid() == -1);
  BPF_ASSERT(errno == EPERM);
  BPF_ASSERT(*BPF_AUX == 0);
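  // Both geteuid() and getuid() below are forwarded through CountSyscalls(),
  // so the counter is expected to increment once for each of them.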
  BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
  BPF_ASSERT(*BPF_AUX == 2);
  char name[17] = {};
  BPF_ASSERT(!syscall(__NR_prctl,
                      PR_GET_NAME,
                      name,
                      (void*)NULL,
                      (void*)NULL,
                      (void*)NULL));
  BPF_ASSERT(*BPF_AUX == 3);
  BPF_ASSERT(*name);
}
SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) {
  // Disable warning messages that could confuse our test framework.
  setenv(kSandboxDebuggingEnv, "t", 0);
  Die::SuppressInfoMessages(true);

  unsetenv(kSandboxDebuggingEnv);
  SANDBOX_ASSERT(Trap::Registry()->EnableUnsafeTraps() == false);
  setenv(kSandboxDebuggingEnv, "", 1);
  SANDBOX_ASSERT(Trap::Registry()->EnableUnsafeTraps() == false);
  setenv(kSandboxDebuggingEnv, "t", 1);
  SANDBOX_ASSERT(Trap::Registry()->EnableUnsafeTraps() == true);
}
intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) {
  if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) {
    // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always
    // return an error. But our handler allows this call.
    return 0;
  } else {
    return SandboxBPF::ForwardSyscall(args);
  }
}

class PrctlPolicy : public Policy {
 public:
  PrctlPolicy() {}
  ~PrctlPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    setenv(kSandboxDebuggingEnv, "t", 0);
    Die::SuppressInfoMessages(true);

    if (sysno == __NR_prctl) {
      // Handle prctl() inside an UnsafeTrap()
      return UnsafeTrap(PrctlHandler, NULL);
    }

    // Allow all other system calls.
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(PrctlPolicy);
};

BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) {
  // This call should never be allowed. But our policy will intercept it and
  // let it pass successfully.
  BPF_ASSERT(
      !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));

  // Verify that the call will fail, if it makes it all the way to the kernel.
  BPF_ASSERT(
      prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);

  // And verify that other uses of prctl() work just fine.
  char name[17] = {};
  BPF_ASSERT(!syscall(__NR_prctl,
                      PR_GET_NAME,
                      name,
                      (void*)NULL,
                      (void*)NULL,
                      (void*)NULL));
  BPF_ASSERT(*name);

  // Finally, verify that system calls other than prctl() are completely
  // unaffected by our policy.
  struct utsname uts = {};
  BPF_ASSERT(!uname(&uts));
  BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
}
intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
  return SandboxBPF::ForwardSyscall(args);
}

class RedirectAllSyscallsPolicy : public Policy {
 public:
  RedirectAllSyscallsPolicy() {}
  ~RedirectAllSyscallsPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(RedirectAllSyscallsPolicy);
};

ResultExpr RedirectAllSyscallsPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  setenv(kSandboxDebuggingEnv, "t", 0);
  Die::SuppressInfoMessages(true);

  // Some system calls must always be allowed, if our policy wants to make
  // use of UnsafeTrap()
  if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
    return Allow();
  return UnsafeTrap(AllowRedirectedSyscall, NULL);
}
#if !defined(ADDRESS_SANITIZER)
// ASan does not allow changing the signal handler for SIGBUS, and treats it as
// a fatal signal.

int bus_handler_fd_ = -1;

void SigBusHandler(int, siginfo_t* info, void* void_context) {
  BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
}

BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) {
  // We use the SIGBUS bit in the signal mask as a thread-local boolean
  // value in the implementation of UnsafeTrap(). This is obviously a bit
  // of a hack that could conceivably interfere with code that uses SIGBUS
  // in more traditional ways. This test verifies that basic functionality
  // of SIGBUS is not impacted, but it is certainly possible to construct
  // more complex uses of signals where our use of the SIGBUS mask is not
  // 100% transparent. This is expected behavior.
  int fds[2];
  BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
  bus_handler_fd_ = fds[1];
  struct sigaction sa = {};
  sa.sa_sigaction = SigBusHandler;
  sa.sa_flags = SA_SIGINFO;
  BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
  raise(SIGBUS);
  char c = '\000';
  BPF_ASSERT(read(fds[0], &c, 1) == 1);
  BPF_ASSERT(close(fds[0]) == 0);
  BPF_ASSERT(close(fds[1]) == 0);
  BPF_ASSERT(c == 0x55);
}
#endif  // !defined(ADDRESS_SANITIZER)
BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) {
  // Signal masks are potentially tricky to handle. For instance, if we
  // ever tried to update them from inside a Trap() or UnsafeTrap() handler,
  // the call to sigreturn() at the end of the signal handler would undo
  // all of our efforts. So, it makes sense to test that sigprocmask()
  // works, even if we have a policy in place that makes use of UnsafeTrap().
  // In practice, this works because we force sigprocmask() to be handled
  // entirely in the kernel.
  sigset_t mask0, mask1, mask2;

  // Call sigprocmask() to verify that SIGUSR2 wasn't blocked, if we didn't
  // change the mask (it shouldn't have been, as it isn't blocked by default
  // in POSIX).

  // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose.
  sigemptyset(&mask0);
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1));
  BPF_ASSERT(!sigismember(&mask1, SIGUSR2));

  // Try again, and this time we verify that we can block it. This
  // requires a second call to sigprocmask().
  sigaddset(&mask0, SIGUSR2);
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL));
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2));
  BPF_ASSERT(sigismember(&mask2, SIGUSR2));
}
BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
  // An UnsafeTrap() (or for that matter, a Trap()) has to report error
  // conditions by returning an exit code in the range -1..-4096. This
  // should happen automatically if using ForwardSyscall(). If the TrapFnc()
  // uses some other method to make system calls, then it is responsible
  // for computing the correct return code.
  // This test verifies that ForwardSyscall() does the correct thing.

  // The glibc system wrapper will ultimately set errno for us. So, from normal
  // userspace, all of this should be completely transparent.
  errno = 0;
  BPF_ASSERT(close(-1) == -1);
  BPF_ASSERT(errno == EBADF);

  // Explicitly avoid the glibc wrapper. This is not normally the way anybody
  // would make system calls, but it allows us to verify that we don't
  // accidentally mess with errno, when we shouldn't.
  errno = 0;
  struct arch_seccomp_data args = {};
  args.nr = __NR_close;
  args.args[0] = -1;
  BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF);
  BPF_ASSERT(errno == 0);
}
// Simple test demonstrating how to use SandboxBPF::Cond()

class SimpleCondTestPolicy : public Policy {
 public:
  SimpleCondTestPolicy() {}
  ~SimpleCondTestPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(SimpleCondTestPolicy);
};

ResultExpr SimpleCondTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));

  // We deliberately return unusual errno values upon failure, so that we
  // can uniquely test for these values. In a "real" policy, you would want
  // to return more traditional values.
  int flags_argument_position = -1;
  switch (sysno) {
#if defined(__NR_open)
    case __NR_open:
      flags_argument_position = 1;
#endif
    case __NR_openat: {  // open can be a wrapper for openat(2).
      if (sysno == __NR_openat)
        flags_argument_position = 2;

      // Allow opening files for reading, but don't allow writing.
      static_assert(O_RDONLY == 0, "O_RDONLY must be all zero bits");
      const Arg<int> flags(flags_argument_position);
      return If((flags & O_ACCMODE) != 0, Error(EROFS)).Else(Allow());
    }
    case __NR_prctl: {
      // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but
      // disallow everything else.
      const Arg<int> option(0);
      return If(option == PR_SET_DUMPABLE || option == PR_GET_DUMPABLE, Allow())
          .Else(Error(ENOMEM));
    }
    default:
      return Allow();
  }
}

BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) {
  int fd;
  BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1);
  BPF_ASSERT(errno == EROFS);
  BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0);
  close(fd);

  int ret;
  BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0);
  BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0);
  BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1);
  BPF_ASSERT(errno == ENOMEM);
}
// This test exercises the SandboxBPF::Cond() method by building a complex
// tree of conditional equality operations. It then makes system calls and
// verifies that they return the values that we expected from our BPF
// program.
class EqualityStressTest {
 public:
  EqualityStressTest() {
    // We want a deterministic test
    srand(0);

    // Iterates over system call numbers and builds a random tree of
    // equality tests.
    // We are actually constructing a graph of ArgValue objects. This
    // graph will later be used to a) compute our sandbox policy, and
    // b) drive the code that verifies the output from the BPF program.
    static_assert(
        kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
        "kNumTestCases must be significantly smaller than the number "
        "of system calls");
    for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
      if (IsReservedSyscall(sysno)) {
        // Skip reserved system calls. This ensures that our test framework
        // isn't impacted by the fact that we are overriding
        // a lot of different system calls.
        ++end;
        arg_values_.push_back(NULL);
      } else {
        arg_values_.push_back(
            RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
      }
    }
  }

  ~EqualityStressTest() {
    for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
         iter != arg_values_.end();
         ++iter) {
      DeleteArgValue(*iter);
    }
  }

  ResultExpr Policy(int sysno) {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno < 0 || sysno >= (int)arg_values_.size() ||
        IsReservedSyscall(sysno)) {
      // We only return ErrorCode values for the system calls that
      // are part of our test data. Every other system call remains
      // allowed.
      return Allow();
    } else {
      // ToErrorCode() turns an ArgValue object into an ErrorCode that is
      // suitable for use by a sandbox policy.
      return ToErrorCode(arg_values_[sysno]);
    }
  }

  void VerifyFilter() {
    // Iterate over all system calls. Skip the system calls that have
    // previously been determined as being reserved.
    for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) {
      if (!arg_values_[sysno]) {
        // Skip reserved system calls.
        continue;
      }
      // Verify that system calls return the values that we expect them to
      // return. This involves passing different combinations of system call
      // parameters in order to exercise all possible code paths through the
      // BPF filter program.
      // We arbitrarily start by setting all six system call arguments to
      // zero. We then recursively traverse our tree of ArgValues to
      // determine the necessary combinations of parameters.
      intptr_t args[6] = {};
      Verify(sysno, args, *arg_values_[sysno]);
    }
  }
 private:
  struct ArgValue {
    int argno;  // Argument number to inspect.
    int size;   // Number of test cases (must be > 0).
    struct Tests {
      uint32_t k_value;            // Value to compare syscall arg against.
      int err;                     // If non-zero, errno value to return.
      struct ArgValue* arg_value;  // Otherwise, more args need inspecting.
    }* tests;
    int err;                     // If none of the tests passed, this is what
    struct ArgValue* arg_value;  // we'll return (this is the "else" branch).
  };

  bool IsReservedSyscall(int sysno) {
    // There are a handful of system calls that we should never use in our
    // test cases. These system calls are needed to allow the test framework
    // to run properly.
    // If we wanted to write fully generic code, there are more system calls
    // that could be listed here, and it is quite difficult to come up with a
    // truly comprehensive list. After all, we are deliberately making system
    // calls unavailable. In practice, we have a pretty good idea of the system
    // calls that will be made by this particular test. So, this small list is
    // sufficient. But if anybody copy'n'pasted this code for other uses, they
    // would have to review the list.
    return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
           sysno == __NR_exit_group || sysno == __NR_restart_syscall;
  }
  ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
    // Create a new ArgValue and fill it with random data. We use a bit mask
    // to keep track of the system call parameters that have previously been
    // set; this ensures that we won't accidentally define a contradictory
    // set of equality tests.
    struct ArgValue* arg_value = new ArgValue();
    args_mask |= 1 << argno;
    arg_value->argno = argno;

    // Apply some restrictions on just how complex our tests can be.
    // Otherwise, we end up with a BPF program that is too complicated for
    // the kernel to load.
    int fan_out = kMaxFanOut;
    if (remaining_args > 3) {
      fan_out = 1;
    } else if (remaining_args > 2) {
      fan_out = 2;
    }

    // Create a couple of different test cases with randomized values that
    // we want to use when comparing system call parameter number "argno".
    arg_value->size = rand() % fan_out + 1;
    arg_value->tests = new ArgValue::Tests[arg_value->size];

    uint32_t k_value = rand();
    for (int n = 0; n < arg_value->size; ++n) {
      // Ensure that we have unique values
      k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;

      // There are two possible types of nodes. Either this is a leaf node;
      // in that case, we have completed all the equality tests that we
      // wanted to perform, and we can now compute a random "errno" value that
      // we should return. Or this is part of a more complex boolean
      // expression; in that case, we have to recursively add tests for some
      // of the system call parameters that we have not yet included in our
      // tests.
      arg_value->tests[n].k_value = k_value;
      if (!remaining_args || (rand() & 1)) {
        arg_value->tests[n].err = (rand() % 1000) + 1;
        arg_value->tests[n].arg_value = NULL;
      } else {
        arg_value->tests[n].err = 0;
        arg_value->tests[n].arg_value =
            RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
      }
    }

    // Finally, we have to define what we should return if none of the
    // previous equality tests pass. Again, we can either deal with a leaf
    // node, or we can randomly add another couple of tests.
    if (!remaining_args || (rand() & 1)) {
      arg_value->err = (rand() % 1000) + 1;
      arg_value->arg_value = NULL;
    } else {
      arg_value->err = 0;
      arg_value->arg_value =
          RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
    }

    // We have now built a new (sub-)tree of ArgValues defining a set of
    // boolean expressions for testing random system call arguments against
    // random values. Return this tree to our caller.
    return arg_value;
  }
  int RandomArg(int args_mask) {
    // Compute a random system call parameter number.
    int argno = rand() % kMaxArgs;

    // Make sure that this same parameter number has not previously been
    // used. Otherwise, we could end up with a test that is impossible to
    // satisfy (e.g. args[0] == 1 && args[0] == 2).
    while (args_mask & (1 << argno)) {
      argno = (argno + 1) % kMaxArgs;
    }
    return argno;
  }

  void DeleteArgValue(ArgValue* arg_value) {
    // Delete an ArgValue and all of its child nodes. This requires
    // recursively descending into the tree.
    if (arg_value) {
      if (arg_value->size) {
        for (int n = 0; n < arg_value->size; ++n) {
          if (!arg_value->tests[n].err) {
            DeleteArgValue(arg_value->tests[n].arg_value);
          }
        }
        delete[] arg_value->tests;
      }
      if (!arg_value->err) {
        DeleteArgValue(arg_value->arg_value);
      }
      delete arg_value;
    }
  }
  ResultExpr ToErrorCode(ArgValue* arg_value) {
    // Compute the ResultExpr that should be returned, if none of our
    // tests succeed (i.e. the system call parameter doesn't match any
    // of the values in arg_value->tests[].k_value).
    ResultExpr err;
    if (arg_value->err) {
      // If this was a leaf node, return the errno value that we expect to
      // return from the BPF filter program.
      err = Error(arg_value->err);
    } else {
      // If this wasn't a leaf node yet, recursively descend into the rest
      // of the tree. This will end up adding a few more SandboxBPF::Cond()
      // tests to our ErrorCode.
      err = ToErrorCode(arg_value->arg_value);
    }

    // Now, iterate over all the test cases that we want to compare against.
    // This builds a chain of SandboxBPF::Cond() tests
    // (aka "if ... elif ... elif ... elif ... fi")
    for (int n = arg_value->size; n-- > 0;) {
      ResultExpr matched;
      // Again, we distinguish between leaf nodes and subtrees.
      if (arg_value->tests[n].err) {
        matched = Error(arg_value->tests[n].err);
      } else {
        matched = ToErrorCode(arg_value->tests[n].arg_value);
      }
      // For now, all of our tests are limited to 32bit.
      // We have separate tests that check the behavior of 32bit vs. 64bit
      // conditional expressions.
      const Arg<uint32_t> arg(arg_value->argno);
      err = If(arg == arg_value->tests[n].k_value, matched).Else(err);
    }
    return err;
  }
  void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
    uint32_t mismatched = 0;
    // Iterate over all the k_values in arg_value.tests[] and verify that
    // we see the expected return values from system calls, when we pass
    // the k_value as a parameter in a system call.
    for (int n = arg_value.size; n-- > 0;) {
      mismatched += arg_value.tests[n].k_value;
      args[arg_value.argno] = arg_value.tests[n].k_value;
      if (arg_value.tests[n].err) {
        VerifyErrno(sysno, args, arg_value.tests[n].err);
      } else {
        Verify(sysno, args, *arg_value.tests[n].arg_value);
      }
    }
    // Find a k_value that doesn't match any of the k_values in
    // arg_value.tests[]. In most cases, the current value of "mismatched"
    // would fit this requirement. But on the off-chance that it happens
    // to collide, we double-check.
  try_again:
    for (int n = arg_value.size; n-- > 0;) {
      if (mismatched == arg_value.tests[n].k_value) {
        ++mismatched;
        goto try_again;
      }
    }
    // Now verify that we see the expected return value from system calls,
    // if we pass a value that doesn't match any of the conditions (i.e. this
    // is testing the "else" clause of the conditions).
    args[arg_value.argno] = mismatched;
    if (arg_value.err) {
      VerifyErrno(sysno, args, arg_value.err);
    } else {
      Verify(sysno, args, *arg_value.arg_value);
    }
    // Reset args[arg_value.argno]. This is not technically needed, but it
    // makes it easier to reason about the correctness of our tests.
    args[arg_value.argno] = 0;
  }

  void VerifyErrno(int sysno, intptr_t* args, int err) {
    // We installed BPF filters that return different errno values
    // based on the system call number and the parameters that we decided
    // to pass in. Verify that this condition holds true.
    BPF_ASSERT(
        Syscall::Call(
            sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
        -err);
  }
  // Vector of ArgValue trees. These trees define all the possible boolean
  // expressions that we want to turn into a BPF filter program.
  std::vector<ArgValue*> arg_values_;

  // Don't increase these values. We are pushing the limits of the maximum
  // BPF program that the kernel will allow us to load. If the values are
  // increased too much, the test will start failing.
#if defined(__aarch64__)
  static const int kNumTestCases = 30;
#else
  static const int kNumTestCases = 40;
#endif
  static const int kMaxFanOut = 3;
  static const int kMaxArgs = 6;
};

class EqualityStressTestPolicy : public Policy {
 public:
  explicit EqualityStressTestPolicy(EqualityStressTest* aux) : aux_(aux) {}
  ~EqualityStressTestPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    return aux_->Policy(sysno);
  }

 private:
  EqualityStressTest* aux_;

  DISALLOW_COPY_AND_ASSIGN(EqualityStressTestPolicy);
};

BPF_TEST(SandboxBPF,
         EqualityTests,
         EqualityStressTestPolicy,
         EqualityStressTest /* (*BPF_AUX) */) {
  BPF_AUX->VerifyFilter();
}
class EqualityArgumentWidthPolicy : public Policy {
 public:
  EqualityArgumentWidthPolicy() {}
  ~EqualityArgumentWidthPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(EqualityArgumentWidthPolicy);
};

ResultExpr EqualityArgumentWidthPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    const Arg<uint32_t> arg32(1);
    const Arg<uint64_t> arg64(1);
    return Switch(option)
        .Case(0, If(arg32 == 0x55555555, Error(1)).Else(Error(2)))
#if __SIZEOF_POINTER__ > 4
        .Case(1, If(arg64 == 0x55555555AAAAAAAAULL, Error(1)).Else(Error(2)))
#endif
        .Default(Error(3));
  }
  return Allow();
}

BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
  BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2);
#if __SIZEOF_POINTER__ > 4
  // On 32bit machines, there is no way to pass a 64bit argument through the
  // syscall interface. So, we have to skip the part of the test that requires
  // 64bit arguments.
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2);
#endif
}

#if __SIZEOF_POINTER__ > 4
// On 32bit machines, there is no way to pass a 64bit argument through the
// syscall interface. So, we have to skip the part of the test that requires
// 64bit arguments.
BPF_DEATH_TEST_C(SandboxBPF,
                 EqualityArgumentUnallowed64bit,
                 DEATH_MESSAGE("Unexpected 64bit argument detected"),
                 EqualityArgumentWidthPolicy) {
  Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL);
}
#endif
class EqualityWithNegativeArgumentsPolicy : public Policy {
 public:
  EqualityWithNegativeArgumentsPolicy() {}
  ~EqualityWithNegativeArgumentsPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno == __NR_uname) {
      // TODO(mdempsky): This currently can't be Arg<int> because then
      // 0xFFFFFFFF will be treated as a (signed) int, and then when
      // Arg::EqualTo casts it to uint64_t, it will be sign extended.
      const Arg<unsigned> arg(0);
      return If(arg == 0xFFFFFFFF, Error(1)).Else(Error(2));
    }
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(EqualityWithNegativeArgumentsPolicy);
};

BPF_TEST_C(SandboxBPF,
           EqualityWithNegativeArguments,
           EqualityWithNegativeArgumentsPolicy) {
  BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1);
}

#if __SIZEOF_POINTER__ > 4
BPF_DEATH_TEST_C(SandboxBPF,
                 EqualityWithNegative64bitArguments,
                 DEATH_MESSAGE("Unexpected 64bit argument detected"),
                 EqualityWithNegativeArgumentsPolicy) {
  // When expecting a 32bit system call argument, we look at the MSB of the
  // 64bit value and allow both "0" and "-1". But the latter is allowed only
  // if the LSB was negative. So, this death test should error out.
  BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
}
#endif
class AllBitTestPolicy : public Policy {
 public:
  AllBitTestPolicy() {}
  ~AllBitTestPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  static ResultExpr HasAllBits32(uint32_t bits);
  static ResultExpr HasAllBits64(uint64_t bits);

  DISALLOW_COPY_AND_ASSIGN(AllBitTestPolicy);
};

ResultExpr AllBitTestPolicy::HasAllBits32(uint32_t bits) {
  if (bits == 0) {
    return Error(1);
  }
  const Arg<uint32_t> arg(1);
  return If((arg & bits) == bits, Error(1)).Else(Error(0));
}

ResultExpr AllBitTestPolicy::HasAllBits64(uint64_t bits) {
  if (bits == 0) {
    return Error(1);
  }
  const Arg<uint64_t> arg(1);
  return If((arg & bits) == bits, Error(1)).Else(Error(0));
}

ResultExpr AllBitTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // Test masked-equality cases that should trigger the "has all bits"
  // peephole optimizations. We try to find bitmasks that could conceivably
  // touch corner cases.
  // For all of these tests, we override uname(). We can make do with a
  // single system call number, as we use the first system call argument to
  // select the different bit masks that we want to test against.
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, HasAllBits32(0x0))
        .Case(1, HasAllBits32(0x1))
        .Case(2, HasAllBits32(0x3))
        .Case(3, HasAllBits32(0x80000000))
#if __SIZEOF_POINTER__ > 4
        .Case(4, HasAllBits64(0x0))
        .Case(5, HasAllBits64(0x1))
        .Case(6, HasAllBits64(0x3))
        .Case(7, HasAllBits64(0x80000000))
        .Case(8, HasAllBits64(0x100000000ULL))
        .Case(9, HasAllBits64(0x300000000ULL))
        .Case(10, HasAllBits64(0x100000001ULL))
#endif
        .Default(Kill("Invalid test case number"));
  }
  return Allow();
}
// Define a macro that performs tests using our test policy.
// NOTE: Not all of the arguments in this macro are actually used!
// They are here just to serve as documentation of the conditions
// implemented in the test policy.
// Most notably, "op" and "mask" are unused by the macro. If you want
// to make changes to these values, you will have to edit the
// test policy instead.
#define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
  BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value))

// Our uname() system call returns ErrorCode(1) for success and
// ErrorCode(0) for failure. Syscall::Call() turns this into an
// exit code of -1 or 0.
#define EXPECT_FAILURE 0
#define EXPECT_SUCCESS -1

// A couple of our tests behave differently on 32bit and 64bit systems, as
// there is no way for a 32bit system call to pass in a 64bit system call
// argument "arg".
// We expect these tests to succeed on 64bit systems, but to fail on 32bit
// systems.
#define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
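
// For example, BITMASK_TEST(1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS) issues
// uname(1, 3); test case 1 checks that all bits of 0x1 are set in the second
// argument, which holds for 3, so the policy returns ErrorCode(1) and
// Syscall::Call() reports -1 (EXPECT_SUCCESS).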
BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) {
  // 32bit test: all of 0x0 (should always be true)
  BITMASK_TEST(0, 0, ALLBITS32, 0, EXPECT_SUCCESS);
  BITMASK_TEST(0, 1, ALLBITS32, 0, EXPECT_SUCCESS);
  BITMASK_TEST(0, 3, ALLBITS32, 0, EXPECT_SUCCESS);
  BITMASK_TEST(0, 0xFFFFFFFFU, ALLBITS32, 0, EXPECT_SUCCESS);
  BITMASK_TEST(0, -1LL, ALLBITS32, 0, EXPECT_SUCCESS);

  // 32bit test: all of 0x1
  BITMASK_TEST(1, 0, ALLBITS32, 0x1, EXPECT_FAILURE);
  BITMASK_TEST(1, 1, ALLBITS32, 0x1, EXPECT_SUCCESS);
  BITMASK_TEST(1, 2, ALLBITS32, 0x1, EXPECT_FAILURE);
  BITMASK_TEST(1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS);

  // 32bit test: all of 0x3
  BITMASK_TEST(2, 0, ALLBITS32, 0x3, EXPECT_FAILURE);
  BITMASK_TEST(2, 1, ALLBITS32, 0x3, EXPECT_FAILURE);
  BITMASK_TEST(2, 2, ALLBITS32, 0x3, EXPECT_FAILURE);
  BITMASK_TEST(2, 3, ALLBITS32, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST(2, 7, ALLBITS32, 0x3, EXPECT_SUCCESS);

  // 32bit test: all of 0x80000000
  BITMASK_TEST(3, 0, ALLBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST(3, 0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST(3, 0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST(3, 0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST(3, -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // 64bit test: all of 0x0 (should always be true)
  BITMASK_TEST(4, 0, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST(4, 1, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST(4, 3, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST(4, 0xFFFFFFFFU, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST(4, 0x100000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST(4, 0x300000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST(4, 0x8000000000000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST(4, -1LL, ALLBITS64, 0, EXPECT_SUCCESS);

  // 64bit test: all of 0x1
  BITMASK_TEST(5, 0, ALLBITS64, 1, EXPECT_FAILURE);
  BITMASK_TEST(5, 1, ALLBITS64, 1, EXPECT_SUCCESS);
  BITMASK_TEST(5, 2, ALLBITS64, 1, EXPECT_FAILURE);
  BITMASK_TEST(5, 3, ALLBITS64, 1, EXPECT_SUCCESS);
  BITMASK_TEST(5, 0x100000000LL, ALLBITS64, 1, EXPECT_FAILURE);
  BITMASK_TEST(5, 0x100000001LL, ALLBITS64, 1, EXPECT_SUCCESS);
  BITMASK_TEST(5, 0x100000002LL, ALLBITS64, 1, EXPECT_FAILURE);
  BITMASK_TEST(5, 0x100000003LL, ALLBITS64, 1, EXPECT_SUCCESS);

  // 64bit test: all of 0x3
  BITMASK_TEST(6, 0, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST(6, 1, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST(6, 2, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST(6, 3, ALLBITS64, 3, EXPECT_SUCCESS);
  BITMASK_TEST(6, 7, ALLBITS64, 3, EXPECT_SUCCESS);
  BITMASK_TEST(6, 0x100000000LL, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST(6, 0x100000001LL, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST(6, 0x100000002LL, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST(6, 0x100000003LL, ALLBITS64, 3, EXPECT_SUCCESS);
  BITMASK_TEST(6, 0x100000007LL, ALLBITS64, 3, EXPECT_SUCCESS);

  // 64bit test: all of 0x80000000
  BITMASK_TEST(7, 0, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST(7, 0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST(7, 0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST(7, 0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST(7, -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST(7, 0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST(7, 0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST(7, 0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST(7, 0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST(7, -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);

  // 64bit test: all of 0x100000000
  BITMASK_TEST(8, 0x000000000LL, ALLBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST(8, 0x100000000LL, ALLBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST(8, 0x200000000LL, ALLBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST(8, 0x300000000LL, ALLBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST(8, 0x000000001LL, ALLBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST(8, 0x100000001LL, ALLBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST(8, 0x200000001LL, ALLBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST(8, 0x300000001LL, ALLBITS64, 0x100000000, EXPT64_SUCCESS);

  // 64bit test: all of 0x300000000
  BITMASK_TEST(9, 0x000000000LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST(9, 0x100000000LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST(9, 0x200000000LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST(9, 0x300000000LL, ALLBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST(9, 0x700000000LL, ALLBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST(9, 0x000000001LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST(9, 0x100000001LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST(9, 0x200000001LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST(9, 0x300000001LL, ALLBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST(9, 0x700000001LL, ALLBITS64, 0x300000000, EXPT64_SUCCESS);

  // 64bit test: all of 0x100000001
  BITMASK_TEST(10, 0x000000000LL, ALLBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, 0x000000001LL, ALLBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, 0x100000000LL, ALLBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, 0x100000001LL, ALLBITS64, 0x100000001, EXPT64_SUCCESS);
  BITMASK_TEST(10, 0xFFFFFFFFU, ALLBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, -1L, ALLBITS64, 0x100000001, EXPT64_SUCCESS);
#endif
}
class AnyBitTestPolicy : public Policy {
 public:
  AnyBitTestPolicy() {}
  ~AnyBitTestPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  static ResultExpr HasAnyBits32(uint32_t);
  static ResultExpr HasAnyBits64(uint64_t);

  DISALLOW_COPY_AND_ASSIGN(AnyBitTestPolicy);
};

ResultExpr AnyBitTestPolicy::HasAnyBits32(uint32_t bits) {
  if (bits == 0) {
    return Error(0);
  }
  const Arg<uint32_t> arg(1);
  return If((arg & bits) != 0, Error(1)).Else(Error(0));
}

ResultExpr AnyBitTestPolicy::HasAnyBits64(uint64_t bits) {
  if (bits == 0) {
    return Error(0);
  }
  const Arg<uint64_t> arg(1);
  return If((arg & bits) != 0, Error(1)).Else(Error(0));
}

ResultExpr AnyBitTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // Test masked-equality cases that should trigger the "has any bits"
  // peephole optimizations. We try to find bitmasks that could conceivably
  // touch corner cases.
  // For all of these tests, we override uname(). We can make do with a
  // single system call number, as we use the first system call argument to
  // select the different bit masks that we want to test against.
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, HasAnyBits32(0x0))
        .Case(1, HasAnyBits32(0x1))
        .Case(2, HasAnyBits32(0x3))
        .Case(3, HasAnyBits32(0x80000000))
#if __SIZEOF_POINTER__ > 4
        .Case(4, HasAnyBits64(0x0))
        .Case(5, HasAnyBits64(0x1))
        .Case(6, HasAnyBits64(0x3))
        .Case(7, HasAnyBits64(0x80000000))
        .Case(8, HasAnyBits64(0x100000000ULL))
        .Case(9, HasAnyBits64(0x300000000ULL))
        .Case(10, HasAnyBits64(0x100000001ULL))
#endif
        .Default(Kill("Invalid test case number"));
  }
  return Allow();
}
1461 BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) {
1462 // 32bit test: any of 0x0 (should always be false)
1463 BITMASK_TEST( 0, 0, ANYBITS32, 0x0, EXPECT_FAILURE);
1464 BITMASK_TEST( 0, 1, ANYBITS32, 0x0, EXPECT_FAILURE);
1465 BITMASK_TEST( 0, 3, ANYBITS32, 0x0, EXPECT_FAILURE);
1466 BITMASK_TEST( 0, 0xFFFFFFFFU, ANYBITS32, 0x0, EXPECT_FAILURE);
1467 BITMASK_TEST( 0, -1LL, ANYBITS32, 0x0, EXPECT_FAILURE);
1469 // 32bit test: any of 0x1
1470 BITMASK_TEST( 1, 0, ANYBITS32, 0x1, EXPECT_FAILURE);
1471 BITMASK_TEST( 1, 1, ANYBITS32, 0x1, EXPECT_SUCCESS);
1472 BITMASK_TEST( 1, 2, ANYBITS32, 0x1, EXPECT_FAILURE);
1473 BITMASK_TEST( 1, 3, ANYBITS32, 0x1, EXPECT_SUCCESS);
1475 // 32bit test: any of 0x3
1476 BITMASK_TEST( 2, 0, ANYBITS32, 0x3, EXPECT_FAILURE);
1477 BITMASK_TEST( 2, 1, ANYBITS32, 0x3, EXPECT_SUCCESS);
1478 BITMASK_TEST( 2, 2, ANYBITS32, 0x3, EXPECT_SUCCESS);
1479 BITMASK_TEST( 2, 3, ANYBITS32, 0x3, EXPECT_SUCCESS);
1480 BITMASK_TEST( 2, 7, ANYBITS32, 0x3, EXPECT_SUCCESS);
1482 // 32bit test: any of 0x80000000
1483 BITMASK_TEST( 3, 0, ANYBITS32, 0x80000000, EXPECT_FAILURE);
1484 BITMASK_TEST( 3, 0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE);
1485 BITMASK_TEST( 3, 0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1486 BITMASK_TEST( 3, 0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1487 BITMASK_TEST( 3, -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1489 #if __SIZEOF_POINTER__ > 4
1490 // 64bit test: any of 0x0 (should always be false)
1491 BITMASK_TEST( 4, 0, ANYBITS64, 0x0, EXPECT_FAILURE);
1492 BITMASK_TEST( 4, 1, ANYBITS64, 0x0, EXPECT_FAILURE);
1493 BITMASK_TEST( 4, 3, ANYBITS64, 0x0, EXPECT_FAILURE);
1494 BITMASK_TEST( 4, 0xFFFFFFFFU, ANYBITS64, 0x0, EXPECT_FAILURE);
1495 BITMASK_TEST( 4, 0x100000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1496 BITMASK_TEST( 4, 0x300000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1497 BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1498 BITMASK_TEST( 4, -1LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1500 // 64bit test: any of 0x1
1501 BITMASK_TEST( 5, 0, ANYBITS64, 0x1, EXPECT_FAILURE);
1502 BITMASK_TEST( 5, 1, ANYBITS64, 0x1, EXPECT_SUCCESS);
1503 BITMASK_TEST( 5, 2, ANYBITS64, 0x1, EXPECT_FAILURE);
1504 BITMASK_TEST( 5, 3, ANYBITS64, 0x1, EXPECT_SUCCESS);
1505 BITMASK_TEST( 5, 0x100000001LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
1506 BITMASK_TEST( 5, 0x100000000LL, ANYBITS64, 0x1, EXPECT_FAILURE);
1507 BITMASK_TEST( 5, 0x100000002LL, ANYBITS64, 0x1, EXPECT_FAILURE);
1508 BITMASK_TEST( 5, 0x100000003LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
1510 // 64bit test: any of 0x3
1511 BITMASK_TEST( 6, 0, ANYBITS64, 0x3, EXPECT_FAILURE);
1512 BITMASK_TEST( 6, 1, ANYBITS64, 0x3, EXPECT_SUCCESS);
1513 BITMASK_TEST( 6, 2, ANYBITS64, 0x3, EXPECT_SUCCESS);
1514 BITMASK_TEST( 6, 3, ANYBITS64, 0x3, EXPECT_SUCCESS);
1515 BITMASK_TEST( 6, 7, ANYBITS64, 0x3, EXPECT_SUCCESS);
1516 BITMASK_TEST( 6, 0x100000000LL, ANYBITS64, 0x3, EXPECT_FAILURE);
1517 BITMASK_TEST( 6, 0x100000001LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1518 BITMASK_TEST( 6, 0x100000002LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1519 BITMASK_TEST( 6, 0x100000003LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1520 BITMASK_TEST( 6, 0x100000007LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1522 // 64bit test: any of 0x80000000
1523 BITMASK_TEST( 7, 0, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1524 BITMASK_TEST( 7, 0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1525 BITMASK_TEST( 7, 0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1526 BITMASK_TEST( 7, 0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1527 BITMASK_TEST( 7, -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1528 BITMASK_TEST( 7, 0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1529 BITMASK_TEST( 7, 0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1530 BITMASK_TEST( 7, 0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1531 BITMASK_TEST( 7, 0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1532 BITMASK_TEST( 7, -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1534 // 64bit test: any of 0x100000000
1535 BITMASK_TEST( 8, 0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1536 BITMASK_TEST( 8, 0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1537 BITMASK_TEST( 8, 0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1538 BITMASK_TEST( 8, 0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1539 BITMASK_TEST( 8, 0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1540 BITMASK_TEST( 8, 0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1541 BITMASK_TEST( 8, 0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1542 BITMASK_TEST( 8, 0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1544 // 64bit test: any of 0x300000000
1545 BITMASK_TEST( 9, 0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
1546 BITMASK_TEST( 9, 0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1547 BITMASK_TEST( 9, 0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1548 BITMASK_TEST( 9, 0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1549 BITMASK_TEST( 9, 0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1550 BITMASK_TEST( 9, 0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
1551 BITMASK_TEST( 9, 0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1552 BITMASK_TEST( 9, 0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1553 BITMASK_TEST( 9, 0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1554 BITMASK_TEST( 9, 0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1556 // 64bit test: any of 0x100000001
1557 BITMASK_TEST( 10, 0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE);
1558 BITMASK_TEST( 10, 0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1559 BITMASK_TEST( 10, 0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS);
1560 BITMASK_TEST( 10, 0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1561 BITMASK_TEST( 10, 0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1562 BITMASK_TEST( 10, -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1563 #endif
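// Policy used by the MaskedEqualTests below: argument 0 of uname() selects a
// Switch() case, and the policy reports through its return value (Error(1)
// vs. Error(0)) whether argument 1 matched the case's value under the given
// mask.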
1566 class MaskedEqualTestPolicy : public Policy {
1567 public:
1568 MaskedEqualTestPolicy() {}
1569 ~MaskedEqualTestPolicy() override {}
1571 ResultExpr EvaluateSyscall(int sysno) const override;
1573 private:
1574 static ResultExpr MaskedEqual32(uint32_t mask, uint32_t value);
1575 static ResultExpr MaskedEqual64(uint64_t mask, uint64_t value);
1577   DISALLOW_COPY_AND_ASSIGN(MaskedEqualTestPolicy);
1578 };
1580 ResultExpr MaskedEqualTestPolicy::MaskedEqual32(uint32_t mask, uint32_t value) {
1581 const Arg<uint32_t> arg(1);
1582   return If((arg & mask) == value, Error(1)).Else(Error(0));
1583 }
1585 ResultExpr MaskedEqualTestPolicy::MaskedEqual64(uint64_t mask, uint64_t value) {
1586 const Arg<uint64_t> arg(1);
1587   return If((arg & mask) == value, Error(1)).Else(Error(0));
1588 }
1590 ResultExpr MaskedEqualTestPolicy::EvaluateSyscall(int sysno) const {
1591 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1593 if (sysno == __NR_uname) {
1594 const Arg<int> option(0);
1595 return Switch(option)
1596 .Case(0, MaskedEqual32(0x00ff00ff, 0x005500aa))
1597 #if __SIZEOF_POINTER__ > 4
1598 .Case(1, MaskedEqual64(0x00ff00ff00000000, 0x005500aa00000000))
1599 .Case(2, MaskedEqual64(0x00ff00ff00ff00ff, 0x005500aa005500aa))
1600 #endif
1601         .Default(Kill("Invalid test case number"));
1602   }
1604   return Allow();
1605 }
1607 #define MASKEQ_TEST(rulenum, arg, expected_result) \
1608 BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result))
1610 BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) {
1611 // Allowed: 0x__55__aa
1612 MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE);
1613 MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE);
1614 MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE);
1615 MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE);
1616 MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE);
1617 MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS);
1618 MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE);
1619 MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE);
1620 MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS);
1621 MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS);
1622 MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS);
1623 MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS);
1625 #if __SIZEOF_POINTER__ > 4
1626 // Allowed: 0x__55__aa________
1627 MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE);
1628 MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE);
1629 MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE);
1630 MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE);
1631 MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE);
1632 MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE);
1633 MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE);
1634 MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS);
1635 MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE);
1636 MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE);
1637 MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS);
1638 MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS);
1639 MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS);
1640 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
1641 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
1642 MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS);
1644 // Allowed: 0x__55__aa__55__aa
1645 MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE);
1646 MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE);
1647 MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE);
1648 MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE);
1649 MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE);
1650 MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE);
1651 MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE);
1652 MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE);
1653 MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE);
1654 MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS);
1655 MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE);
1656 MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE);
1657 MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE);
1658 MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE);
1659 MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS);
1660 MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS);
1661 MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS);
1662 #endif
1663 }
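// SIGSYS handler installed via Trap() by the pthread policies below. It is
// reached whenever clone() is invoked with a flag combination the policy does
// not recognize; it dumps the offending arguments and fails the call with
// EPERM.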
1665 intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) {
1666 if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) {
1667 // We expect to get called for an attempt to fork(). No need to log that
1668 // call. But if we ever get called for anything else, we want to verbosely
1669 // print as much information as possible.
1670 const char* msg = (const char*)aux;
1671 printf(
1672 "Clone() was called with unexpected arguments\n"
1673 " nr: %d\n"
1674 " 1: 0x%llX\n"
1675 " 2: 0x%llX\n"
1676 " 3: 0x%llX\n"
1677 " 4: 0x%llX\n"
1678 " 5: 0x%llX\n"
1679 " 6: 0x%llX\n"
1680 "%s\n",
1681 args.nr,
1682 (long long)args.args[0],
1683 (long long)args.args[1],
1684 (long long)args.args[2],
1685 (long long)args.args[3],
1686 (long long)args.args[4],
1687 (long long)args.args[5],
1688         msg);
1689   }
1690   return -EPERM;
1691 }
1693 class PthreadPolicyEquality : public Policy {
1694 public:
1695 PthreadPolicyEquality() {}
1696 ~PthreadPolicyEquality() override {}
1698 ResultExpr EvaluateSyscall(int sysno) const override;
1700 private:
1701   DISALLOW_COPY_AND_ASSIGN(PthreadPolicyEquality);
1702 };
1704 ResultExpr PthreadPolicyEquality::EvaluateSyscall(int sysno) const {
1705 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1706 // This policy allows creating threads with pthread_create(). But it
1707 // doesn't allow any other uses of clone(). Most notably, it does not
1708 // allow callers to implement fork() or vfork() by passing suitable flags
1709 // to the clone() system call.
1710 if (sysno == __NR_clone) {
1711 // We have seen two different valid combinations of flags. Glibc
1712 // uses the more modern flags, sets the TLS from the call to clone(), and
1713     // uses futexes to monitor threads. Android's C run-time library doesn't
1714 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
1715 // More recent versions of Android don't set CLONE_DETACHED anymore, so
1716 // the last case accounts for that.
1717 // The following policy is very strict. It only allows the exact masks
1718 // that we have seen in known implementations. It is probably somewhat
1719 // stricter than what we would want to do.
1720 const uint64_t kGlibcCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
1721 CLONE_SIGHAND | CLONE_THREAD |
1722 CLONE_SYSVSEM | CLONE_SETTLS |
1723 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
1724 const uint64_t kBaseAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
1725 CLONE_SIGHAND | CLONE_THREAD |
1726 CLONE_SYSVSEM;
1727 const Arg<unsigned long> flags(0);
1728 return If(flags == kGlibcCloneMask ||
1729 flags == (kBaseAndroidCloneMask | CLONE_DETACHED) ||
1730 flags == kBaseAndroidCloneMask,
1731               Allow()).Else(Trap(PthreadTrapHandler, "Unknown mask"));
1732   }
1734   return Allow();
1735 }
1737 class PthreadPolicyBitMask : public Policy {
1738 public:
1739 PthreadPolicyBitMask() {}
1740 ~PthreadPolicyBitMask() override {}
1742 ResultExpr EvaluateSyscall(int sysno) const override;
1744 private:
1745 static BoolExpr HasAnyBits(const Arg<unsigned long>& arg, unsigned long bits);
1746 static BoolExpr HasAllBits(const Arg<unsigned long>& arg, unsigned long bits);
1748   DISALLOW_COPY_AND_ASSIGN(PthreadPolicyBitMask);
1749 };
1751 BoolExpr PthreadPolicyBitMask::HasAnyBits(const Arg<unsigned long>& arg,
1752 unsigned long bits) {
1753   return (arg & bits) != 0;
1754 }
1756 BoolExpr PthreadPolicyBitMask::HasAllBits(const Arg<unsigned long>& arg,
1757 unsigned long bits) {
1758   return (arg & bits) == bits;
1759 }
1761 ResultExpr PthreadPolicyBitMask::EvaluateSyscall(int sysno) const {
1762 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1763 // This policy allows creating threads with pthread_create(). But it
1764 // doesn't allow any other uses of clone(). Most notably, it does not
1765 // allow callers to implement fork() or vfork() by passing suitable flags
1766 // to the clone() system call.
1767 if (sysno == __NR_clone) {
1768 // We have seen two different valid combinations of flags. Glibc
1769 // uses the more modern flags, sets the TLS from the call to clone(), and
1770     // uses futexes to monitor threads. Android's C run-time library doesn't
1771 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
1772 // The following policy allows for either combination of flags, but it
1773 // is generally a little more conservative than strictly necessary. We
1774     // err on the side of caution.
1775 // Very noticeably though, we disallow fork() (which is often just a
1776 // wrapper around clone()).
1777 const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES |
1778 CLONE_SIGHAND | CLONE_THREAD |
1779 CLONE_SYSVSEM;
1780 const unsigned long kFutexFlags =
1781 CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
1782 const unsigned long kNoopFlags = CLONE_DETACHED;
1783 const unsigned long kKnownFlags =
1784 kMandatoryFlags | kFutexFlags | kNoopFlags;
1786 const Arg<unsigned long> flags(0);
1787 return If(HasAnyBits(flags, ~kKnownFlags),
1788 Trap(PthreadTrapHandler, "Unexpected CLONE_XXX flag found"))
1789 .ElseIf(!HasAllBits(flags, kMandatoryFlags),
1790 Trap(PthreadTrapHandler,
1791 "Missing mandatory CLONE_XXX flags "
1792 "when creating new thread"))
1793 .ElseIf(
1794 !HasAllBits(flags, kFutexFlags) && HasAnyBits(flags, kFutexFlags),
1795 Trap(PthreadTrapHandler,
1796 "Must set either all or none of the TLS and futex bits in "
1797 "call to clone()"))
1798         .Else(Allow());
1799   }
1801   return Allow();
1802 }
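// Thread body used by PthreadTest(): it increments the flag it was handed and
// wakes any futex waiter, so the parent can observe that the thread really ran.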
1804 static void* ThreadFnc(void* arg) {
1805 ++*reinterpret_cast<int*>(arg);
1806 Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
1807   return NULL;
1808 }
1810 static void PthreadTest() {
1811 // Attempt to start a joinable thread. This should succeed.
1812 pthread_t thread;
1813 int thread_ran = 0;
1814 BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran));
1815 BPF_ASSERT(!pthread_join(thread, NULL));
1816 BPF_ASSERT(thread_ran);
1818 // Attempt to start a detached thread. This should succeed.
1819 thread_ran = 0;
1820 pthread_attr_t attr;
1821 BPF_ASSERT(!pthread_attr_init(&attr));
1822 BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
1823 BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
1824 BPF_ASSERT(!pthread_attr_destroy(&attr));
1825 while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
1826          -EINTR) {
1827   }
1828 BPF_ASSERT(thread_ran);
1830 // Attempt to fork() a process using clone(). This should fail. We use the
1831 // same flags that glibc uses when calling fork(). But we don't actually
1832 // try calling the fork() implementation in the C run-time library, as
1833 // run-time libraries other than glibc might call __NR_fork instead of
1834 // __NR_clone, and that would introduce a bogus test failure.
1835 int pid;
1836 BPF_ASSERT(Syscall::Call(__NR_clone,
1837                            CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
1838                            0,
1839                            0,
1840                            &pid) == -EPERM);
1841 }
1843 BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) {
1844   PthreadTest();
1845 }
1847 BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) {
1848   PthreadTest();
1849 }
1851 // libc might not define these even though the kernel supports them.
1852 #ifndef PTRACE_O_TRACESECCOMP
1853 #define PTRACE_O_TRACESECCOMP 0x00000080
1854 #endif
1856 #ifdef PTRACE_EVENT_SECCOMP
1857 #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
1858 #else
1859 // When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they
1860 // changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by
1861 // PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If
1862 // PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both
1863 // values here.
1864 #define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8)
1865 #endif
1867 #if defined(__arm__)
1868 #ifndef PTRACE_SET_SYSCALL
1869 #define PTRACE_SET_SYSCALL 23
1870 #endif
1871 #endif
1873 #if defined(__aarch64__)
1874 #ifndef PTRACE_GETREGS
1875 #define PTRACE_GETREGS 12
1876 #endif
1877 #endif
1879 #if defined(__aarch64__)
1880 #ifndef PTRACE_SETREGS
1881 #define PTRACE_SETREGS 13
1882 #endif
1883 #endif
1885 // Changes the syscall to run for a child being sandboxed using seccomp-bpf with
1886 // PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped on
1887 // PTRACE_EVENT_SECCOMP.
1889 // regs should contain the current set of registers of the child, obtained using
1890 // PTRACE_GETREGS.
1892 // Depending on the architecture, this may modify regs, so the caller is
1893 // responsible for committing these changes using PTRACE_SETREGS.
1894 long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) {
1895 #if defined(__arm__)
1896 // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the
1897 // libc ptrace call as the request parameter is an enum, and
1898 // PTRACE_SET_SYSCALL may not be in the enum.
1899 return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number);
1900 #endif
1902 SECCOMP_PT_SYSCALL(*regs) = syscall_number;
1903   return 0;
1904 }
1906 const uint16_t kTraceData = 0xcc;
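// Policy that requests a ptrace() notification (Trace()) for every system
// call, tagging each event with kTraceData so the tracer can read it back via
// PTRACE_GETEVENTMSG.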
1908 class TraceAllPolicy : public Policy {
1909 public:
1910 TraceAllPolicy() {}
1911 ~TraceAllPolicy() override {}
1913 ResultExpr EvaluateSyscall(int system_call_number) const override {
1914     return Trace(kTraceData);
1915   }
1917 private:
1918   DISALLOW_COPY_AND_ASSIGN(TraceAllPolicy);
1919 };
1921 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) {
1922 if (!SandboxBPF::SupportsSeccompSandbox(
1923 SandboxBPF::SeccompLevel::SINGLE_THREADED)) {
1924     return;
1925   }
1927 // This test is disabled on ARM (32- and 64-bit) due to a kernel bug.
1928 // See https://code.google.com/p/chromium/issues/detail?id=383977
1929 #if defined(__arm__) || defined(__aarch64__)
1930 printf("This test is currently disabled on ARM32/64 due to a kernel bug.");
1931 return;
1932 #endif
1934 #if defined(__mips__)
1935 // TODO: Figure out how to support specificity of handling indirect syscalls
1936 // in this test and enable it.
1937 printf("This test is currently disabled on MIPS.");
1938 return;
1939 #endif
1941 pid_t pid = fork();
1942 BPF_ASSERT_NE(-1, pid);
1943 if (pid == 0) {
1944 pid_t my_pid = getpid();
1945 BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL));
1946 BPF_ASSERT_EQ(0, raise(SIGSTOP));
1947 SandboxBPF sandbox(new TraceAllPolicy);
1948 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED));
1950 // getpid is allowed.
1951 BPF_ASSERT_EQ(my_pid, sys_getpid());
1953 // write to stdout is skipped and returns a fake value.
1954 BPF_ASSERT_EQ(kExpectedReturnValue,
1955 syscall(__NR_write, STDOUT_FILENO, "A", 1));
1957 // kill is rewritten to exit(kExpectedReturnValue).
1958 syscall(__NR_kill, my_pid, SIGKILL);
1960 // Should not be reached.
1961     BPF_ASSERT(false);
1962   }
1964 int status;
1965 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1);
1966 BPF_ASSERT(WIFSTOPPED(status));
1968 BPF_ASSERT_NE(-1,
1969 ptrace(PTRACE_SETOPTIONS,
1970 pid,
1971 NULL,
1972 reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP)));
1973 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
1974 while (true) {
1975 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1);
1976 if (WIFEXITED(status) || WIFSIGNALED(status)) {
1977 BPF_ASSERT(WIFEXITED(status));
1978 BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status));
1979       break;
1980     }
1982 if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
1983 !IS_SECCOMP_EVENT(status)) {
1984 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
1985       continue;
1986     }
1988 unsigned long data;
1989 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data));
1990 BPF_ASSERT_EQ(kTraceData, data);
1992 regs_struct regs;
1993 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, &regs));
1994 switch (SECCOMP_PT_SYSCALL(regs)) {
1995 case __NR_write:
1996 // Skip writes to stdout, make it return kExpectedReturnValue. Allow
1997 // writes to stderr so that BPF_ASSERT messages show up.
1998 if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) {
1999 BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, -1));
2000 SECCOMP_PT_RESULT(regs) = kExpectedReturnValue;
2001           BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
2002         }
2003 break;
2005 case __NR_kill:
2006 // Rewrite to exit(kExpectedReturnValue).
2007 BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, __NR_exit));
2008 SECCOMP_PT_PARM1(regs) = kExpectedReturnValue;
2009 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
2010 break;
2012 default:
2013 // Allow all other syscalls.
2014         break;
2015     }
2017     BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2018   }
2019 }
2021 // Android does not expose pread64 nor pwrite64.
2022 #if !defined(OS_ANDROID)
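// Helpers that loop around pwrite64()/pread64() until the whole buffer has
// been transferred, retrying on short transfers and failing on errors.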
2024 bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) {
2025 while (count > 0) {
2026 const ssize_t transfered =
2027 HANDLE_EINTR(pwrite64(fd, buffer, count, offset));
2028 if (transfered <= 0 || static_cast<size_t>(transfered) > count) {
2029       return false;
2030     }
2031 count -= transfered;
2032 buffer += transfered;
2033     offset += transfered;
2034   }
2035   return true;
2036 }
2038 bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) {
2039 while (count > 0) {
2040 const ssize_t transfered = HANDLE_EINTR(pread64(fd, buffer, count, offset));
2041 if (transfered <= 0 || static_cast<size_t>(transfered) > count) {
2042       return false;
2043     }
2044 count -= transfered;
2045 buffer += transfered;
2046     offset += transfered;
2047   }
2048   return true;
2049 }
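// Set by TrapPread64Policy's handler so the test below can verify that the
// pread64() call was actually routed through the UnsafeTrap handler and
// forwarded with SandboxBPF::ForwardSyscall().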
2051 bool pread_64_was_forwarded = false;
2053 class TrapPread64Policy : public Policy {
2054 public:
2055 TrapPread64Policy() {}
2056 ~TrapPread64Policy() override {}
2058 ResultExpr EvaluateSyscall(int system_call_number) const override {
2059 // Set the global environment for unsafe traps once.
2060 if (system_call_number == MIN_SYSCALL) {
2061       EnableUnsafeTraps();
2062     }
2064 if (system_call_number == __NR_pread64) {
2065       return UnsafeTrap(ForwardPreadHandler, NULL);
2066     }
2067     return Allow();
2068   }
2070 private:
2071 static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args,
2072 void* aux) {
2073 BPF_ASSERT(args.nr == __NR_pread64);
2074 pread_64_was_forwarded = true;
2076     return SandboxBPF::ForwardSyscall(args);
2077   }
2079   DISALLOW_COPY_AND_ASSIGN(TrapPread64Policy);
2080 };
2082 // pread(2) takes a 64-bit offset. On 32-bit systems, it will be split
2083 // between two arguments. In this test, we make sure that ForwardSyscall() can
2084 // forward it properly.
2085 BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) {
2086 ScopedTemporaryFile temp_file;
2087 const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
2088 const char kTestString[] = "This is a test!";
2089 BPF_ASSERT(FullPwrite64(
2090 temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset));
2092 char read_test_string[sizeof(kTestString)] = {0};
2093 BPF_ASSERT(FullPread64(temp_file.fd(),
2094 read_test_string,
2095 sizeof(read_test_string),
2096 kLargeOffset));
2097 BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString)));
2098   BPF_ASSERT(pread_64_was_forwarded);
2099 }
2101 #endif // !defined(OS_ANDROID)
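// Runs on a second thread created before the sandbox is engaged: it waits for
// the main thread to signal that the filter has been started with
// SeccompLevel::MULTI_THREADED and then checks that nanosleep() is blocked on
// this thread as well.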
2103 void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) {
2104 base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr);
2106 // Wait for the main thread to signal that the filter has been applied.
2107 if (!event->IsSignaled()) {
2108     event->Wait();
2109   }
2111 BPF_ASSERT(event->IsSignaled());
2113 BlacklistNanosleepPolicy::AssertNanosleepFails();
2115   return NULL;
2116 }
2118 SANDBOX_TEST(SandboxBPF, Tsync) {
2119 const bool supports_multi_threaded = SandboxBPF::SupportsSeccompSandbox(
2120 SandboxBPF::SeccompLevel::MULTI_THREADED);
2121 // On Chrome OS tsync is mandatory.
2122 #if defined(OS_CHROMEOS)
2123 if (base::SysInfo::IsRunningOnChromeOS()) {
2124     BPF_ASSERT_EQ(true, supports_multi_threaded);
2125   }
2126   // Otherwise, this is a Chrome OS build that is not running on a Chrome OS
2127   // device (e.g. Chrome bots); in that case, fall through.
2128 #endif
2129 if (!supports_multi_threaded) {
2130     return;
2131   }
2133 base::WaitableEvent event(true, false);
2135 // Create a thread on which to invoke the blocked syscall.
2136 pthread_t thread;
2137 BPF_ASSERT_EQ(
2138 0, pthread_create(&thread, NULL, &TsyncApplyToTwoThreadsFunc, &event));
2140   // Test that nanosleep succeeds while the sandbox is not yet engaged.
2141 const struct timespec ts = {0, 0};
2142 BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
2144 // Engage the sandbox.
2145 SandboxBPF sandbox(new BlacklistNanosleepPolicy());
2146 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::MULTI_THREADED));
2148 // This thread should have the filter applied as well.
2149 BlacklistNanosleepPolicy::AssertNanosleepFails();
2151   // Signal the event so that the other thread attempts the now-blocked system call.
2152 event.Signal();
2154 // Wait for the thread to finish.
2155   BPF_ASSERT_EQ(0, pthread_join(thread, NULL));
2156 }
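// Policy that permits every system call. The death tests below use it to
// exercise the single- vs. multi-threaded startup checks rather than any
// filtering behavior.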
2158 class AllowAllPolicy : public Policy {
2159 public:
2160 AllowAllPolicy() {}
2161 ~AllowAllPolicy() override {}
2163 ResultExpr EvaluateSyscall(int sysno) const override { return Allow(); }
2165 private:
2166   DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy);
2167 };
2169 SANDBOX_DEATH_TEST(
2170 SandboxBPF,
2171 StartMultiThreadedAsSingleThreaded,
2172 DEATH_MESSAGE(
2173 ThreadHelpers::GetAssertSingleThreadedErrorMessageForTests())) {
2174 base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded");
2175 BPF_ASSERT(thread.Start());
2177 SandboxBPF sandbox(new AllowAllPolicy());
2178   BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED));
2179 }
2181 // http://crbug.com/407357
2182 #if !defined(THREAD_SANITIZER)
2183 SANDBOX_DEATH_TEST(
2184 SandboxBPF,
2185 StartSingleThreadedAsMultiThreaded,
2186 DEATH_MESSAGE(
2187 "Cannot start sandbox; process may be single-threaded when "
2188 "reported as not")) {
2189 SandboxBPF sandbox(new AllowAllPolicy());
2190   BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::SeccompLevel::MULTI_THREADED));
2191 }
2192 #endif // !defined(THREAD_SANITIZER)
2194 // A stub handler for the UnsafeTrap. Never called.
2195 intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) {
2196   return -1;
2197 }
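// Policy that mixes argument-based conditions (If/Switch on syscall
// arguments) with an UnsafeTrap() handler; the syscalls that UnsafeTrap()
// itself relies on must be allowed unconditionally.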
2199 class UnsafeTrapWithCondPolicy : public Policy {
2200 public:
2201 UnsafeTrapWithCondPolicy() {}
2202 ~UnsafeTrapWithCondPolicy() override {}
2204 ResultExpr EvaluateSyscall(int sysno) const override {
2205 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
2206 setenv(kSandboxDebuggingEnv, "t", 0);
2207 Die::SuppressInfoMessages(true);
2209 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
2210 return Allow();
2212 switch (sysno) {
2213 case __NR_uname: {
2214 const Arg<uint32_t> arg(0);
2215         return If(arg == 0, Allow()).Else(Error(EPERM));
2216       }
2217 case __NR_setgid: {
2218 const Arg<uint32_t> arg(0);
2219 return Switch(arg)
2220 .Case(100, Error(ENOMEM))
2221 .Case(200, Error(ENOSYS))
2222             .Default(Error(EPERM));
2223       }
2224 case __NR_close:
2225 case __NR_exit_group:
2226 case __NR_write:
2227 return Allow();
2228 case __NR_getppid:
2229 return UnsafeTrap(NoOpHandler, NULL);
2230 default:
2231         return Error(EPERM);
2232     }
2233   }
2235 private:
2236   DISALLOW_COPY_AND_ASSIGN(UnsafeTrapWithCondPolicy);
2237 };
2239 BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) {
2240 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0));
2241 BPF_ASSERT_EQ(EFAULT, errno);
2243 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1));
2244 BPF_ASSERT_EQ(EPERM, errno);
2246 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100));
2247 BPF_ASSERT_EQ(ENOMEM, errno);
2249 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200));
2250 BPF_ASSERT_EQ(ENOSYS, errno);
2252 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300));
2253   BPF_ASSERT_EQ(EPERM, errno);
2254 }
2256 } // namespace
2258 } // namespace bpf_dsl
2259 } // namespace sandbox