1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "sandbox/linux/bpf_dsl/codegen.h"

#include <stddef.h>
#include <stdint.h>

#include <limits>
#include <utility>

#include "base/logging.h"
#include "sandbox/linux/system_headers/linux_filter.h"
13 // This CodeGen implementation strives for simplicity while still
14 // generating acceptable BPF programs under typical usage patterns
15 // (e.g., by PolicyCompiler).
17 // The key to its simplicity is that BPF programs only support forward
18 // jumps/branches, which allows constraining the DAG construction API
// to make instruction nodes immutable. Immutable nodes admit a
20 // simple greedy approach of emitting new instructions as needed and
21 // then reusing existing ones that have already been emitted. This
22 // cleanly avoids any need to compute basic blocks or apply
23 // topological sorting because the API effectively sorts instructions
24 // for us (e.g., before MakeInstruction() can be called to emit a
// branch instruction, it must have already been called for each of
// the branch's target instructions.)
28 // This greedy algorithm is not without (theoretical) weakness though:
30 // 1. In the general case, we don't eliminate dead code. If needed,
31 // we could trace back through the program in Compile() and elide
32 // any unneeded instructions, but in practice we only emit live
33 // instructions anyway.
35 // 2. By not dividing instructions into basic blocks and sorting, we
36 // lose an opportunity to move non-branch/non-return instructions
37 // adjacent to their successor instructions, which means we might
38 // need to emit additional jumps. But in practice, they'll
39 // already be nearby as long as callers don't go out of their way
// to interleave MakeInstruction() calls for unrelated code
// sequences.
// kBranchRange is the maximum value that can be stored in
// sock_filter's 8-bit jt and jf fields (255, the farthest a BPF
// conditional branch can jump forward).
const size_t kBranchRange = std::numeric_limits<uint8_t>::max();
// Out-of-line definition for the static class constant; its value is
// supplied at the in-class declaration.
const CodeGen::Node CodeGen::kNullNode;
51 CodeGen::CodeGen() : program_(), equivalent_(), memos_() {
57 CodeGen::Program
CodeGen::Compile(CodeGen::Node head
) {
58 return Program(program_
.rbegin() + Offset(head
), program_
.rend());
61 CodeGen::Node
CodeGen::MakeInstruction(uint16_t code
,
65 // To avoid generating redundant code sequences, we memoize the
66 // results from AppendInstruction().
67 auto res
= memos_
.insert(std::make_pair(MemoKey(code
, k
, jt
, jf
), kNullNode
));
68 CodeGen::Node
* node
= &res
.first
->second
;
69 if (res
.second
) { // Newly inserted memo entry.
70 *node
= AppendInstruction(code
, k
, jt
, jf
);
75 CodeGen::Node
CodeGen::AppendInstruction(uint16_t code
,
79 if (BPF_CLASS(code
) == BPF_JMP
) {
80 CHECK_NE(BPF_JA
, BPF_OP(code
)) << "CodeGen inserts JAs as needed";
82 // Optimally adding jumps is rather tricky, so we use a quick
83 // approximation: by artificially reducing |jt|'s range, |jt| will
84 // stay within its true range even if we add a jump for |jf|.
85 jt
= WithinRange(jt
, kBranchRange
- 1);
86 jf
= WithinRange(jf
, kBranchRange
);
87 return Append(code
, k
, Offset(jt
), Offset(jf
));
90 CHECK_EQ(kNullNode
, jf
) << "Non-branch instructions shouldn't provide jf";
91 if (BPF_CLASS(code
) == BPF_RET
) {
92 CHECK_EQ(kNullNode
, jt
) << "Return instructions shouldn't provide jt";
94 // For non-branch/non-return instructions, execution always
95 // proceeds to the next instruction; so we need to arrange for
97 jt
= WithinRange(jt
, 0);
98 CHECK_EQ(0U, Offset(jt
)) << "ICE: Failed to setup next instruction";
100 return Append(code
, k
, 0, 0);
103 CodeGen::Node
CodeGen::WithinRange(Node target
, size_t range
) {
104 // Just use |target| if it's already within range.
105 if (Offset(target
) <= range
) {
109 // Alternatively, look for an equivalent instruction within range.
110 if (Offset(equivalent_
.at(target
)) <= range
) {
111 return equivalent_
.at(target
);
114 // Otherwise, fall back to emitting a jump instruction.
115 Node jump
= Append(BPF_JMP
| BPF_JA
, Offset(target
), 0, 0);
116 equivalent_
.at(target
) = jump
;
120 CodeGen::Node
CodeGen::Append(uint16_t code
, uint32_t k
, size_t jt
, size_t jf
) {
121 if (BPF_CLASS(code
) == BPF_JMP
&& BPF_OP(code
) != BPF_JA
) {
122 CHECK_LE(jt
, kBranchRange
);
123 CHECK_LE(jf
, kBranchRange
);
129 CHECK_LT(program_
.size(), static_cast<size_t>(BPF_MAXINSNS
));
130 CHECK_EQ(program_
.size(), equivalent_
.size());
132 Node res
= program_
.size();
133 program_
.push_back(sock_filter
{
134 code
, static_cast<uint8_t>(jt
), static_cast<uint8_t>(jf
), k
});
135 equivalent_
.push_back(res
);
139 size_t CodeGen::Offset(Node target
) const {
140 CHECK_LT(target
, program_
.size()) << "Bogus offset target node";
141 return (program_
.size() - 1) - target
;
144 // TODO(mdempsky): Move into a general base::Tuple helper library.
145 bool CodeGen::MemoKeyLess::operator()(const MemoKey
& lhs
,
146 const MemoKey
& rhs
) const {
147 if (base::get
<0>(lhs
) != base::get
<0>(rhs
))
148 return base::get
<0>(lhs
) < base::get
<0>(rhs
);
149 if (base::get
<1>(lhs
) != base::get
<1>(rhs
))
150 return base::get
<1>(lhs
) < base::get
<1>(rhs
);
151 if (base::get
<2>(lhs
) != base::get
<2>(rhs
))
152 return base::get
<2>(lhs
) < base::get
<2>(rhs
);
153 if (base::get
<3>(lhs
) != base::get
<3>(rhs
))
154 return base::get
<3>(lhs
) < base::get
<3>(rhs
);
158 } // namespace sandbox