//===--- Randstruct.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for Clang's structure field layout
// randomization.
//
//===----------------------------------------------------------------------===//
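
// Note: layout randomization is driven by the "randomize_layout" and
// "no_randomize_layout" attributes and seeded via the
// -frandomize-layout-seed= (or -frandomize-layout-seed-file=) driver flags;
// see the Clang documentation for the full user-facing behavior.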

#include "clang/AST/Randstruct.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h" // For StaticAssertDecl
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/SmallVector.h"

#include <algorithm> // For std::shuffle.
#include <memory>    // For std::unique_ptr and std::make_unique.
#include <random>    // For std::mt19937 and std::seed_seq.
#include <string>    // For the seed string.

using clang::ASTContext;
using clang::FieldDecl;
using llvm::SmallVector;

namespace {

// FIXME: Replace this with some discovery once that mechanism exists.
enum { CACHE_LINE = 64 };
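
// Note: the field sizes fed to the buckets below come from
// ASTContext::getTypeInfo(), whose Width is reported in bits, so CACHE_LINE
// serves as the per-bucket budget in those same units.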

// The Bucket class holds the struct fields we're trying to fill to a
// cache-line.
class Bucket {
  SmallVector<FieldDecl *, 64> Fields;
  int Size = 0;

public:
  virtual ~Bucket() = default;

  SmallVector<FieldDecl *, 64> &fields() { return Fields; }
  void addField(FieldDecl *Field, int FieldSize);
  virtual bool canFit(int FieldSize) const {
    return Size + FieldSize <= CACHE_LINE;
  }
  virtual bool isBitfieldRun() const { return false; }
  bool full() const { return Size >= CACHE_LINE; }
};

void Bucket::addField(FieldDecl *Field, int FieldSize) {
  Size += FieldSize;
  Fields.push_back(Field);
}

// A bucket for a run of adjacent bitfields: it reports that any field fits,
// so the whole run stays together in one bucket.
struct BitfieldRunBucket : public Bucket {
  bool canFit(int FieldSize) const override { return true; }
  bool isBitfieldRun() const override { return true; }
};
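
// Bitfield runs are kept intact because adjacent bitfields share storage
// units; shuffling them individually would change how they pack. For example,
// given the (hypothetical) record
//
//   struct S {
//     char A;
//     int B : 3;
//     int C : 5;
//     long D;
//   };
//
// the run {B, C} travels as one unit: A and D may land anywhere, but B and C
// remain adjacent and in their original relative order.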

void randomizeStructureLayoutImpl(const ASTContext &Context,
                                  llvm::SmallVectorImpl<FieldDecl *> &FieldsOut,
                                  std::mt19937 &RNG) {
  // All of the Buckets produced by the best-effort cache-line algorithm.
  SmallVector<std::unique_ptr<Bucket>, 16> Buckets;

  // The current bucket of fields that we are trying to fill to a cache-line.
  std::unique_ptr<Bucket> CurrentBucket;

  // The current bucket containing the run of adjacent bitfields to ensure
  // they remain adjacent.
  std::unique_ptr<BitfieldRunBucket> CurrentBitfieldRun;

  // Tracks the number of fields that we failed to fit to the current bucket,
  // and thus still need to be added later.
  size_t Skipped = 0;

  while (!FieldsOut.empty()) {
    // If we've Skipped more fields than we have remaining to place, that means
    // that they can't fit in our current bucket, and we need to start a new
    // one.
    if (Skipped >= FieldsOut.size()) {
      Skipped = 0;
      Buckets.push_back(std::move(CurrentBucket));
    }

    // Take the first field that needs to be put in a bucket.
    auto FieldIter = FieldsOut.begin();
    FieldDecl *FD = *FieldIter;

    if (FD->isBitField() && !FD->isZeroLengthBitField(Context)) {
      // Start a bitfield run if this is the first bitfield we have found.
      if (!CurrentBitfieldRun)
        CurrentBitfieldRun = std::make_unique<BitfieldRunBucket>();

      // We've placed the field, and can remove it from the vector of fields
      // still awaiting a bucket (FieldsOut).
      CurrentBitfieldRun->addField(FD, /*FieldSize is irrelevant here*/ 1);
      FieldsOut.erase(FieldIter);
      continue;
    }

    // Otherwise, the current field is not a bitfield. If we were previously in
    // a bitfield run, end it.
    if (CurrentBitfieldRun)
      Buckets.push_back(std::move(CurrentBitfieldRun));

    // If we don't have a bucket, make one.
    if (!CurrentBucket)
      CurrentBucket = std::make_unique<Bucket>();
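
    // A field at least as wide as the whole budget can never share a bucket
    // with anything else, so it is sealed into a bucket of its own below and
    // we move on.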
    uint64_t Width = Context.getTypeInfo(FD->getType()).Width;
    if (Width >= CACHE_LINE) {
      std::unique_ptr<Bucket> OverSized = std::make_unique<Bucket>();
      OverSized->addField(FD, Width);
      FieldsOut.erase(FieldIter);
      Buckets.push_back(std::move(OverSized));
      continue;
    }

    // If it fits, add it.
    if (CurrentBucket->canFit(Width)) {
      CurrentBucket->addField(FD, Width);
      FieldsOut.erase(FieldIter);

      // If it's now full, tie off the bucket.
      if (CurrentBucket->full()) {
        Skipped = 0;
        Buckets.push_back(std::move(CurrentBucket));
      }
    } else {
      // We can't fit it in our current bucket. Move it to the end for
      // processing later.
      ++Skipped; // Mark it skipped.
      FieldsOut.push_back(FD);
      FieldsOut.erase(FieldIter);
    }
  }

  // Done processing the fields awaiting a bucket.

  // If we were filling a bucket, tie it off.
  if (CurrentBucket)
    Buckets.push_back(std::move(CurrentBucket));

  // If we were processing a bitfield run bucket, tie it off.
  if (CurrentBitfieldRun)
    Buckets.push_back(std::move(CurrentBitfieldRun));

  std::shuffle(std::begin(Buckets), std::end(Buckets), RNG);

  // Produce the new ordering of the elements from the Buckets.
  SmallVector<FieldDecl *, 16> FinalOrder;
  for (const std::unique_ptr<Bucket> &B : Buckets) {
    llvm::SmallVectorImpl<FieldDecl *> &RandFields = B->fields();
    if (!B->isBitfieldRun())
      std::shuffle(std::begin(RandFields), std::end(RandFields), RNG);

    FinalOrder.insert(FinalOrder.end(), RandFields.begin(), RandFields.end());
  }

  FieldsOut = FinalOrder;
}

} // anonymous namespace

namespace clang {
namespace randstruct {

bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD,
                              SmallVectorImpl<Decl *> &FinalOrdering) {
  SmallVector<FieldDecl *, 64> RandomizedFields;
  SmallVector<Decl *, 8> PostRandomizedFields;

  unsigned TotalNumFields = 0;
  for (Decl *D : RD->decls()) {
    ++TotalNumFields;
    if (auto *FD = dyn_cast<FieldDecl>(D))
      RandomizedFields.push_back(FD);
    else if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D))
      PostRandomizedFields.push_back(D);
    else
      FinalOrdering.push_back(D);
  }

  if (RandomizedFields.empty())
    return false;

  // The struct might end with a flexible array or an array of size 0 or 1,
  // in which case we don't want to randomize it.
  FieldDecl *FlexibleArray =
      RD->hasFlexibleArrayMember() ? RandomizedFields.pop_back_val() : nullptr;
  if (!FlexibleArray) {
    if (const auto *CA =
            dyn_cast<ConstantArrayType>(RandomizedFields.back()->getType()))
      if (CA->getSize().sle(2))
        FlexibleArray = RandomizedFields.pop_back_val();
  }
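
  // For example, in "struct S { int Len; char Data[]; };" the flexible array
  // member Data must stay last: moving it anywhere else would make the
  // declaration ill-formed, so it is pulled out before shuffling and appended
  // again afterward. Trailing arrays of size 0 or 1, the pre-C99 idiom for
  // the same pattern, get the same treatment.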

  std::string Seed =
      Context.getLangOpts().RandstructSeed + RD->getNameAsString();
  std::seed_seq SeedSeq(Seed.begin(), Seed.end());
  std::mt19937 RNG(SeedSeq);
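
  // Combining the user-provided seed with the record's name makes the shuffle
  // deterministic: the same seed and record always yield the same layout, so
  // separate translation units built with the same seed agree on the ordering.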

  randomizeStructureLayoutImpl(Context, RandomizedFields, RNG);

  // Insert the randomized decls into the final ordering.
  FinalOrdering.insert(FinalOrdering.end(), RandomizedFields.begin(),
                       RandomizedFields.end());

  // Add fields that belong towards the end of the RecordDecl.
  FinalOrdering.insert(FinalOrdering.end(), PostRandomizedFields.begin(),
                       PostRandomizedFields.end());

  // Add back the flexible array.
  if (FlexibleArray)
    FinalOrdering.push_back(FlexibleArray);

  assert(TotalNumFields == FinalOrdering.size() &&
         "Decl count has been altered after Randstruct randomization!");
  (void)TotalNumFields;

  return true;
}

} // end namespace randstruct
} // end namespace clang