//===--- Randstruct.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for Clang's structure field layout
// randomization.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/Randstruct.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h" // For StaticAssertDecl
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/SmallVector.h"

#include <algorithm>
#include <cassert>
#include <memory>
#include <random>
#include <string>

using clang::ASTContext;
using clang::FieldDecl;
using llvm::SmallVector;

namespace {

// FIXME: Replace this with some discovery once that mechanism exists.
enum { CACHE_LINE = 64 };

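// Note: the field widths compared against CACHE_LINE below come from
// ASTContext::getTypeInfo, whose Width is reported in bits.
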
// The Bucket class holds the struct fields we're trying to fill to a
// cache-line.
class Bucket {
  SmallVector<FieldDecl *, 64> Fields;
  int Size = 0;

public:
  virtual ~Bucket() = default;

  SmallVector<FieldDecl *, 64> &fields() { return Fields; }
  void addField(FieldDecl *Field, int FieldSize);
  virtual bool canFit(int FieldSize) const {
    return Size + FieldSize <= CACHE_LINE;
  }
  virtual bool isBitfieldRun() const { return false; }
  bool full() const { return Size >= CACHE_LINE; }
};

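// Illustrative example (assumed field sizes): with CACHE_LINE == 64, a Bucket
// already holding fields of size 32 and 16 has Size == 48, so canFit(16) is
// still true; adding that 16-wide field brings Size to exactly 64 and full()
// then returns true.
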
void Bucket::addField(FieldDecl *Field, int FieldSize) {
  Size += FieldSize;
  Fields.push_back(Field);
}

// A run of adjacent bitfields gets its own Bucket so the run stays
// contiguous: it never rejects a field and its fields are never shuffled.
struct BitfieldRunBucket : public Bucket {
  bool canFit(int FieldSize) const override { return true; }
  bool isBitfieldRun() const override { return true; }
};

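// Sketch of the bucketing pass below (assumed declaration order and widths):
// given fields A(32), b:1, c:2, and D(128), A lands in the current Bucket,
// the run {b, c} lands in one BitfieldRunBucket, and D lands alone in an
// over-sized Bucket. The Buckets are then shuffled relative to each other,
// and the fields inside every non-bitfield-run Bucket are shuffled as well.
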
void randomizeStructureLayoutImpl(const ASTContext &Context,
                                  llvm::SmallVectorImpl<FieldDecl *> &FieldsOut,
                                  std::mt19937 &RNG) {
  // All of the Buckets produced by the best-effort cache-line algorithm.
  SmallVector<std::unique_ptr<Bucket>, 16> Buckets;

  // The current bucket of fields that we are trying to fill to a cache-line.
  std::unique_ptr<Bucket> CurrentBucket;

  // The current bucket containing the run of adjacent bitfields, kept
  // together so that they remain adjacent.
  std::unique_ptr<BitfieldRunBucket> CurrentBitfieldRun;

  // Tracks the number of fields that we failed to fit into the current
  // bucket, and thus still need to be added later.
  size_t Skipped = 0;

  while (!FieldsOut.empty()) {
    // If we've skipped more fields than we have remaining to place, none of
    // them can fit in our current bucket, so tie it off and start a new one.
    if (Skipped >= FieldsOut.size()) {
      Skipped = 0;
      Buckets.push_back(std::move(CurrentBucket));
    }

    // Take the first field that needs to be put in a bucket.
    auto FieldIter = FieldsOut.begin();
    FieldDecl *FD = *FieldIter;

    if (FD->isBitField() && !FD->isZeroLengthBitField(Context)) {
      // Start a bitfield run if this is the first bitfield we have found.
      if (!CurrentBitfieldRun)
        CurrentBitfieldRun = std::make_unique<BitfieldRunBucket>();

      // We've placed the field and can remove it from FieldsOut, the list of
      // fields still awaiting a bucket.
      CurrentBitfieldRun->addField(FD, /*FieldSize is irrelevant here*/ 1);
      FieldsOut.erase(FieldIter);
      continue;
    }

    // Otherwise the current field is not a bitfield. If we were previously in
    // a bitfield run, end it.
    if (CurrentBitfieldRun)
      Buckets.push_back(std::move(CurrentBitfieldRun));

    // If we don't have a bucket, make one.
    if (!CurrentBucket)
      CurrentBucket = std::make_unique<Bucket>();

    uint64_t Width = Context.getTypeInfo(FD->getType()).Width;
    if (Width >= CACHE_LINE) {
      // The field is at least a full cache-line wide; give it a bucket of
      // its own.
      std::unique_ptr<Bucket> OverSized = std::make_unique<Bucket>();
      OverSized->addField(FD, Width);
      FieldsOut.erase(FieldIter);
      Buckets.push_back(std::move(OverSized));
      continue;
    }

    // If it fits, add it.
    if (CurrentBucket->canFit(Width)) {
      CurrentBucket->addField(FD, Width);
      FieldsOut.erase(FieldIter);

      // If the bucket is now full, tie it off.
      if (CurrentBucket->full()) {
        Skipped = 0;
        Buckets.push_back(std::move(CurrentBucket));
      }
    } else {
      // We can't fit it in our current bucket. Move it to the end of
      // FieldsOut for processing later.
      ++Skipped; // Mark it skipped.
      FieldsOut.push_back(FD);
      FieldsOut.erase(FieldIter);
    }
  }

  // Done processing the fields awaiting a bucket.

  // If we were filling a bucket, tie it off.
  if (CurrentBucket)
    Buckets.push_back(std::move(CurrentBucket));

  // If we were processing a bitfield run bucket, tie it off.
  if (CurrentBitfieldRun)
    Buckets.push_back(std::move(CurrentBitfieldRun));

  std::shuffle(std::begin(Buckets), std::end(Buckets), RNG);

  // Produce the new ordering of the elements from the Buckets.
  SmallVector<FieldDecl *, 16> FinalOrder;
  for (const std::unique_ptr<Bucket> &B : Buckets) {
    llvm::SmallVectorImpl<FieldDecl *> &RandFields = B->fields();
    if (!B->isBitfieldRun())
      std::shuffle(std::begin(RandFields), std::end(RandFields), RNG);

    FinalOrder.insert(FinalOrder.end(), RandFields.begin(), RandFields.end());
  }

  FieldsOut = FinalOrder;
}

} // anonymous namespace

namespace clang {
namespace randstruct {

bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD,
                              SmallVectorImpl<Decl *> &FinalOrdering) {
  SmallVector<FieldDecl *, 64> RandomizedFields;
  SmallVector<Decl *, 8> PostRandomizedFields;

  unsigned TotalNumFields = 0;
  for (Decl *D : RD->decls()) {
    ++TotalNumFields;
    if (auto *FD = dyn_cast<FieldDecl>(D))
      RandomizedFields.push_back(FD);
    else if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D))
      PostRandomizedFields.push_back(D);
    else
      FinalOrdering.push_back(D);
  }

  if (RandomizedFields.empty())
    return false;

  // The struct might end with a flexible array or an array of size 0 or 1,
  // in which case we don't want to randomize it.
  FieldDecl *FlexibleArray =
      RD->hasFlexibleArrayMember() ? RandomizedFields.pop_back_val() : nullptr;
  if (!FlexibleArray) {
    if (const auto *CA =
            dyn_cast<ConstantArrayType>(RandomizedFields.back()->getType()))
      if (CA->getSize().sle(2))
        FlexibleArray = RandomizedFields.pop_back_val();
  }

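  // Illustrative example (assumed declaration): in
  // "struct S { int X; char Tail[]; };" the trailing Tail member is popped
  // off here and re-appended after shuffling, so the flexible array stays
  // last.
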
  std::string Seed =
      Context.getLangOpts().RandstructSeed + RD->getNameAsString();
  std::seed_seq SeedSeq(Seed.begin(), Seed.end());
  std::mt19937 RNG(SeedSeq);
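
  // For instance (hypothetical values): a user seed of "foo" and a record
  // named "bar" yield the seed string "foobar", so the shuffle is
  // deterministic for a given seed/record pair across builds.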

  randomizeStructureLayoutImpl(Context, RandomizedFields, RNG);

  // Insert the randomized decls into the final ordering.
  FinalOrdering.insert(FinalOrdering.end(), RandomizedFields.begin(),
                       RandomizedFields.end());

  // Add fields that belong towards the end of the RecordDecl.
  FinalOrdering.insert(FinalOrdering.end(), PostRandomizedFields.begin(),
                       PostRandomizedFields.end());

  // Add back the flexible array.
  if (FlexibleArray)
    FinalOrdering.push_back(FlexibleArray);

  assert(TotalNumFields == FinalOrdering.size() &&
         "Decl count has been altered after Randstruct randomization!");
  (void)TotalNumFields;

  return true;
}

} // end namespace randstruct
} // end namespace clang
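
// Usage sketch (hypothetical caller, not part of this file): a consumer with
// an ASTContext &Ctx and a RecordDecl *RD in hand could apply the new
// ordering roughly like so:
//
//   llvm::SmallVector<clang::Decl *, 16> NewOrder;
//   if (clang::randstruct::randomizeStructureLayout(Ctx, RD, NewOrder))
//     RD->reorderDecls(NewOrder); // reorderDecls is assumed to be available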