//===--- OptimizedStructLayout.cpp - Optimal data layout algorithm -------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the performOptimizedStructLayout interface.
11 //===----------------------------------------------------------------------===//
#include "llvm/Support/OptimizedStructLayout.h"
#include <cstring>
#include <optional>

using namespace llvm;
18 using Field
= OptimizedStructLayoutField
;
21 static void checkValidLayout(ArrayRef
<Field
> Fields
, uint64_t Size
,
24 Align ComputedMaxAlign
;
25 for (auto &Field
: Fields
) {
26 assert(Field
.hasFixedOffset() &&
27 "didn't assign a fixed offset to field");
28 assert(isAligned(Field
.Alignment
, Field
.Offset
) &&
29 "didn't assign a correctly-aligned offset to field");
30 assert(Field
.Offset
>= LastEnd
&&
31 "didn't assign offsets in ascending order");
32 LastEnd
= Field
.getEndOffset();
33 assert(Field
.Alignment
<= MaxAlign
&&
34 "didn't compute MaxAlign correctly");
35 ComputedMaxAlign
= std::max(Field
.Alignment
, MaxAlign
);
37 assert(LastEnd
== Size
&& "didn't compute LastEnd correctly");
38 assert(ComputedMaxAlign
== MaxAlign
&& "didn't compute MaxAlign correctly");
42 std::pair
<uint64_t, Align
>
43 llvm::performOptimizedStructLayout(MutableArrayRef
<Field
> Fields
) {
45 // Do some simple precondition checks.
47 bool InFixedPrefix
= true;
49 for (auto &Field
: Fields
) {
50 assert(Field
.Size
> 0 && "field of zero size");
51 if (Field
.hasFixedOffset()) {
52 assert(InFixedPrefix
&&
53 "fixed-offset fields are not a strict prefix of array");
54 assert(LastEnd
<= Field
.Offset
&&
55 "fixed-offset fields overlap or are not in order");
56 LastEnd
= Field
.getEndOffset();
57 assert(LastEnd
> Field
.Offset
&&
58 "overflow in fixed-offset end offset");
60 InFixedPrefix
= false;
66 // Do an initial pass over the fields.
69 // Find the first flexible-offset field, tracking MaxAlign.
70 auto FirstFlexible
= Fields
.begin(), E
= Fields
.end();
71 while (FirstFlexible
!= E
&& FirstFlexible
->hasFixedOffset()) {
72 MaxAlign
= std::max(MaxAlign
, FirstFlexible
->Alignment
);
76 // If there are no flexible fields, we're done.
77 if (FirstFlexible
== E
) {
80 Size
= Fields
.back().getEndOffset();
83 checkValidLayout(Fields
, Size
, MaxAlign
);
85 return std::make_pair(Size
, MaxAlign
);
88 // Walk over the flexible-offset fields, tracking MaxAlign and
89 // assigning them a unique number in order of their appearance.
90 // We'll use this unique number in the comparison below so that
91 // we can use array_pod_sort, which isn't stable. We won't use it
94 uintptr_t UniqueNumber
= 0;
95 for (auto I
= FirstFlexible
; I
!= E
; ++I
) {
96 I
->Scratch
= reinterpret_cast<void*>(UniqueNumber
++);
97 MaxAlign
= std::max(MaxAlign
, I
->Alignment
);
101 // Sort the flexible elements in order of decreasing alignment,
102 // then decreasing size, and then the original order as recorded
103 // in Scratch. The decreasing-size aspect of this is only really
104 // important if we get into the gap-filling stage below, but it
105 // doesn't hurt here.
106 array_pod_sort(FirstFlexible
, E
,
107 [](const Field
*lhs
, const Field
*rhs
) -> int {
108 // Decreasing alignment.
109 if (lhs
->Alignment
!= rhs
->Alignment
)
110 return (lhs
->Alignment
< rhs
->Alignment
? 1 : -1);
113 if (lhs
->Size
!= rhs
->Size
)
114 return (lhs
->Size
< rhs
->Size
? 1 : -1);
117 auto lhsNumber
= reinterpret_cast<uintptr_t>(lhs
->Scratch
);
118 auto rhsNumber
= reinterpret_cast<uintptr_t>(rhs
->Scratch
);
119 if (lhsNumber
!= rhsNumber
)
120 return (lhsNumber
< rhsNumber
? -1 : 1);
125 // Do a quick check for whether that sort alone has given us a perfect
126 // layout with no interior padding. This is very common: if the
127 // fixed-layout fields have no interior padding, and they end at a
128 // sufficiently-aligned offset for all the flexible-layout fields,
129 // and the flexible-layout fields all have sizes that are multiples
130 // of their alignment, then this will reliably trigger.
132 bool HasPadding
= false;
133 uint64_t LastEnd
= 0;
135 // Walk the fixed-offset fields.
136 for (auto I
= Fields
.begin(); I
!= FirstFlexible
; ++I
) {
137 assert(I
->hasFixedOffset());
138 if (LastEnd
!= I
->Offset
) {
142 LastEnd
= I
->getEndOffset();
145 // Walk the flexible-offset fields, optimistically assigning fixed
146 // offsets. Note that we maintain a strict division between the
147 // fixed-offset and flexible-offset fields, so if we end up
148 // discovering padding later in this loop, we can just abandon this
149 // work and we'll ignore the offsets we already assigned.
151 for (auto I
= FirstFlexible
; I
!= E
; ++I
) {
152 auto Offset
= alignTo(LastEnd
, I
->Alignment
);
153 if (LastEnd
!= Offset
) {
158 LastEnd
= I
->getEndOffset();
162 // If we already have a perfect layout, we're done.
165 checkValidLayout(Fields
, LastEnd
, MaxAlign
);
167 return std::make_pair(LastEnd
, MaxAlign
);
171 // The algorithm sketch at this point is as follows.
173 // Consider the padding gaps between fixed-offset fields in ascending
174 // order. Let LastEnd be the offset of the first byte following the
175 // field before the gap, or 0 if the gap is at the beginning of the
176 // structure. Find the "best" flexible-offset field according to the
177 // criteria below. If no such field exists, proceed to the next gap.
178 // Otherwise, add the field at the first properly-aligned offset for
179 // that field that is >= LastEnd, then update LastEnd and repeat in
180 // order to fill any remaining gap following that field.
182 // Next, let LastEnd to be the offset of the first byte following the
183 // last fixed-offset field, or 0 if there are no fixed-offset fields.
184 // While there are flexible-offset fields remaining, find the "best"
185 // flexible-offset field according to the criteria below, add it at
186 // the first properly-aligned offset for that field that is >= LastEnd,
187 // and update LastEnd to the first byte following the field.
189 // The "best" field is chosen by the following criteria, considered
190 // strictly in order:
192 // - When filling a gap betweeen fields, the field must fit.
193 // - A field is preferred if it requires less padding following LastEnd.
194 // - A field is preferred if it is more aligned.
195 // - A field is preferred if it is larger.
196 // - A field is preferred if it appeared earlier in the initial order.
198 // Minimizing leading padding is a greedy attempt to avoid padding
199 // entirely. Preferring more-aligned fields is an attempt to eliminate
200 // stricter constraints earlier, with the idea that weaker alignment
201 // constraints may be resolvable with less padding elsewhere. These
202 // These two rules are sufficient to ensure that we get the optimal
203 // layout in the "C-style" case. Preferring larger fields tends to take
204 // better advantage of large gaps and may be more likely to have a size
205 // that's a multiple of a useful alignment. Preferring the initial
206 // order may help somewhat with locality but is mostly just a way of
207 // ensuring deterministic output.
209 // Note that this algorithm does not guarantee a minimal layout. Picking
210 // a larger object greedily may leave a gap that cannot be filled as
211 // efficiently. Unfortunately, solving this perfectly is an NP-complete
212 // problem (by reduction from bin-packing: let B_i be the bin sizes and
213 // O_j be the object sizes; add fixed-offset fields such that the gaps
214 // between them have size B_i, and add flexible-offset fields with
215 // alignment 1 and size O_j; if the layout size is equal to the end of
216 // the last fixed-layout field, the objects fit in the bins; note that
217 // this doesn't even require the complexity of alignment).
219 // The implementation below is essentially just an optimized version of
220 // scanning the list of remaining fields looking for the best, which
221 // would be O(n^2). In the worst case, it doesn't improve on that.
222 // However, in practice it'll just scan the array of alignment bins
223 // and consider the first few elements from one or two bins. The
224 // number of bins is bounded by a small constant: alignments are powers
225 // of two that are vanishingly unlikely to be over 64 and fairly unlikely
226 // to be over 8. And multiple elements only need to be considered when
227 // filling a gap between fixed-offset fields, which doesn't happen very
228 // often. We could use a data structure within bins that optimizes for
229 // finding the best-sized match, but it would require allocating memory
230 // and copying data, so it's unlikely to be worthwhile.
233 // Start by organizing the flexible-offset fields into bins according to
234 // their alignment. We expect a small enough number of bins that we
235 // don't care about the asymptotic costs of walking this.
236 struct AlignmentQueue
{
237 /// The minimum size of anything currently in this queue.
240 /// The head of the queue. A singly-linked list. The order here should
241 /// be consistent with the earlier sort, i.e. the elements should be
242 /// monotonically descending in size and otherwise in the original order.
244 /// We remove the queue from the array as soon as this is empty.
245 OptimizedStructLayoutField
*Head
;
247 /// The alignment requirement of the queue.
250 static Field
*getNext(Field
*Cur
) {
251 return static_cast<Field
*>(Cur
->Scratch
);
254 SmallVector
<AlignmentQueue
, 8> FlexibleFieldsByAlignment
;
255 for (auto I
= FirstFlexible
; I
!= E
; ) {
257 auto Alignment
= I
->Alignment
;
259 uint64_t MinSize
= I
->Size
;
260 auto LastInQueue
= I
;
261 for (++I
; I
!= E
&& I
->Alignment
== Alignment
; ++I
) {
262 LastInQueue
->Scratch
= I
;
264 MinSize
= std::min(MinSize
, I
->Size
);
266 LastInQueue
->Scratch
= nullptr;
268 FlexibleFieldsByAlignment
.push_back({MinSize
, Head
, Alignment
});
272 // Verify that we set the queues up correctly.
273 auto checkQueues
= [&]{
274 bool FirstQueue
= true;
275 Align LastQueueAlignment
;
276 for (auto &Queue
: FlexibleFieldsByAlignment
) {
277 assert((FirstQueue
|| Queue
.Alignment
< LastQueueAlignment
) &&
278 "bins not in order of descending alignment");
279 LastQueueAlignment
= Queue
.Alignment
;
282 assert(Queue
.Head
&& "queue was empty");
283 uint64_t LastSize
= ~(uint64_t)0;
284 for (auto I
= Queue
.Head
; I
; I
= Queue
.getNext(I
)) {
285 assert(I
->Alignment
== Queue
.Alignment
&& "bad field in queue");
286 assert(I
->Size
<= LastSize
&& "queue not in descending size order");
294 /// Helper function to remove a field from a queue.
295 auto spliceFromQueue
= [&](AlignmentQueue
*Queue
, Field
*Last
, Field
*Cur
) {
296 assert(Last
? Queue
->getNext(Last
) == Cur
: Queue
->Head
== Cur
);
298 // If we're removing Cur from a non-initial position, splice it out
299 // of the linked list.
301 Last
->Scratch
= Cur
->Scratch
;
303 // If Cur was the last field in the list, we need to update MinSize.
304 // We can just use the last field's size because the list is in
305 // descending order of size.
307 Queue
->MinSize
= Last
->Size
;
309 // Otherwise, replace the head.
311 if (auto NewHead
= Queue
->getNext(Cur
))
312 Queue
->Head
= NewHead
;
314 // If we just emptied the queue, destroy its bin.
316 FlexibleFieldsByAlignment
.erase(Queue
);
320 // Do layout into a local array. Doing this in-place on Fields is
321 // not really feasible.
322 SmallVector
<Field
, 16> Layout
;
323 Layout
.reserve(Fields
.size());
325 // The offset that we're currently looking to insert at (or after).
326 uint64_t LastEnd
= 0;
328 // Helper function to splice Cur out of the given queue and add it
329 // to the layout at the given offset.
330 auto addToLayout
= [&](AlignmentQueue
*Queue
, Field
*Last
, Field
*Cur
,
331 uint64_t Offset
) -> bool {
332 assert(Offset
== alignTo(LastEnd
, Cur
->Alignment
));
334 // Splice out. This potentially invalidates Queue.
335 spliceFromQueue(Queue
, Last
, Cur
);
337 // Add Cur to the layout.
338 Layout
.push_back(*Cur
);
339 Layout
.back().Offset
= Offset
;
340 LastEnd
= Layout
.back().getEndOffset();
342 // Always return true so that we can be tail-called.
346 // Helper function to try to find a field in the given queue that'll
347 // fit starting at StartOffset but before EndOffset (if present).
348 // Note that this never fails if EndOffset is not provided.
349 auto tryAddFillerFromQueue
= [&](AlignmentQueue
*Queue
, uint64_t StartOffset
,
350 std::optional
<uint64_t> EndOffset
) -> bool {
352 assert(StartOffset
== alignTo(LastEnd
, Queue
->Alignment
));
353 assert(!EndOffset
|| StartOffset
< *EndOffset
);
355 // Figure out the maximum size that a field can be, and ignore this
356 // queue if there's nothing in it that small.
358 (EndOffset
? *EndOffset
- StartOffset
: ~(uint64_t)0);
359 if (Queue
->MinSize
> MaxViableSize
)
362 // Find the matching field. Note that this should always find
363 // something because of the MinSize check above.
364 for (Field
*Cur
= Queue
->Head
, *Last
= nullptr; true;
365 Last
= Cur
, Cur
= Queue
->getNext(Cur
)) {
366 assert(Cur
&& "didn't find a match in queue despite its MinSize");
367 if (Cur
->Size
<= MaxViableSize
)
368 return addToLayout(Queue
, Last
, Cur
, StartOffset
);
371 llvm_unreachable("didn't find a match in queue despite its MinSize");
374 // Helper function to find the "best" flexible-offset field according
375 // to the criteria described above.
376 auto tryAddBestField
= [&](std::optional
<uint64_t> BeforeOffset
) -> bool {
377 assert(!BeforeOffset
|| LastEnd
< *BeforeOffset
);
378 auto QueueB
= FlexibleFieldsByAlignment
.begin();
379 auto QueueE
= FlexibleFieldsByAlignment
.end();
381 // Start by looking for the most-aligned queue that doesn't need any
382 // leading padding after LastEnd.
383 auto FirstQueueToSearch
= QueueB
;
384 for (; FirstQueueToSearch
!= QueueE
; ++FirstQueueToSearch
) {
385 if (isAligned(FirstQueueToSearch
->Alignment
, LastEnd
))
389 uint64_t Offset
= LastEnd
;
391 // Invariant: all of the queues in [FirstQueueToSearch, QueueE)
392 // require the same initial padding offset.
394 // Search those queues in descending order of alignment for a
395 // satisfactory field.
396 for (auto Queue
= FirstQueueToSearch
; Queue
!= QueueE
; ++Queue
) {
397 if (tryAddFillerFromQueue(Queue
, Offset
, BeforeOffset
))
401 // Okay, we don't need to scan those again.
402 QueueE
= FirstQueueToSearch
;
404 // If we started from the first queue, we're done.
405 if (FirstQueueToSearch
== QueueB
)
408 // Otherwise, scan backwards to find the most-aligned queue that
409 // still has minimal leading padding after LastEnd. If that
410 // minimal padding is already at or past the end point, we're done.
411 --FirstQueueToSearch
;
412 Offset
= alignTo(LastEnd
, FirstQueueToSearch
->Alignment
);
413 if (BeforeOffset
&& Offset
>= *BeforeOffset
)
415 while (FirstQueueToSearch
!= QueueB
&&
416 Offset
== alignTo(LastEnd
, FirstQueueToSearch
[-1].Alignment
))
417 --FirstQueueToSearch
;
421 // Phase 1: fill the gaps between fixed-offset fields with the best
422 // flexible-offset field that fits.
423 for (auto I
= Fields
.begin(); I
!= FirstFlexible
; ++I
) {
424 assert(LastEnd
<= I
->Offset
);
425 while (LastEnd
!= I
->Offset
) {
426 if (!tryAddBestField(I
->Offset
))
429 Layout
.push_back(*I
);
430 LastEnd
= I
->getEndOffset();
437 // Phase 2: repeatedly add the best flexible-offset field until
439 while (!FlexibleFieldsByAlignment
.empty()) {
440 bool Success
= tryAddBestField(std::nullopt
);
441 assert(Success
&& "didn't find a field with no fixed limit?");
445 // Copy the layout back into place.
446 assert(Layout
.size() == Fields
.size());
447 memcpy(Fields
.data(), Layout
.data(),
448 Fields
.size() * sizeof(OptimizedStructLayoutField
));
451 // Make a final check that the layout is valid.
452 checkValidLayout(Fields
, LastEnd
, MaxAlign
);
455 return std::make_pair(LastEnd
, MaxAlign
);