//==- llvm/Support/ArrayRecycler.h - Recycling of Arrays ---------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ArrayRecycler class template which can recycle small
// arrays allocated from one of the allocators in Allocator.h
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARRAYRECYCLER_H
#define LLVM_SUPPORT_ARRAYRECYCLER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MathExtras.h"

namespace llvm {

/// Recycle small arrays allocated from a BumpPtrAllocator.
///
/// Arrays are allocated in a small number of fixed sizes. For each supported
/// array size, the ArrayRecycler keeps a free list of available arrays.
///
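/// A minimal usage sketch (illustrative only; uint64_t stands in for any
/// element type that satisfies the static_asserts below):
/// \code
///   BumpPtrAllocator Alloc;
///   ArrayRecycler<uint64_t> Recycler;
///
///   // Request room for at least 5 elements; the capacity is rounded up.
///   auto Cap = ArrayRecycler<uint64_t>::Capacity::get(5);
///   uint64_t *Arr = Recycler.allocate(Cap, Alloc);
///
///   // ... use Arr[0 .. Cap.getSize() - 1] ...
///
///   // Make the array available for reuse by later allocate() calls.
///   Recycler.deallocate(Cap, Arr);
///
///   // Return everything to the allocator before the recycler is destroyed.
///   Recycler.clear(Alloc);
/// \endcode
///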
template <class T, size_t Align = alignof(T)> class ArrayRecycler {
  // The free list for a given array size is a simple singly linked list.
  // We can't use iplist or Recycler here since those classes can't be copied.
  struct FreeList {
    FreeList *Next;
  };

  static_assert(Align >= alignof(FreeList), "Object underaligned");
  static_assert(sizeof(T) >= sizeof(FreeList), "Objects are too small");

  // Keep a free list for each array size.
  SmallVector<FreeList*, 8> Bucket;

  // Remove an entry from the free list in Bucket[Idx] and return it.
  // Return NULL if no entries are available.
  T *pop(unsigned Idx) {
    if (Idx >= Bucket.size())
      return nullptr;
    FreeList *Entry = Bucket[Idx];
    if (!Entry)
      return nullptr;
    __asan_unpoison_memory_region(Entry, Capacity::get(Idx).getSize());
    Bucket[Idx] = Entry->Next;
    __msan_allocated_memory(Entry, Capacity::get(Idx).getSize());
    return reinterpret_cast<T*>(Entry);
  }

  // Add an entry to the free list at Bucket[Idx].
  void push(unsigned Idx, T *Ptr) {
    assert(Ptr && "Cannot recycle NULL pointer");
    FreeList *Entry = reinterpret_cast<FreeList*>(Ptr);
    if (Idx >= Bucket.size())
      Bucket.resize(size_t(Idx) + 1);
    Entry->Next = Bucket[Idx];
    Bucket[Idx] = Entry;
    __asan_poison_memory_region(Ptr, Capacity::get(Idx).getSize());
  }

public:
  /// The size of an allocated array is represented by a Capacity instance.
  ///
  /// This class is much smaller than a size_t, and it provides methods to work
  /// with the set of legal array capacities.
  class Capacity {
    uint8_t Index;
    explicit Capacity(uint8_t idx) : Index(idx) {}

  public:
    Capacity() : Index(0) {}

    /// Get the capacity of an array that can hold at least N elements.
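    /// Requests are rounded up to the next power of two; for example,
    /// get(5) yields a capacity with getSize() == 8 and getBucket() == 3.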
    static Capacity get(size_t N) {
      return Capacity(N ? Log2_64_Ceil(N) : 0);
    }

    /// Get the number of elements in an array with this capacity.
    size_t getSize() const { return size_t(1u) << Index; }

    /// Get the bucket number for this capacity.
    unsigned getBucket() const { return Index; }

    /// Get the next larger capacity. Large capacities grow exponentially, so
    /// this function can be used to reallocate incrementally growing vectors
    /// in amortized linear time.
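    ///
    /// A sketch of such a grow step (illustrative only; Data, Size, Cap,
    /// Recycler, and Alloc are hypothetical names, not part of this header):
    /// \code
    ///   if (Size == Cap.getSize()) {
    ///     Capacity NewCap = Cap.getNext();
    ///     T *NewData = Recycler.allocate(NewCap, Alloc);
    ///     std::copy(Data, Data + Size, NewData);
    ///     Recycler.deallocate(Cap, Data);
    ///     Data = NewData;
    ///     Cap = NewCap;
    ///   }
    /// \endcode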
    Capacity getNext() const { return Capacity(Index + 1); }
  };

  ~ArrayRecycler() {
    // The client should always call clear() so recycled arrays can be returned
    // to the allocator.
    assert(Bucket.empty() && "Non-empty ArrayRecycler deleted!");
  }

  /// Release all the tracked allocations to the allocator. The recycler must
  /// be free of any tracked allocations before being deleted.
  template<class AllocatorType>
  void clear(AllocatorType &Allocator) {
    for (; !Bucket.empty(); Bucket.pop_back())
      while (T *Ptr = pop(Bucket.size() - 1))
        Allocator.Deallocate(Ptr);
  }

  /// Special case for BumpPtrAllocator which has an empty Deallocate()
  /// function.
  ///
  /// There is no need to traverse the free lists, pulling all the objects into
  /// cache.
  void clear(BumpPtrAllocator&) {
    Bucket.clear();
  }

  /// Allocate an array of at least the requested capacity.
  ///
  /// Return an existing recycled array, or allocate one from Allocator if
  /// none are available for recycling.
  ///
  template<class AllocatorType>
  T *allocate(Capacity Cap, AllocatorType &Allocator) {
    // Try to recycle an existing array.
    if (T *Ptr = pop(Cap.getBucket()))
      return Ptr;
    // Nope, get more memory.
    return static_cast<T*>(Allocator.Allocate(sizeof(T)*Cap.getSize(), Align));
  }

  /// Deallocate an array with the specified Capacity.
  ///
  /// Cap must be the same capacity that was given to allocate().
  ///
  void deallocate(Capacity Cap, T *Ptr) {
    push(Cap.getBucket(), Ptr);
  }
};

} // end llvm namespace

#endif