//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// UNSUPPORTED: c++03, c++11, c++14

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "common.h"
#include "test_iterators.h"
// types of containers we'll want to test, covering interesting iterator types
struct VectorContainer {
  template <typename... Args>
  using type = std::vector<Args...>;

  static constexpr const char* Name = "Vector";
};

struct SetContainer {
  template <typename... Args>
  using type = std::set<Args...>;

  static constexpr const char* Name = "Set";
};

using AllContainerTypes = std::tuple<VectorContainer, SetContainer>;
// set_intersection performance may depend on where matching values lie
enum class OverlapPosition {
  None,
  Front,
  // performance-wise, matches at the back are identical to ones at the front
  Interlaced,
};

struct AllOverlapPositions : EnumValuesAsTuple<AllOverlapPositions, OverlapPosition, 3> {
  static constexpr const char* Names[] = {"None", "Front", "Interlaced"};
};
// forward_iterator wrapping which, for each increment, moves the underlying iterator forward Stride elements
template <typename Wrapped>
class StridedFwdIt {
  Wrapped base_;
  unsigned stride_;

public:
  using iterator_category = std::forward_iterator_tag;
  using difference_type   = typename Wrapped::difference_type;
  using value_type        = typename Wrapped::value_type;
  using pointer           = typename Wrapped::pointer;
  using reference         = typename Wrapped::reference;
  StridedFwdIt(Wrapped base, unsigned stride) : base_(base), stride_(stride) { assert(stride_ != 0); }

  StridedFwdIt operator++() {
    for (unsigned i = 0; i < stride_; ++i)
      ++base_;
    return *this;
  }

  StridedFwdIt operator++(int) {
    auto tmp = *this;
    ++*this;
    return tmp;
  }

  value_type& operator*() { return *base_; }
  const value_type& operator*() const { return *base_; }
  // operator-> has to return a pointer (or a type that itself overloads ->),
  // not a reference
  value_type* operator->() { return &*base_; }
  const value_type* operator->() const { return &*base_; }
  bool operator==(const StridedFwdIt& o) const { return base_ == o.base_; }
  bool operator!=(const StridedFwdIt& o) const { return !operator==(o); }
};

template <typename Wrapped>
StridedFwdIt(Wrapped, unsigned) -> StridedFwdIt<Wrapped>;
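// Illustrative only (hypothetical vector, not part of the benchmark): with a
// stride of 2 the wrapper visits every second element, and the deduction guide
// above lets us omit the wrapped iterator type:
//
//   std::vector<int> v{0, 1, 2, 3, 4, 5};
//   for (StridedFwdIt it(v.begin(), 2), last(v.end(), 2); it != last; ++it)
//     ; // *it yields 0, then 2, then 4
//
// Note that the stride has to divide the distance to the end iterator evenly,
// or the base iterators never compare equal and the loop won't terminate.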
template <class T>
std::vector<T> getVectorOfRandom(size_t N) {
  std::vector<T> v;
  fillValues(v, N, Order::Random);
  sortValues(v, Order::Random);
  return std::vector<T>(v);
}
// Realistically, data won't all be nicely contiguous in a container, so we go
// through some effort to ensure that it's shuffled through memory. This is
// especially important for containers with non-contiguous element storage, but
// it affects even a std::vector: when you copy a std::vector<std::string>, the
// underlying storage of the copy's char arrays is likely to have high locality.
template <class Container>
std::pair<Container, Container> genCacheUnfriendlyData(size_t size1, size_t size2, OverlapPosition pos) {
  using ValueType = typename Container::value_type;
  auto move_into  = [](auto first, auto last) {
    Container out;
    std::move(first, last, std::inserter(out, out.begin()));
    return out;
  };
  const auto src_size        = pos == OverlapPosition::None ? size1 + size2 : std::max(size1, size2);
  std::vector<ValueType> src = getVectorOfRandom<ValueType>(src_size);

  if (pos == OverlapPosition::None) {
    std::sort(src.begin(), src.end());
    return std::make_pair(move_into(src.begin(), src.begin() + size1), move_into(src.begin() + size1, src.end()));
  }
  // All other overlap types will have to copy some part of the data, but if
  // we copy after sorting it will likely have high locality, so we sort
  // each copy separately.
  auto copy = src;
  std::sort(src.begin(), src.end());
  std::sort(copy.begin(), copy.end());

  switch (pos) {
  case OverlapPosition::None:
    // we like -Wswitch :)
    break;

  case OverlapPosition::Front:
    return std::make_pair(move_into(src.begin(), src.begin() + size1), move_into(copy.begin(), copy.begin() + size2));

  case OverlapPosition::Interlaced: {
    const auto stride1 = size1 < size2 ? size2 / size1 : 1;
    const auto stride2 = size2 < size1 ? size1 / size2 : 1;
    return std::make_pair(move_into(StridedFwdIt(src.begin(), stride1), StridedFwdIt(src.end(), stride1)),
                          move_into(StridedFwdIt(copy.begin(), stride2), StridedFwdIt(copy.end(), stride2)));
  }
  }
  std::abort(); // would be std::unreachable() if it could
  return std::pair<Container, Container>();
}
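// Worked example (illustrative values): for size1 == 2, size2 == 4 and
// Interlaced overlap, src holds 4 sorted values {a, b, c, d} and copy is an
// independently allocated sorted duplicate. stride1 == 2 and stride2 == 1, so
// the first container gets {a, c} and the second {a, b, c, d}: the inputs
// overlap, the matches are spread across the whole range, and the elements of
// the two containers don't sit next to each other in memory.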
template <class ValueType, class Container, class Overlap>
struct SetIntersection {
  using ContainerType = typename Container::template type<Value<ValueType>>;
  size_t size1_;
  size_t size2_;

  SetIntersection(size_t size1, size_t size2) : size1_(size1), size2_(size2) {}
  bool skip() const noexcept {
    // let's save some time and skip symmetrical runs: (32, 1024) measures
    // essentially the same work as (1024, 32)
    return size1_ < size2_;
  }
  void run(benchmark::State& state) const {
    auto input = genCacheUnfriendlyData<ContainerType>(size1_, size2_, Overlap());
    std::vector<Value<ValueType>> out(std::min(size1_, size2_));

    // Batch the calls so that timer overhead stays negligible: the smaller the
    // inputs, the more set_intersection calls we run per measurement.
    const auto BATCH_SIZE = std::max(size_t{512}, (2 * TestSetElements) / (size1_ + size2_));
    for (const auto& _ : state) {
      while (state.KeepRunningBatch(BATCH_SIZE)) {
        for (unsigned i = 0; i < BATCH_SIZE; ++i) {
          const auto& [c1, c2] = input;
          auto res             = std::set_intersection(c1.begin(), c1.end(), c2.begin(), c2.end(), out.begin());
          benchmark::DoNotOptimize(res);
        }
      }
    }
  }
  std::string name() const {
    return std::string("SetIntersection") + Overlap::name() + '_' + Container::Name + ValueType::name() + '_' +
           std::to_string(size1_) + '_' + std::to_string(size2_);
  }
};
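// Illustrative only: a run with the Interlaced overlap over Vector containers
// at sizes 1024 and 32 is labelled "SetIntersectionInterlaced_Vector" followed
// by the value type's name (from common.h) and "_1024_32" in the benchmark
// output.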
int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  if (benchmark::ReportUnrecognizedArguments(argc, argv))
    return 1;

  makeCartesianProductBenchmark<SetIntersection, AllValueTypes, AllContainerTypes, AllOverlapPositions>(
      Quantities, Quantities);
  benchmark::RunSpecifiedBenchmarks();
  return 0;
}