/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once

#include <numeric>

#include "BLI_generic_span.hh"
#include "BLI_generic_virtual_array.hh"
#include "BLI_index_mask.hh"
#include "BLI_offset_indices.hh"
#include "BLI_task.hh"
#include "BLI_virtual_array.hh"
16 namespace blender::array_utils
{
19 * Fill the destination span by copying all values from the `src` array. Threaded based on
22 void copy(const GVArray
&src
, GMutableSpan dst
, int64_t grain_size
= 4096);
24 inline void copy(const VArray
<T
> &src
, MutableSpan
<T
> dst
, const int64_t grain_size
= 4096)
26 BLI_assert(src
.size() == dst
.size());
27 threading::parallel_for(src
.index_range(), grain_size
, [&](const IndexRange range
) {
28 src
.materialize_to_uninitialized(range
, dst
);
33 * Fill the destination span by copying all values from the `src` array. Threaded based on
37 inline void copy(const Span
<T
> src
, MutableSpan
<T
> dst
, const int64_t grain_size
= 4096)
39 BLI_assert(src
.size() == dst
.size());
40 threading::parallel_for(src
.index_range(), grain_size
, [&](const IndexRange range
) {
41 dst
.slice(range
).copy_from(src
.slice(range
));
46 * Fill the destination span by copying masked values from the `src` array. Threaded based on
49 void copy(const GVArray
&src
,
50 const IndexMask
&selection
,
52 int64_t grain_size
= 4096);
55 * Fill the destination span by copying values from the `src` array. Threaded based on
59 inline void copy(const Span
<T
> src
,
60 const IndexMask
&selection
,
62 const int64_t grain_size
= 4096)
64 BLI_assert(src
.size() == dst
.size());
65 selection
.foreach_index_optimized
<int64_t>(GrainSize(grain_size
),
66 [&](const int64_t i
) { dst
[i
] = src
[i
]; });
70 * Fill the specified indices of the destination with the values in the source span.
72 template<typename T
, typename IndexT
>
73 inline void scatter(const Span
<T
> src
,
74 const Span
<IndexT
> indices
,
76 const int64_t grain_size
= 4096)
78 BLI_assert(indices
.size() == src
.size());
79 threading::parallel_for(indices
.index_range(), grain_size
, [&](const IndexRange range
) {
80 for (const int64_t i
: range
) {
81 dst
[indices
[i
]] = src
[i
];
87 inline void scatter(const Span
<T
> src
,
88 const IndexMask
&indices
,
90 const int64_t grain_size
= 4096)
92 BLI_assert(indices
.size() == src
.size());
93 BLI_assert(indices
.min_array_size() <= dst
.size());
94 indices
.foreach_index_optimized
<int64_t>(
95 GrainSize(grain_size
),
96 [&](const int64_t index
, const int64_t pos
) { dst
[index
] = src
[pos
]; });
100 * Fill the destination span by gathering indexed values from the `src` array.
102 void gather(const GVArray
&src
,
103 const IndexMask
&indices
,
105 int64_t grain_size
= 4096);
108 * Fill the destination span by gathering indexed values from the `src` array.
110 void gather(GSpan src
, const IndexMask
&indices
, GMutableSpan dst
, int64_t grain_size
= 4096);
113 * Fill the destination span by gathering indexed values from the `src` array.
116 inline void gather(const VArray
<T
> &src
,
117 const IndexMask
&indices
,
119 const int64_t grain_size
= 4096)
121 BLI_assert(indices
.size() == dst
.size());
122 threading::parallel_for(indices
.index_range(), grain_size
, [&](const IndexRange range
) {
123 src
.materialize_compressed_to_uninitialized(indices
.slice(range
), dst
.slice(range
));
128 * Fill the destination span by gathering indexed values from the `src` array.
130 template<typename T
, typename IndexT
>
131 inline void gather(const Span
<T
> src
,
132 const IndexMask
&indices
,
134 const int64_t grain_size
= 4096)
136 BLI_assert(indices
.size() == dst
.size());
137 indices
.foreach_segment(GrainSize(grain_size
),
138 [&](const IndexMaskSegment segment
, const int64_t segment_pos
) {
139 for (const int64_t i
: segment
.index_range()) {
140 dst
[segment_pos
+ i
] = src
[segment
[i
]];
146 * Fill the destination span by gathering indexed values from the `src` array.
148 template<typename T
, typename IndexT
>
149 inline void gather(const Span
<T
> src
,
150 const Span
<IndexT
> indices
,
152 const int64_t grain_size
= 4096)
154 BLI_assert(indices
.size() == dst
.size());
155 threading::parallel_for(indices
.index_range(), grain_size
, [&](const IndexRange range
) {
156 for (const int64_t i
: range
) {
157 dst
[i
] = src
[indices
[i
]];
163 * Fill the destination span by gathering indexed values from the `src` array.
165 template<typename T
, typename IndexT
>
166 inline void gather(const VArray
<T
> &src
,
167 const Span
<IndexT
> indices
,
169 const int64_t grain_size
= 4096)
171 BLI_assert(indices
.size() == dst
.size());
172 devirtualize_varray(src
, [&](const auto &src
) {
173 threading::parallel_for(indices
.index_range(), grain_size
, [&](const IndexRange range
) {
174 for (const int64_t i
: range
) {
175 dst
[i
] = src
[indices
[i
]];
182 inline void gather_group_to_group(const OffsetIndices
<int> src_offsets
,
183 const OffsetIndices
<int> dst_offsets
,
184 const IndexMask
&selection
,
188 selection
.foreach_index(GrainSize(512), [&](const int64_t src_i
, const int64_t dst_i
) {
189 dst
.slice(dst_offsets
[dst_i
]).copy_from(src
.slice(src_offsets
[src_i
]));
194 inline void gather_group_to_group(const OffsetIndices
<int> src_offsets
,
195 const OffsetIndices
<int> dst_offsets
,
196 const IndexMask
&selection
,
200 selection
.foreach_index(GrainSize(512), [&](const int64_t src_i
, const int64_t dst_i
) {
201 src
.materialize_compressed(src_offsets
[src_i
], dst
.slice(dst_offsets
[dst_i
]));
206 inline void gather_to_groups(const OffsetIndices
<int> dst_offsets
,
207 const IndexMask
&src_selection
,
211 src_selection
.foreach_index(GrainSize(1024), [&](const int src_i
, const int dst_i
) {
212 dst
.slice(dst_offsets
[dst_i
]).fill(src
[src_i
]);
217 * Copy the \a src data from the groups defined by \a src_offsets to the groups in \a dst defined
218 * by \a dst_offsets. Groups to use are masked by \a selection, and it is assumed that the
219 * corresponding groups have the same size.
221 void copy_group_to_group(OffsetIndices
<int> src_offsets
,
222 OffsetIndices
<int> dst_offsets
,
223 const IndexMask
&selection
,
227 void copy_group_to_group(OffsetIndices
<int> src_offsets
,
228 OffsetIndices
<int> dst_offsets
,
229 const IndexMask
&selection
,
233 copy_group_to_group(src_offsets
, dst_offsets
, selection
, GSpan(src
), GMutableSpan(dst
));
237 * Count the number of occurrences of each index.
238 * \param indices: The indices to count.
239 * \param counts: The number of occurrences of each index. Typically initialized to zero.
240 * Must be large enough to contain the maximum index.
242 * \note The memory referenced by the two spans must not overlap.
244 void count_indices(Span
<int> indices
, MutableSpan
<int> counts
);
246 void invert_booleans(MutableSpan
<bool> span
);
247 void invert_booleans(MutableSpan
<bool> span
, const IndexMask
&mask
);
249 int64_t count_booleans(const VArray
<bool> &varray
);
250 int64_t count_booleans(const VArray
<bool> &varray
, const IndexMask
&mask
);
252 enum class BooleanMix
{
258 BooleanMix
booleans_mix_calc(const VArray
<bool> &varray
, IndexRange range_to_check
);
259 inline BooleanMix
booleans_mix_calc(const VArray
<bool> &varray
)
261 return booleans_mix_calc(varray
, varray
.index_range());
265 * Finds all the index ranges for which consecutive values in \a span equal \a value.
267 template<typename T
> inline Vector
<IndexRange
> find_all_ranges(const Span
<T
> span
, const T
&value
)
269 if (span
.is_empty()) {
270 return Vector
<IndexRange
>();
272 Vector
<IndexRange
> ranges
;
273 int64_t length
= (span
.first() == value
) ? 1 : 0;
274 for (const int64_t i
: span
.index_range().drop_front(1)) {
275 if (span
[i
- 1] == value
&& span
[i
] != value
) {
276 ranges
.append(IndexRange::from_end_size(i
, length
));
279 else if (span
[i
] == value
) {
284 ranges
.append(IndexRange::from_end_size(span
.size(), length
));
290 * Fill the span with increasing indices: 0, 1, 2, ...
291 * Optionally, the start value can be provided.
293 template<typename T
> inline void fill_index_range(MutableSpan
<T
> span
, const T start
= 0)
295 std::iota(span
.begin(), span
.end(), start
);
299 bool indexed_data_equal(const Span
<T
> all_values
, const Span
<int> indices
, const Span
<T
> values
)
301 for (const int i
: indices
.index_range()) {
302 if (all_values
[indices
[i
]] != values
[i
]) {
309 bool indices_are_range(Span
<int> indices
, IndexRange range
);
311 } // namespace blender::array_utils