/*
 * Copyright (C) 2024 Mikulas Patocka
 *
 * This file is part of Ajla.
 *
 * Ajla is free software: you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * Ajla. If not, see <https://www.gnu.org/licenses/>.
 */
bool index_export_to_int(const mpint_t *t, int_default_t *result)
{
	ajla_flat_option_t sink;	/* inferred local for the discarded success flag */
	return cat(mpint_export_to_,int_default_t)(t, result, &sink);
}
static inline mpint_t *index_alloc_mp_(unsigned long bits argument_position)
{
	mpint_t *result;
	result = mem_alloc_compressed_mayfail(mpint_t *, sizeof(mpint_t), NULL);
	mpint_alloc_mayfail(result, bits, NULL);
	return result;
}

void index_free_mp_(array_index_t idx argument_position)
{
	mpint_t *mp = index_get_mp_(idx pass_position);
	mpint_free(mp);
	mem_free_compressed(mp);
}
attr_noinline array_index_t index_copy_mp_(array_index_t idx argument_position)
{
	array_index_t result;
	mpint_t *mp;
	mp = mem_alloc_compressed_mayfail(mpint_t *, sizeof(mpint_t), NULL);
	mpint_alloc_copy_mayfail(mp, index_get_mp_(idx pass_position), NULL);
	index_set_mp_(&result, mp pass_position);
	return result;
}

attr_noinline bool index_ge_index_mp_(array_index_t idx1, array_index_t idx2 argument_position)
{
	ajla_flat_option_t result;
	mpint_less_equal(index_get_mp_(idx2 pass_position), index_get_mp_(idx1 pass_position), &result, NULL);
	return result;
}

attr_noinline bool index_eq_index_mp_(array_index_t idx1, array_index_t idx2 argument_position)
{
	ajla_flat_option_t result;
	mpint_equal(index_get_mp_(idx2 pass_position), index_get_mp_(idx1 pass_position), &result, NULL);
	return result;
}
attr_noinline array_index_t index_op_mp_(array_index_t idx2, array_index_t idx3, unsigned flags, ajla_error_t *err argument_position)
{
	array_index_t result;
	mpint_t *mp2_p, *mp3_p;
	mpint_t mp2, mp3, *mp;
	bool succeeded;

	if (!index_is_mp_(idx2 pass_position)) {
		if (flags & INDEX_OP_MP_ARG3_) {
			mp2_p = &mp2;
		} else {
			mp2_p = mem_alloc_compressed_mayfail(mpint_t *, sizeof(mpint_t), NULL);
		}
		index_init_from_int(mp2_p, index_to_int_(idx2 pass_position));
	} else {
		mp2_p = index_get_mp_(idx2 pass_position);
	}
	if (!index_is_mp_(idx3 pass_position)) {
		index_init_from_int(&mp3, index_to_int_(idx3 pass_position));
		mp3_p = &mp3;
	} else {
		mp3_p = index_get_mp_(idx3 pass_position);
	}
	if (flags & INDEX_OP_MP_ARG3_) {
		mp = index_alloc_mp_(maximum(mpint_estimate_bits(mp2_p), mpint_estimate_bits(mp3_p)) pass_position);
		if (!(flags & INDEX_OP_MP_SUB_))
			succeeded = mpint_add(mp2_p, mp3_p, mp, err);
		else
			succeeded = mpint_subtract(mp2_p, mp3_p, mp, err);
		index_set_mp_(&result, mp pass_position);
		index_alloc_leak_(&result pass_position);
	} else {
		if (!(flags & INDEX_OP_MP_SUB_))
			succeeded = mpint_add(mp2_p, mp3_p, mp2_p, err);
		else
			succeeded = mpint_subtract(mp2_p, mp3_p, mp2_p, err);
		index_set_mp_(&result, mp2_p pass_position);
		index_free_leak_(&idx2 pass_position);
		index_alloc_leak_(&result pass_position);
	}

	if (unlikely(!succeeded)) {
		index_free_(&result pass_file_line);
		return index_invalid();
	}
#ifndef UNUSUAL_MPINT_ARRAY_INDICES
	if (flags & INDEX_OP_MP_SUB_) {
		int_default_t id;
		if (index_export_to_int(index_get_mp_(result pass_position), &id)) {
			index_free_mp_(result pass_position);
			ajla_assert_lo(id >= 0, (caller_file_line, "index_op_mp_: the result is negative: %"PRIdMAX"", (intmax_t)id));
			index_set_value_(&result, (uint_default_t)id);
		}
	}
#endif
	return result;
}
static int_default_t array_align_alloc(int_default_t len)
{
#if defined(__IBMC__) && INT_DEFAULT_BITS > 32
	/* compiler bug - causes internal error */
#endif
	uint_default_t val = (uint_default_t)len;
#if defined(HAVE_BUILTIN_CLZ)
	if (is_power_of_2(sizeof(int_default_t)) && sizeof(int_default_t) == sizeof(unsigned)) {
		val = (uint_default_t)1 << ((unsigned)(sizeof(uint_default_t) * 8 - 1) CLZ_BSR_OP __builtin_clz(val + val - 1));
		if (unlikely((int_default_t)val < 0))
			return likely(!len) ? 0 : val - 1;
		return val;
	} else if (is_power_of_2(sizeof(int_default_t)) && sizeof(int_default_t) == sizeof(unsigned long long)) {
		val = (uint_default_t)1 << ((unsigned)(sizeof(uint_default_t) * 8 - 1) CLZ_BSR_OP __builtin_clzll(val + val - 1));
		if (unlikely((int_default_t)val < 0))
			return likely(!len) ? 0 : val - 1;
		return val;
	}
#endif
	val--;
	val |= val >> 1;
	val |= val >> 2;
	val |= val >> 4;
	val |= val >> 8;
	val |= val >> 15 >> 1;
	val |= val >> 15 >> 15 >> 2;
	val |= val >> 15 >> 15 >> 15 >> 15 >> 4;
	val++;
	if (unlikely((int_default_t)val < 0))
		return likely(!len) ? 0 : val - 1;
	return val;
}
bool attr_fastcall array_read(struct data *array, array_index_t idx, pointer_t **result_ptr, unsigned char **result_flat, const struct type **flat_type, int_default_t *run)
{
again:
	switch (da_tag(array)) {
	case DATA_TAG_array_flat: {
		if (unlikely(index_ge_int(idx, da(array,array_flat)->n_used_entries)))
			return false;
		*run = da(array,array_flat)->n_used_entries - index_to_int(idx);
		*flat_type = da(array,array_flat)->type;
		*result_flat = da_array_flat(array) + index_to_int(idx) * (size_t)(*flat_type)->size;
		break;
	}
	case DATA_TAG_array_slice: {
		if (unlikely(index_ge_int(idx, da(array,array_slice)->n_entries)))
			return false;
		*run = da(array,array_slice)->n_entries - index_to_int(idx);
		*flat_type = da(array,array_slice)->type;
		*result_flat = da(array,array_slice)->flat_data_minus_data_array_offset + data_array_offset + index_to_int(idx) * (size_t)(*flat_type)->size;
		break;
	}
	case DATA_TAG_array_pointers: {
		if (unlikely(index_ge_int(idx, da(array,array_pointers)->n_used_entries)))
			return false;
		*run = da(array,array_pointers)->n_used_entries - index_to_int(idx);
		*result_ptr = &da(array,array_pointers)->pointer[index_to_int(idx)];
		break;
	}
	case DATA_TAG_array_same: {
		if (unlikely(index_ge_index(idx, da(array,array_same)->n_entries)))
			return false;
		array_index_t idx_diff;
		index_sub3(&idx_diff, da(array,array_same)->n_entries, idx);
		if (likely(index_is_int(idx_diff))) {
			*run = -index_to_int(idx_diff);
		} else {
			*run = sign_bit(int_default_t);
		}
		index_free(&idx_diff);
		*result_ptr = &da(array,array_same)->pointer;
		break;
	}
	case DATA_TAG_array_btree: {
		btree_entries_t bt_pos;
		pointer_t *new_array_ptr;
		struct data *new_array;
		binary_search(btree_entries_t, da(array,array_btree)->n_used_btree_entries, bt_pos, false, index_ge_index(idx, da(array,array_btree)->btree[bt_pos].end_index), break);
		if (unlikely(bt_pos == da(array,array_btree)->n_used_btree_entries))
			return false;
		if (bt_pos) {
			struct btree_level *bt_prev = &da(array,array_btree)->btree[bt_pos - 1];
			index_sub(&idx, bt_prev->end_index);
		}
		new_array_ptr = &da(array,array_btree)->btree[bt_pos].node;
		new_array = pointer_get_data(*new_array_ptr);
		da_array_assert_son(array, new_array);
		array = new_array;
		goto again;
	}
	default:
		internal(file_line, "array_read: invalid array tag %u", da_tag(array));
	}
	return true;
}
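/*
 * A walk_context records the position of a b-tree walk: the root pointer,
 * the slot (ptr) currently being descended into, the parent node and the
 * position within it, and the index still to be resolved, kept relative to
 * the current node.
 */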
struct walk_context {
	pointer_t *root;
	pointer_t *ptr;
	struct data *upper_level;
	btree_entries_t upper_level_pos;
	array_index_t idx;
};
static struct data *array_clone_flat(pointer_t *ptr, int_default_t n_allocated, ajla_error_t *err)
{
	struct data *orig, *clone;

	orig = pointer_get_data(*ptr);

	ajla_assert(n_allocated >= da(orig,array_flat)->n_used_entries, (file_line, "array_clone_flat: trimming btree from %"PRIdMAX" to %"PRIdMAX"", (intmax_t)da(orig,array_flat)->n_used_entries, (intmax_t)n_allocated));

	clone = data_alloc_array_flat_mayfail(da(orig,array_flat)->type, n_allocated, da(orig,array_flat)->n_used_entries, false, err pass_file_line);
	if (unlikely(!clone))
		return NULL;

	memcpy(da_array_flat(clone), da_array_flat(orig), da(orig,array_flat)->n_used_entries * da_array_flat_element_size(orig));

	pointer_dereference(*ptr);
	*ptr = pointer_data(clone);
	return clone;
}

static struct data *array_clone_pointers(pointer_t *ptr, int_default_t n_allocated, ajla_error_t *err)
{
	struct data *orig, *clone;
	int_default_t i, n_used;

	orig = pointer_get_data(*ptr);

	ajla_assert(n_allocated >= da(orig,array_pointers)->n_used_entries, (file_line, "array_clone_pointers: trimming btree from %"PRIdMAX" to %"PRIdMAX"", (intmax_t)da(orig,array_pointers)->n_used_entries, (intmax_t)n_allocated));

	clone = data_alloc_array_pointers_mayfail(n_allocated, da(orig,array_pointers)->n_used_entries, err pass_file_line);
	if (unlikely(!clone))
		return NULL;

	n_used = da(clone,array_pointers)->n_used_entries;
	for (i = 0; i < n_used; i++)
		da(clone,array_pointers)->pointer[i] = pointer_reference(&da(orig,array_pointers)->pointer[i]);

	pointer_dereference(*ptr);
	*ptr = pointer_data(clone);
	return clone;
}
static struct data *array_clone_btree(pointer_t *ptr, btree_entries_t n_allocated, ajla_error_t *err)
{
	struct data *orig, *clone;
	btree_entries_t i, n;

	orig = pointer_get_data(*ptr);

	ajla_assert(n_allocated >= da(orig,array_btree)->n_used_btree_entries, (file_line, "array_clone_btree: trimming btree from %"PRIuMAX" to %"PRIuMAX"", (uintmax_t)da(orig,array_btree)->n_used_btree_entries, (uintmax_t)n_allocated));

	clone = data_alloc_flexible(array_btree, btree, n_allocated, err);
	if (unlikely(!clone))
		return NULL;

	n = da(clone,array_btree)->n_used_btree_entries = da(orig,array_btree)->n_used_btree_entries;
	da(clone,array_btree)->n_allocated_btree_entries = n_allocated;
	da(clone,array_btree)->depth = da(orig,array_btree)->depth;
	for (i = 0; i < n; i++) {
		index_copy(&da(clone,array_btree)->btree[i].end_index, da(orig,array_btree)->btree[i].end_index);
		pointer_reference_owned(da(clone,array_btree)->btree[i].node = da(orig,array_btree)->btree[i].node);
	}

	pointer_dereference(*ptr);
	*ptr = pointer_data(clone);
	return clone;
}
struct data *array_clone(pointer_t *ptr, ajla_error_t *err)
{
	struct data *orig, *clone;

	orig = pointer_get_data(*ptr);
	switch (da_tag(orig)) {
	case DATA_TAG_array_flat: {
		return array_clone_flat(ptr, da(orig,array_flat)->n_allocated_entries, err);
	}
	case DATA_TAG_array_slice: {
		clone = data_alloc_array_flat_mayfail(da(orig,array_slice)->type, da(orig,array_slice)->n_entries, da(orig,array_slice)->n_entries, false, err pass_file_line);
		if (unlikely(!clone))
			return NULL;
		memcpy(da_array_flat(clone), da(orig,array_slice)->flat_data_minus_data_array_offset + data_array_offset, da(orig,array_slice)->n_entries * da_array_flat_element_size(clone));
		break;
	}
	case DATA_TAG_array_pointers: {
		return array_clone_pointers(ptr, da(orig,array_pointers)->n_allocated_entries, err);
	}
	case DATA_TAG_array_same: {
		clone = data_alloc(array_same, err);
		if (unlikely(!clone))
			return NULL;
		index_copy(&da(clone,array_same)->n_entries, da(orig,array_same)->n_entries);
		da(clone,array_same)->pointer = pointer_reference(&da(orig,array_same)->pointer);
		break;
	}
	case DATA_TAG_array_btree: {
		return array_clone_btree(ptr, da(orig,array_btree)->n_allocated_btree_entries, err);
	}
	default:
		internal(file_line, "array_clone: invalid array tag %u", da_tag(orig));
	}
	pointer_dereference(*ptr);
	*ptr = pointer_data(clone);
	return clone;
}
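/* Copy-on-write helper: return the node itself when it is exclusively owned,
 * otherwise replace *ptr with a writable clone. */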
static struct data *get_writable(pointer_t *ptr, ajla_error_t *err)
{
	struct data *array = pointer_get_data(*ptr);
	if (unlikely(!data_is_writable(array)))
		return array_clone(ptr, err);
	return array;
}
enum copy_t {
	copy,
	copy_add,
	copy_sub,
	move_sub
};

static void copy_btree_nodes_(struct btree_level *target, const struct btree_level *source, btree_entries_t n, enum copy_t cp, array_index_t idx_shift argument_position)
{
	btree_entries_t i;
	for (i = 0; i < n; i++) {
		if (cp == copy) {
			index_copy_(&target->end_index, source->end_index pass_position);
		} else if (cp == copy_add) {
			index_add3_(&target->end_index, source->end_index, idx_shift, NULL pass_position);
		} else if (cp == copy_sub) {
			index_sub3_(&target->end_index, source->end_index, idx_shift pass_position);
		} else {
			target->end_index = source->end_index;
			index_sub(&target->end_index, idx_shift);
		}
		target->node = source->node;
		target++;
		source++;
	}
}
#define copy_btree_nodes(target, source, n, cp, idx_shift) copy_btree_nodes_(target, source, n, cp, idx_shift pass_file_line)
static void free_indices_range(struct btree_level *l, btree_entries_t n)
{
	while (n--)
		index_free(&l++->end_index);
}

static void free_btree_indices(struct data *array)
{
	free_indices_range(da(array,array_btree)->btree, da(array,array_btree)->n_used_btree_entries);
}
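/*
 * Split an overfull btree node into two nodes of roughly half the size.
 * *left_idx receives the end index of the left half and *right_idx the end
 * index of the whole node; the right half's indices are rebased so that
 * they are relative to the new right node.
 */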
static bool split_btree_node(struct data *array, struct data **left, array_index_t *left_idx, struct data **right, array_index_t *right_idx, ajla_error_t *err)
{
	struct data *r1, *r2;
	btree_entries_t boundary = (da(array,array_btree)->n_used_btree_entries + 1) >> 1;

	index_copy(left_idx, da(array,array_btree)->btree[boundary - 1].end_index);
	index_copy(right_idx, da(array,array_btree)->btree[da(array,array_btree)->n_used_btree_entries - 1].end_index);

	r1 = data_alloc_flexible(array_btree, btree, BTREE_MAX_SIZE, err);
	if (unlikely(!r1))
		goto err_free_idx;
	da(r1,array_btree)->n_allocated_btree_entries = BTREE_MAX_SIZE;
	da(r1,array_btree)->n_used_btree_entries = boundary;
	da(r1,array_btree)->depth = da(array,array_btree)->depth;
	copy_btree_nodes(da(r1,array_btree)->btree, da(array,array_btree)->btree, boundary, copy, *left_idx);
	r2 = data_alloc_flexible(array_btree, btree, BTREE_MAX_SIZE, err);
	if (unlikely(!r2))
		goto err_free_r1;
	da(r2,array_btree)->n_allocated_btree_entries = BTREE_MAX_SIZE;
	da(r2,array_btree)->n_used_btree_entries = da(array,array_btree)->n_used_btree_entries - boundary;
	da(r2,array_btree)->depth = da(array,array_btree)->depth;
	copy_btree_nodes(da(r2,array_btree)->btree, da(array,array_btree)->btree + boundary, da(r2,array_btree)->n_used_btree_entries, copy_sub, *left_idx);

	*left = r1;
	*right = r2;
	return true;

err_free_r1:
	free_btree_indices(r1);
	data_free_r1(r1);
err_free_idx:
	*left = array;	/* avoid warning */
	*right = array;	/* avoid warning */
	index_free(right_idx);
	index_free(left_idx);
	return false;
}
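/*
 * Make room for n sibling slots in the parent of the current walk position.
 * If there is no parent, a new root btree node is allocated.  The returned
 * pointer addresses the first of the n slots; *prev_idx is set to the end
 * index of the entry preceding them (0 if there is none).
 */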
static struct btree_level *expand_parent(struct walk_context *w, btree_entries_t n, array_index_t *prev_idx, ajla_error_t *err)
{
	struct btree_level *ret;
	if (unlikely(!w->upper_level)) {
		struct data *root;
		index_from_int(prev_idx, 0);
		w->upper_level = root = data_alloc_flexible(array_btree, btree, n + BTREE_MAX_NODE_EXPAND, err);
		if (unlikely(!root)) {
			index_free(prev_idx);
			return NULL;
		}
		da(root,array_btree)->n_allocated_btree_entries = n + BTREE_MAX_NODE_EXPAND;
		da(root,array_btree)->n_used_btree_entries = n;
		da(root,array_btree)->depth = da_array_depth(pointer_get_data(*w->root)) + 1;
		*w->root = pointer_data(root);
		ret = da(root,array_btree)->btree;
	} else {
		ajla_assert_lo(da(w->upper_level,array_btree)->n_used_btree_entries + n - 1 <= da(w->upper_level,array_btree)->n_allocated_btree_entries, (file_line, "expand_parent: parent node too small: %"PRIuMAX", %"PRIuMAX", %"PRIuMAX"", (uintmax_t)da(w->upper_level,array_btree)->n_used_btree_entries, (uintmax_t)n, (uintmax_t)da(w->upper_level,array_btree)->n_allocated_btree_entries));
		if (!w->upper_level_pos) {
			index_from_int(prev_idx, 0);
		} else {
			index_copy(prev_idx, da(w->upper_level,array_btree)->btree[w->upper_level_pos - 1].end_index);
		}
		index_free(&da(w->upper_level,array_btree)->btree[w->upper_level_pos].end_index);
		memmove(da(w->upper_level,array_btree)->btree + w->upper_level_pos + n, da(w->upper_level,array_btree)->btree + w->upper_level_pos + 1, (da(w->upper_level,array_btree)->n_used_btree_entries - w->upper_level_pos - 1) * sizeof(struct btree_level));
		da(w->upper_level,array_btree)->n_used_btree_entries += n - 1;
		ret = da(w->upper_level,array_btree)->btree + w->upper_level_pos;
	}
	(void)memset(ret, 0x01, n * sizeof(struct btree_level));
	return ret;
}
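/*
 * Called when the current btree node has no free slots left: either grow its
 * allocation (up to BTREE_MAX_SIZE) or split it into two halves and insert
 * both halves into the parent, adjusting w->idx to stay relative to the half
 * that contains it.
 */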
static attr_noinline struct data *expand_btree_node(struct walk_context *w, ajla_error_t *err)
{
	struct data *array = pointer_get_data(*w->ptr);
	btree_entries_t old_size = da(array,array_btree)->n_allocated_btree_entries;

	if (old_size <= BTREE_MAX_SIZE - BTREE_MAX_NODE_EXPAND) {
		size_t new_size = (size_t)old_size * 2;
		if (unlikely(new_size < da(array,array_btree)->n_used_btree_entries + (size_t)BTREE_MAX_NODE_EXPAND))
			new_size = da(array,array_btree)->n_used_btree_entries + (size_t)BTREE_MAX_NODE_EXPAND;
		if (unlikely(new_size > BTREE_MAX_SIZE))
			new_size = BTREE_MAX_SIZE;
		array = array_clone_btree(w->ptr, new_size, err);
		if (unlikely(!array))
			return NULL;
	} else {
		struct data *left, *right;
		array_index_t prev_idx, left_idx, right_idx;
		struct btree_level *split;

		if (unlikely(!split_btree_node(array, &left, &left_idx, &right, &right_idx, err)))
			return NULL;
		split = expand_parent(w, 2, &prev_idx, err);
		if (unlikely(!split)) {
			index_free(&left_idx);
			index_free(&right_idx);
			free_btree_indices(left);
			free_btree_indices(right);
			data_free_r1(left);
			data_free_r1(right);
			return NULL;
		}
		free_btree_indices(array);
		data_free_r1(array);

		split[0].node = pointer_data(left);
		index_add3(&split[0].end_index, prev_idx, left_idx);
		split[1].node = pointer_data(right);
		index_add3(&split[1].end_index, prev_idx, right_idx);
		if (!index_ge_index(w->idx, left_idx)) {
			w->ptr = &split[0].node;
		} else {
			index_sub(&w->idx, left_idx);
			w->ptr = &split[1].node;
		}
		index_free(&left_idx);
		index_free(&right_idx);
		index_free(&prev_idx);
	}
	return pointer_get_data(*w->ptr);
}
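/*
 * Called when the current btree node became too small: either merge it with
 * a neighbouring sibling (if the result still fits into one node) or move
 * entries between the two siblings so that both end up roughly half full.
 * w->idx is adjusted so that it stays relative to the node that now holds
 * the searched-for element.
 */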
static attr_noinline struct data *rebalance_btree_nodes(struct walk_context *w, ajla_error_t *err)
{
	struct data *left, *right;
	struct data *upper = w->upper_level;
	bool merge_previous = !!w->upper_level_pos;
	btree_entries_t pos = w->upper_level_pos - (int)merge_previous;

	left = get_writable(&da(upper,array_btree)->btree[pos].node, err);
	if (unlikely(!left))
		return NULL;
	right = get_writable(&da(upper,array_btree)->btree[pos + 1].node, err);
	if (unlikely(!right))
		return NULL;
	if (da(left,array_btree)->n_used_btree_entries + da(right,array_btree)->n_used_btree_entries <= BTREE_MAX_SIZE - BTREE_MAX_NODE_EXPAND) {
		btree_entries_t boundary;
		boundary = da(left,array_btree)->n_used_btree_entries;
		if (da(left,array_btree)->n_allocated_btree_entries < boundary + da(right,array_btree)->n_used_btree_entries + BTREE_MAX_NODE_EXPAND) {
			left = array_clone_btree(&da(upper,array_btree)->btree[pos].node, BTREE_MAX_SIZE, err);
			if (unlikely(!left))
				return NULL;
		}
		if (merge_previous) {
			index_add(&w->idx, da(left,array_btree)->btree[boundary - 1].end_index);
		}
		copy_btree_nodes(da(left,array_btree)->btree + boundary, da(right,array_btree)->btree, da(right,array_btree)->n_used_btree_entries, copy_add, da(left,array_btree)->btree[boundary - 1].end_index);
		da(left,array_btree)->n_used_btree_entries += da(right,array_btree)->n_used_btree_entries;
		free_btree_indices(right);
		data_free_r1(right);
		index_free(&da(upper,array_btree)->btree[pos].end_index);
		da(upper,array_btree)->btree[pos].end_index = da(upper,array_btree)->btree[pos + 1].end_index;
		memmove(da(upper,array_btree)->btree + pos + 1, da(upper,array_btree)->btree + pos + 2, (da(upper,array_btree)->n_used_btree_entries - pos - 2) * sizeof(struct btree_level));
		da(upper,array_btree)->n_used_btree_entries--;
		if (unlikely(da(upper,array_btree)->n_used_btree_entries == 1)) {
			*w->root = da(upper,array_btree)->btree[0].node;
			free_btree_indices(upper);
			data_free_r1(upper);
		}
	} else {
		array_index_t split_index;
		btree_entries_t half = (da(left,array_btree)->n_used_btree_entries + da(right,array_btree)->n_used_btree_entries + 1) >> 1;
		if (da(left,array_btree)->n_allocated_btree_entries < half + BTREE_MAX_NODE_EXPAND) {
			left = array_clone_btree(&da(upper,array_btree)->btree[pos].node, BTREE_MAX_SIZE, err);
			if (unlikely(!left))
				return NULL;
		}
		if (da(right,array_btree)->n_allocated_btree_entries < half + BTREE_MAX_NODE_EXPAND) {
			right = array_clone_btree(&da(upper,array_btree)->btree[pos + 1].node, BTREE_MAX_SIZE, err);
			if (unlikely(!right))
				return NULL;
		}
		if (da(left,array_btree)->n_used_btree_entries < half) {
			array_index_t sub_idx;
			btree_entries_t diff = half - da(left,array_btree)->n_used_btree_entries;
			if (!merge_previous) {
				/* we stay in the left node; w->idx is unchanged */
			} else {
				if (!index_ge_index(w->idx, da(right,array_btree)->btree[diff - 1].end_index)) {
					index_add(&w->idx, da(left,array_btree)->btree[da(left,array_btree)->n_used_btree_entries - 1].end_index);
				} else {
					index_sub(&w->idx, da(right,array_btree)->btree[diff - 1].end_index);
				}
			}
			copy_btree_nodes(da(left,array_btree)->btree + da(left,array_btree)->n_used_btree_entries, da(right,array_btree)->btree, diff, copy_add, da(left,array_btree)->btree[da(left,array_btree)->n_used_btree_entries - 1].end_index);
			index_copy(&sub_idx, da(right,array_btree)->btree[diff - 1].end_index);
			free_indices_range(da(right,array_btree)->btree, diff);
			copy_btree_nodes(da(right,array_btree)->btree, da(right,array_btree)->btree + diff, da(right,array_btree)->n_used_btree_entries - diff, move_sub, sub_idx);
			index_free(&sub_idx);
			da(left,array_btree)->n_used_btree_entries = half;
			da(right,array_btree)->n_used_btree_entries -= diff;
		} else {
			btree_entries_t diff = da(left,array_btree)->n_used_btree_entries - half;
			array_index_t idx_diff;
			struct btree_level *l;

			l = mem_alloc_array_mayfail(mem_alloc_mayfail, struct btree_level *, 0, 0, da(right,array_btree)->n_used_btree_entries + diff, sizeof(struct btree_level), err);
			if (unlikely(!l))
				return NULL;

			index_sub3(&idx_diff, da(left,array_btree)->btree[da(left,array_btree)->n_used_btree_entries - 1].end_index, da(left,array_btree)->btree[half - 1].end_index);
			copy_btree_nodes(l, da(left,array_btree)->btree + half, diff, copy_sub, da(left,array_btree)->btree[half - 1].end_index);
			copy_btree_nodes(l + diff, da(right,array_btree)->btree, da(right,array_btree)->n_used_btree_entries, copy_add, idx_diff);
			free_btree_indices(right);
			memcpy(da(right,array_btree)->btree, l, (da(right,array_btree)->n_used_btree_entries + diff) * sizeof(struct btree_level));
			mem_free(l);
			index_free(&idx_diff);
			free_indices_range(da(left,array_btree)->btree + half, da(left,array_btree)->n_used_btree_entries - half);
			da(left,array_btree)->n_used_btree_entries = half;
			da(right,array_btree)->n_used_btree_entries += diff;
			if (!merge_previous) {
				if (!index_ge_index(w->idx, da(left,array_btree)->btree[half - 1].end_index)) {
					/* the element stays in the left node */
				} else {
					index_sub(&w->idx, da(left,array_btree)->btree[half - 1].end_index);
				}
			} else {
				index_add(&w->idx, da(right,array_btree)->btree[diff - 1].end_index);
			}
		}
		index_copy(&split_index, da(left,array_btree)->btree[half - 1].end_index);
		if (merge_previous)
			index_add(&split_index, da(upper,array_btree)->btree[pos - 1].end_index);
		index_free(&da(upper,array_btree)->btree[pos].end_index);
		da(upper,array_btree)->btree[pos].end_index = split_index;
	}
	return left;
}
static void walk_init_context(struct walk_context *w, pointer_t *root, array_index_t idx)
{
	w->root = root;
	w->ptr = root;
	w->upper_level = NULL;
	w->upper_level_pos = 0;
	w->idx = idx;
}

static void walk_free_context(struct walk_context *w)
{
	index_free(&w->idx);
}
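/*
 * Descend one btree level for a modifying walk: make the current node
 * writable, expand or rebalance it if needed, then binary-search the child
 * that covers w->idx and step into it.
 */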
static bool walk_for_write(struct walk_context *w, ajla_error_t *err)
{
	struct data *array;
	btree_entries_t bt_pos;

	array = pointer_get_data(*w->ptr);

	if (unlikely(da(array,array_btree)->n_allocated_btree_entries - da(array,array_btree)->n_used_btree_entries < BTREE_MAX_NODE_EXPAND)) {
		array = get_writable(w->ptr, err);
		if (unlikely(!array))
			return false;
		array = expand_btree_node(w, err);
		if (unlikely(!array))
			return false;
	}
	if (w->upper_level && unlikely(da(array,array_btree)->n_used_btree_entries < BTREE_MIN_SIZE + BTREE_MAX_NODE_COLLAPSE)) {
		array = get_writable(w->ptr, err);
		if (unlikely(!array))
			return false;
		array = rebalance_btree_nodes(w, err);
		if (unlikely(!array))
			return false;
	}

	binary_search(btree_entries_t, da(array,array_btree)->n_used_btree_entries, bt_pos, false, index_ge_index(w->idx, da(array,array_btree)->btree[bt_pos].end_index), break);
	if (unlikely(bt_pos == da(array,array_btree)->n_used_btree_entries))
		goto ret_err_out_of_range;
	if (bt_pos) {
		struct btree_level *bt_prev = &da(array,array_btree)->btree[bt_pos - 1];
		index_sub(&w->idx, bt_prev->end_index);
	}
	w->upper_level = array;
	w->upper_level_pos = bt_pos;
	w->ptr = &da(array,array_btree)->btree[bt_pos].node;
	da_array_assert_son(w->upper_level, pointer_get_data(*w->ptr));
	return true;

ret_err_out_of_range:
	fatal_mayfail(error_ajla(EC_SYNC, AJLA_ERROR_INDEX_OUT_OF_RANGE), err, "index out of range");
	return false;
}
static struct data *alloc_one_ptr(pointer_t target, ajla_error_t *err)
{
	struct data *pointer = data_alloc_array_pointers_mayfail(1, 1, err pass_file_line);
	if (unlikely(!pointer))
		return NULL;
	da(pointer,array_pointers)->pointer[0] = target;
	return pointer;
}
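/*
 * The leaf_prepend and leaf_append helpers (pointer and flat variants below)
 * insert a single element just before or just after an existing leaf.  They
 * first try to push the element into the neighbouring sibling leaf (growing
 * it up to SCALAR_SPLIT_SIZE); if that is not possible, a new one-element
 * leaf is allocated and linked next to the original one via expand_parent().
 */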
static bool leaf_prepend_ptr(struct walk_context *w, array_index_t existing_size, pointer_t ptr, pointer_t **result_ptr, ajla_error_t *err)
{
	struct btree_level *split;
	array_index_t prev_idx;
	pointer_t *sibling_pointer;
	struct data *sibling;
	pointer_t orig = *w->ptr;

	if (unlikely(!w->upper_level) || unlikely(!w->upper_level_pos))
		goto alloc_new;

	sibling_pointer = &da(w->upper_level,array_btree)->btree[w->upper_level_pos - 1].node;
	sibling = pointer_get_data(*sibling_pointer);
	if (da_tag(sibling) == DATA_TAG_array_pointers) {
		if (da(sibling,array_pointers)->n_used_entries == da(sibling,array_pointers)->n_allocated_entries) {
			uint_default_t new_size;
			if (unlikely(da(sibling,array_pointers)->n_allocated_entries >= SCALAR_SPLIT_SIZE))
				goto alloc_new;
			new_size = (uint_default_t)da(sibling,array_pointers)->n_allocated_entries * 2;
			if (new_size > SCALAR_SPLIT_SIZE)
				new_size = SCALAR_SPLIT_SIZE;
			sibling = array_clone_pointers(sibling_pointer, (int_default_t)new_size, err);
			if (unlikely(!sibling))
				return false;
		}
		sibling = get_writable(sibling_pointer, err);
		if (unlikely(!sibling))
			return false;
		da(sibling,array_pointers)->pointer[da(sibling,array_pointers)->n_used_entries] = ptr;
		*result_ptr = &da(sibling,array_pointers)->pointer[da(sibling,array_pointers)->n_used_entries];
		da(sibling,array_pointers)->n_used_entries++;
		index_add_int(&get_struct(sibling_pointer, struct btree_level, node)->end_index, 1);
		return true;
	}

alloc_new:
	sibling = alloc_one_ptr(ptr, err);
	if (unlikely(!sibling))
		return false;
	*result_ptr = &da(sibling,array_pointers)->pointer[0];
	split = expand_parent(w, 2, &prev_idx, err);
	if (unlikely(!split)) {
		data_free_r1(sibling);
		return false;
	}
	index_copy(&split[0].end_index, prev_idx);
	index_add_int(&split[0].end_index, 1);
	split[0].node = pointer_data(sibling);
	split[1].end_index = prev_idx;
	index_add(&split[1].end_index, existing_size);
	split[1].node = orig;
	return true;
}
static bool leaf_append_ptr(struct walk_context *w, array_index_t existing_size, pointer_t ptr, pointer_t **result_ptr, ajla_error_t *err)
{
	struct btree_level *split;
	array_index_t prev_idx;
	pointer_t *sibling_pointer;
	struct data *sibling;
	pointer_t orig = *w->ptr;

	if (unlikely(!w->upper_level) || unlikely(w->upper_level_pos == da(w->upper_level,array_btree)->n_used_btree_entries - 1))
		goto alloc_new;

	sibling_pointer = &da(w->upper_level,array_btree)->btree[w->upper_level_pos + 1].node;
	sibling = pointer_get_data(*sibling_pointer);
	if (da_tag(sibling) == DATA_TAG_array_pointers) {
		if (da(sibling,array_pointers)->n_used_entries == da(sibling,array_pointers)->n_allocated_entries) {
			uint_default_t new_size;
			if (unlikely(da(sibling,array_pointers)->n_allocated_entries >= SCALAR_SPLIT_SIZE))
				goto alloc_new;
			new_size = (uint_default_t)da(sibling,array_pointers)->n_allocated_entries * 2;
			if (new_size > SCALAR_SPLIT_SIZE)
				new_size = SCALAR_SPLIT_SIZE;
			sibling = array_clone_pointers(sibling_pointer, (int_default_t)new_size, err);
			if (unlikely(!sibling))
				return false;
		}
		sibling = get_writable(sibling_pointer, err);
		if (unlikely(!sibling))
			return false;
		if (da(sibling,array_pointers)->pointer == da(sibling,array_pointers)->pointer_array) {
			memmove(da(sibling,array_pointers)->pointer + 1, da(sibling,array_pointers)->pointer, da(sibling,array_pointers)->n_used_entries * sizeof(pointer_t));
		} else {
			da(sibling,array_pointers)->pointer--;
			da(sibling,array_pointers)->n_allocated_entries++;
		}
		da(sibling,array_pointers)->pointer[0] = ptr;
		*result_ptr = &da(sibling,array_pointers)->pointer[0];
		da(sibling,array_pointers)->n_used_entries++;
		index_sub_int(&get_struct(sibling_pointer, struct btree_level, node)[-1].end_index, 1);
		return true;
	}

alloc_new:
	sibling = alloc_one_ptr(ptr, err);
	if (unlikely(!sibling))
		return false;
	*result_ptr = &da(sibling,array_pointers)->pointer[0];
	split = expand_parent(w, 2, &prev_idx, err);
	if (unlikely(!split)) {
		data_free_r1(sibling);
		return false;
	}
	index_copy(&split[0].end_index, prev_idx);
	index_add(&split[0].end_index, existing_size);
	index_sub_int(&split[0].end_index, 1);
	split[0].node = orig;
	split[1].end_index = prev_idx;
	index_add(&split[1].end_index, existing_size);
	split[1].node = pointer_data(sibling);
	return true;
}
static struct data *try_join_ptr(struct data *left, struct data *right)
{
	int_default_t sum;
	bool quick_copy;
	pointer_t ptr;

	sum = (int_default_t)((uint_default_t)da(left,array_pointers)->n_used_entries + (uint_default_t)da(right,array_pointers)->n_used_entries);
	/*debug("join: %lu, %lu, max %lu, ref %lu", (uint_default_t)da(left,array_pointers)->n_used_entries, (uint_default_t)da(right,array_pointers)->n_used_entries, da(left,array_pointers)->n_allocated_entries, refcount_get_nonatomic(&left->refcount));*/
	if (unlikely(sum < 0))
		return NULL;

	if (!data_is_writable(left)) {
		if (sum > BTREE_MAX_SIZE)
			return NULL;
		goto clone;
	}
	if (da(left,array_pointers)->n_allocated_entries < sum) {
clone:
		ptr = pointer_data(left);
		left = array_clone_pointers(&ptr, array_align_alloc(sum), MEM_DONT_TRY_TO_FREE);
		if (unlikely(!left))
			return NULL;
	}

	quick_copy = data_is_writable(right);
	if (quick_copy) {
		memcpy(da(left,array_pointers)->pointer + da(left,array_pointers)->n_used_entries, da(right,array_pointers)->pointer, da(right,array_pointers)->n_used_entries * sizeof(pointer_t));
	} else {
		int_default_t i, offs, total;
		offs = da(left,array_pointers)->n_used_entries;
		total = da(right,array_pointers)->n_used_entries;
		for (i = 0; i < total; i++)
			da(left,array_pointers)->pointer[offs + i] = pointer_reference(&da(right,array_pointers)->pointer[i]);
	}

	da(left,array_pointers)->n_used_entries += da(right,array_pointers)->n_used_entries;

	if (quick_copy)
		da(right,array_pointers)->n_used_entries = 0;
	data_dereference(right);

	return left;
}
static void try_join_siblings_ptr(struct walk_context *w, pointer_t **result_ptr, bool ptr_right)
{
	struct data *ul = w->upper_level;
	btree_entries_t ul_pos = w->upper_level_pos;
	struct data *left, *right;
	int_default_t ptr_pos;

	left = pointer_get_data(da(ul,array_btree)->btree[ul_pos].node);
	right = pointer_get_data(da(ul,array_btree)->btree[ul_pos + 1].node);

	if (da_tag(left) != DATA_TAG_array_pointers || da_tag(right) != DATA_TAG_array_pointers)
		return;

	if (!ptr_right) {
		ptr_pos = (int_default_t)(*result_ptr - da(left,array_pointers)->pointer);
		ajla_assert((uint_default_t)ptr_pos < (uint_default_t)da(left,array_pointers)->n_used_entries, (file_line, "try_join_siblings_ptr: ptr is outside left node"));
	} else {
		ptr_pos = (int_default_t)(*result_ptr - da(right,array_pointers)->pointer);
		ajla_assert((uint_default_t)ptr_pos < (uint_default_t)da(right,array_pointers)->n_used_entries, (file_line, "try_join_siblings_ptr: ptr is outside right node"));
		ptr_pos += da(left,array_pointers)->n_used_entries;
	}

	left = try_join_ptr(left, right);
	if (unlikely(!left))
		return;

	*result_ptr = &da(left,array_pointers)->pointer[ptr_pos];

	da(ul,array_btree)->btree[ul_pos].node = pointer_data(left);
	index_free(&da(ul,array_btree)->btree[ul_pos].end_index);
	da(ul,array_btree)->btree[ul_pos].end_index = da(ul,array_btree)->btree[ul_pos + 1].end_index;
	memmove(da(ul,array_btree)->btree + ul_pos + 1, da(ul,array_btree)->btree + ul_pos + 2, (da(ul,array_btree)->n_used_btree_entries - ul_pos - 2) * sizeof(struct btree_level));
	da(ul,array_btree)->n_used_btree_entries--;
	if (unlikely(da(ul,array_btree)->n_used_btree_entries == 1)) {
		free_btree_indices(ul);
		data_free_r1(ul);
		*w->root = pointer_data(left);
	}
}

static void try_join_both_siblings_ptr(struct walk_context *w, pointer_t **result_ptr)
{
	if (w->upper_level) {
		if (w->upper_level_pos < da(w->upper_level,array_btree)->n_used_btree_entries - 1) {
			try_join_siblings_ptr(w, result_ptr, false);
		}
		if (w->upper_level_pos) {
			w->upper_level_pos--;
			try_join_siblings_ptr(w, result_ptr, true);
		}
	}
}
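/*
 * Convert the element at w->idx of a flat leaf into a boxed pointer.
 * Depending on the position this shrinks the leaf and prepends/appends the
 * new pointer to a sibling, or splits the leaf into flat-left,
 * single-pointer and flat-right nodes.
 */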
static bool flat_to_ptr(struct walk_context *w, pointer_t **result_ptr, ajla_error_t *err)
{
	struct data *array, *right, *single;
	pointer_t ptr;
	bool ret;

	array = pointer_get_data(*w->ptr);
	ptr = flat_to_data(da(array,array_flat)->type, da_array_flat(array) + index_to_int(w->idx) * da_array_flat_element_size(array));

	if (unlikely(da(array,array_flat)->n_used_entries == 1)) {
		single = alloc_one_ptr(ptr, err);
		if (unlikely(!single))
			goto ret_err;
		*result_ptr = &da(single,array_pointers)->pointer[0];
		data_dereference(array);
		*w->ptr = pointer_data(single);
		try_join_both_siblings_ptr(w, result_ptr);

	} else if (!index_to_int(w->idx)) {
		array_index_t existing_size;

		index_from_int(&existing_size, da(array,array_flat)->n_used_entries);
		ret = leaf_prepend_ptr(w, existing_size, ptr, result_ptr, err);
		index_free(&existing_size);
		if (unlikely(!ret))
			goto ret_err;
		memmove(da_array_flat(array), da_array_flat(array) + da_array_flat_element_size(array), (da(array,array_flat)->n_used_entries - 1) * da_array_flat_element_size(array));
		da(array,array_flat)->n_used_entries--;

	} else if (index_to_int(w->idx) == da(array,array_flat)->n_used_entries - 1) {
		array_index_t existing_size;

		index_from_int(&existing_size, da(array,array_flat)->n_used_entries);
		ret = leaf_append_ptr(w, existing_size, ptr, result_ptr, err);
		index_free(&existing_size);
		if (unlikely(!ret))
			goto ret_err;
		da(array,array_flat)->n_used_entries--;

	} else {
		struct btree_level *split;
		array_index_t prev_idx;
		int_default_t n_allocated = da(array,array_flat)->n_used_entries - 1 - index_to_int(w->idx);

		right = data_alloc_array_flat_mayfail(da(array,array_flat)->type, n_allocated, da(array,array_flat)->n_used_entries - 1 - index_to_int(w->idx), false, err pass_file_line);
		if (unlikely(!right))
			goto ret_err;
		memcpy(da_array_flat(right), da_array_flat(array) + (index_to_int(w->idx) + 1) * da_array_flat_element_size(array), da(right,array_flat)->n_used_entries * da_array_flat_element_size(array));

		single = alloc_one_ptr(ptr, err);
		if (unlikely(!single))
			goto free_right_ret_err;
		*result_ptr = &da(single,array_pointers)->pointer[0];

		split = expand_parent(w, 3, &prev_idx, err);
		if (unlikely(!split))
			goto free_single_right_ret_err;

		split[0].node = pointer_data(array);
		index_add3(&split[0].end_index, prev_idx, w->idx);
		split[1].node = pointer_data(single);
		index_add3(&split[1].end_index, prev_idx, w->idx);
		index_add_int(&split[1].end_index, 1);
		split[2].node = pointer_data(right);
		split[2].end_index = prev_idx;
		index_add_int(&split[2].end_index, da(array,array_flat)->n_used_entries);

		da(array,array_flat)->n_used_entries = index_to_int(w->idx);
	}

	return true;

free_single_right_ret_err:
	data_free_r1(single);
free_right_ret_err:
	data_free_r1(right);
ret_err:
	pointer_dereference(ptr);
	return false;
}
static bool leaf_prepend_flat(struct walk_context *w, const struct type *type, array_index_t existing_size, unsigned char **result_flat, ajla_error_t *err)
{
	struct btree_level *split;
	array_index_t prev_idx;
	pointer_t *sibling_pointer;
	struct data *sibling;
	pointer_t orig = *w->ptr;

	if (unlikely(!w->upper_level) || unlikely(!w->upper_level_pos))
		goto alloc_new;

	sibling_pointer = &da(w->upper_level,array_btree)->btree[w->upper_level_pos - 1].node;
	sibling = pointer_get_data(*sibling_pointer);
	if (da_tag(sibling) == DATA_TAG_array_flat) {
		if (da(sibling,array_flat)->n_used_entries == da(sibling,array_flat)->n_allocated_entries) {
			uint_default_t new_size;
			if (unlikely(da(sibling,array_flat)->n_allocated_entries >= SCALAR_SPLIT_SIZE))
				goto alloc_new;
			new_size = (uint_default_t)da(sibling,array_flat)->n_allocated_entries * 2;
			if (new_size > SCALAR_SPLIT_SIZE)
				new_size = SCALAR_SPLIT_SIZE;
			sibling = array_clone_flat(sibling_pointer, (int_default_t)new_size, err);
			if (unlikely(!sibling))
				return false;
		}
		sibling = get_writable(sibling_pointer, err);
		if (unlikely(!sibling))
			return false;
		*result_flat = da_array_flat(sibling) + (size_t)type->size * da(sibling,array_flat)->n_used_entries;
		da(sibling,array_flat)->n_used_entries++;
		index_add_int(&get_struct(sibling_pointer, struct btree_level, node)->end_index, 1);
		return true;
	}

alloc_new:
	sibling = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
	if (unlikely(!sibling))
		return false;
	*result_flat = da_array_flat(sibling);
	split = expand_parent(w, 2, &prev_idx, err);
	if (unlikely(!split)) {
		data_free_r1(sibling);
		return false;
	}
	index_copy(&split[0].end_index, prev_idx);
	index_add_int(&split[0].end_index, 1);
	split[0].node = pointer_data(sibling);
	split[1].end_index = prev_idx;
	index_add(&split[1].end_index, existing_size);
	split[1].node = orig;
	return true;
}
static bool leaf_append_flat(struct walk_context *w, const struct type *type, array_index_t existing_size, unsigned char **result_flat, ajla_error_t *err)
{
	struct btree_level *split;
	array_index_t prev_idx;
	pointer_t *sibling_pointer;
	struct data *sibling;
	pointer_t orig = *w->ptr;

	if (unlikely(!w->upper_level) || unlikely(w->upper_level_pos == da(w->upper_level,array_btree)->n_used_btree_entries - 1))
		goto alloc_new;

	sibling_pointer = &da(w->upper_level,array_btree)->btree[w->upper_level_pos + 1].node;
	sibling = pointer_get_data(*sibling_pointer);
	if (da_tag(sibling) == DATA_TAG_array_flat) {
		if (da(sibling,array_flat)->n_used_entries == da(sibling,array_flat)->n_allocated_entries) {
			uint_default_t new_size;
			if (unlikely(da(sibling,array_flat)->n_allocated_entries >= SCALAR_SPLIT_SIZE))
				goto alloc_new;
			new_size = (uint_default_t)da(sibling,array_flat)->n_allocated_entries * 2;
			if (new_size > SCALAR_SPLIT_SIZE)
				new_size = SCALAR_SPLIT_SIZE;
			sibling = array_clone_flat(sibling_pointer, (int_default_t)new_size, err);
			if (unlikely(!sibling))
				return false;
		}
		sibling = get_writable(sibling_pointer, err);
		if (unlikely(!sibling))
			return false;
		memmove(da_array_flat(sibling) + type->size, da_array_flat(sibling), (size_t)type->size * da(sibling,array_flat)->n_used_entries);
		*result_flat = da_array_flat(sibling);
		da(sibling,array_flat)->n_used_entries++;
		index_sub_int(&get_struct(sibling_pointer, struct btree_level, node)[-1].end_index, 1);
		return true;
	}

alloc_new:
	sibling = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
	if (unlikely(!sibling))
		return false;
	*result_flat = da_array_flat(sibling);
	split = expand_parent(w, 2, &prev_idx, err);
	if (unlikely(!split)) {
		data_free_r1(sibling);
		return false;
	}
	index_copy(&split[0].end_index, prev_idx);
	index_add(&split[0].end_index, existing_size);
	index_sub_int(&split[0].end_index, 1);
	split[0].node = orig;
	split[1].end_index = prev_idx;
	index_add(&split[1].end_index, existing_size);
	split[1].node = pointer_data(sibling);
	return true;
}
static struct data *try_join_flat(struct data *left, struct data *right)
{
	int_default_t sum;
	size_t element_size;

	sum = (int_default_t)((uint_default_t)da(left,array_flat)->n_used_entries + (uint_default_t)da(right,array_flat)->n_used_entries);
	if (unlikely(sum < 0))
		return NULL;

	if (!data_is_writable(left) || da(left,array_flat)->n_allocated_entries < sum) {
		pointer_t ptr = pointer_data(left);
		left = array_clone_flat(&ptr, array_align_alloc(sum), MEM_DONT_TRY_TO_FREE);
		if (unlikely(!left))
			return NULL;
	}

	element_size = da_array_flat_element_size(left);
	memcpy(da_array_flat(left) + da(left,array_flat)->n_used_entries * element_size, da_array_flat(right), da(right,array_flat)->n_used_entries * element_size);
	da(left,array_flat)->n_used_entries += da(right,array_flat)->n_used_entries;

	data_dereference(right);

	return left;
}
static void try_join_siblings_flat(struct walk_context *w, unsigned char **result_flat, bool ptr_right)
{
	struct data *ul = w->upper_level;
	btree_entries_t ul_pos = w->upper_level_pos;
	struct data *left, *right;
	size_t ptr_pos;
	size_t element_size;

	left = pointer_get_data(da(ul,array_btree)->btree[ul_pos].node);
	right = pointer_get_data(da(ul,array_btree)->btree[ul_pos + 1].node);

	if (da_tag(left) != DATA_TAG_array_flat || da_tag(right) != DATA_TAG_array_flat)
		return;

	element_size = da_array_flat_element_size(left);
	if (!ptr_right) {
		ptr_pos = *result_flat - da_array_flat(left);
		ajla_assert((uint_default_t)ptr_pos < (uint_default_t)da(left,array_flat)->n_used_entries * element_size, (file_line, "try_join_siblings_flat: ptr is outside left node"));
	} else {
		ptr_pos = *result_flat - da_array_flat(right);
		ajla_assert((uint_default_t)ptr_pos < (uint_default_t)da(right,array_flat)->n_used_entries * element_size, (file_line, "try_join_siblings_flat: ptr is outside right node"));
		ptr_pos += da(left,array_flat)->n_used_entries * element_size;
	}

	left = try_join_flat(left, right);
	if (unlikely(!left))
		return;

	*result_flat = da_array_flat(left) + ptr_pos;

	da(ul,array_btree)->btree[ul_pos].node = pointer_data(left);
	index_free(&da(ul,array_btree)->btree[ul_pos].end_index);
	da(ul,array_btree)->btree[ul_pos].end_index = da(ul,array_btree)->btree[ul_pos + 1].end_index;
	memmove(da(ul,array_btree)->btree + ul_pos + 1, da(ul,array_btree)->btree + ul_pos + 2, (da(ul,array_btree)->n_used_btree_entries - ul_pos - 2) * sizeof(struct btree_level));
	da(ul,array_btree)->n_used_btree_entries--;
	if (unlikely(da(ul,array_btree)->n_used_btree_entries == 1)) {
		free_btree_indices(ul);
		data_free_r1(ul);
		*w->root = pointer_data(left);
	}
}

static void try_join_both_siblings_flat(struct walk_context *w, unsigned char **result_flat)
{
	if (w->upper_level) {
		if (w->upper_level_pos < da(w->upper_level,array_btree)->n_used_btree_entries - 1) {
			try_join_siblings_flat(w, result_flat, false);
		}
		if (w->upper_level_pos) {
			w->upper_level_pos--;
			try_join_siblings_flat(w, result_flat, true);
		}
	}
}
static bool ptr_to_flat(struct walk_context *w, const struct type *type, unsigned char **result_flat, ajla_error_t *err)
{
	struct data *array, *right, *single;
	bool ret;

	array = pointer_get_data(*w->ptr);

	if (unlikely(da(array,array_pointers)->n_used_entries == 1)) {
		struct data *new_flat;
		new_flat = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
		if (unlikely(!new_flat))
			return false;
		*result_flat = da_array_flat(new_flat);
		data_dereference(array);
		*w->ptr = pointer_data(new_flat);
		try_join_both_siblings_flat(w, result_flat);

	} else if (!index_to_int(w->idx)) {
		array_index_t existing_size;

		index_from_int(&existing_size, da(array,array_pointers)->n_used_entries);
		ret = leaf_prepend_flat(w, type, existing_size, result_flat, err);
		index_free(&existing_size);
		if (unlikely(!ret))
			return false;
		pointer_dereference(da(array,array_pointers)->pointer[0]);
		da(array,array_pointers)->n_used_entries--;
		da(array,array_pointers)->n_allocated_entries--;
		da(array,array_pointers)->pointer++;

	} else if (index_to_int(w->idx) == da(array,array_pointers)->n_used_entries - 1) {
		array_index_t existing_size;

		index_from_int(&existing_size, da(array,array_pointers)->n_used_entries);
		ret = leaf_append_flat(w, type, existing_size, result_flat, err);
		index_free(&existing_size);
		if (unlikely(!ret))
			return false;
		pointer_dereference(da(array,array_pointers)->pointer[da(array,array_pointers)->n_used_entries - 1]);
		da(array,array_pointers)->n_used_entries--;

	} else {
		struct btree_level *split;
		array_index_t prev_idx;
		int_default_t n_allocated = da(array,array_pointers)->n_used_entries - 1 - index_to_int(w->idx);

		right = data_alloc_array_pointers_mayfail(n_allocated, da(array,array_pointers)->n_used_entries - 1 - index_to_int(w->idx), err pass_file_line);
		if (unlikely(!right))
			return false;
		memcpy(da(right,array_pointers)->pointer, da(array,array_pointers)->pointer + index_to_int(w->idx) + 1, da(right,array_pointers)->n_used_entries * sizeof(pointer_t));

		single = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
		if (unlikely(!single))
			goto free_right_ret_err;

		*result_flat = da_array_flat(single);

		split = expand_parent(w, 3, &prev_idx, err);
		if (unlikely(!split))
			goto free_single_right_ret_err;

		split[0].node = pointer_data(array);
		index_add3(&split[0].end_index, prev_idx, w->idx);
		split[1].node = pointer_data(single);
		index_copy(&split[1].end_index, split[0].end_index);
		index_add_int(&split[1].end_index, 1);
		split[2].node = pointer_data(right);
		split[2].end_index = prev_idx;
		index_add_int(&split[2].end_index, da(array,array_pointers)->n_used_entries);

		da(array,array_pointers)->n_used_entries = index_to_int(w->idx);
		pointer_dereference(da(array,array_pointers)->pointer[index_to_int(w->idx)]);
	}

	return true;

free_single_right_ret_err:
	data_free_r1(single);
free_right_ret_err:
	data_free_r1(right);
	return false;
}
static bool same_to_ptr(struct walk_context *w, pointer_t **result_ptr, ajla_error_t *err)
{
	struct data *array, *right, *single;
	pointer_t ptr;
	bool ret;
	array_index_t n_entries_1;

	array = pointer_get_data(*w->ptr);
	ptr = da(array,array_same)->pointer;

	if (unlikely(!index_ge_int(da(array,array_same)->n_entries, 2))) {
		single = alloc_one_ptr(ptr, err);
		if (unlikely(!single))
			return false;
		*result_ptr = &da(single,array_pointers)->pointer[0];
		index_free(&da(array,array_same)->n_entries);
		data_free_r1(array);
		*w->ptr = pointer_data(single);
		try_join_both_siblings_ptr(w, result_ptr);

	} else if (!index_ge_int(w->idx, 1)) {

		ret = leaf_prepend_ptr(w, da(array,array_same)->n_entries, ptr, result_ptr, err);
		if (unlikely(!ret))
			return false;
		index_sub_int(&da(array,array_same)->n_entries, 1);
		pointer_reference_owned(ptr);

	} else if (index_copy(&n_entries_1, da(array,array_same)->n_entries), index_sub_int(&n_entries_1, 1), index_ge_index(w->idx, n_entries_1)) {

		ret = leaf_append_ptr(w, da(array,array_same)->n_entries, ptr, result_ptr, err);
		if (unlikely(!ret)) {
			index_free(&n_entries_1);
			return false;
		}
		index_free(&da(array,array_same)->n_entries);
		da(array,array_same)->n_entries = n_entries_1;
		pointer_reference_owned(ptr);

	} else {
		struct btree_level *split;
		array_index_t prev_idx;

		index_sub(&n_entries_1, w->idx);
		right = data_alloc_array_same_mayfail(n_entries_1, err pass_file_line);
		if (unlikely(!right))
			return false;
		da(right,array_same)->pointer = ptr;

		single = alloc_one_ptr(ptr, err);
		if (unlikely(!single))
			goto free_right_ret_err;
		*result_ptr = &da(single,array_pointers)->pointer[0];

		split = expand_parent(w, 3, &prev_idx, err);
		if (unlikely(!split))
			goto free_single_right_ret_err;

		split[0].node = pointer_data(array);
		index_add3(&split[0].end_index, prev_idx, w->idx);
		split[1].node = pointer_data(single);
		index_copy(&split[1].end_index, split[0].end_index);
		index_add_int(&split[1].end_index, 1);
		split[2].node = pointer_data(right);
		split[2].end_index = prev_idx;
		index_add(&split[2].end_index, da(array,array_same)->n_entries);

		index_free(&da(array,array_same)->n_entries);
		index_copy(&da(array,array_same)->n_entries, w->idx);

		pointer_reference_owned(ptr);
		pointer_reference_owned(ptr);
	}

	return true;

free_single_right_ret_err:
	data_free_r1(single);
free_right_ret_err:
	data_free_r1(right);
	return false;
}
static bool same_to_flat(struct walk_context *w, const struct type *type, unsigned char **result_flat, ajla_error_t *err)
{
	struct data *array, *right, *single;
	pointer_t ptr;
	bool ret;
	array_index_t n_entries_1;

	array = pointer_get_data(*w->ptr);
	ptr = da(array,array_same)->pointer;

	if (unlikely(!index_ge_int(da(array,array_same)->n_entries, 2))) {
		struct data *new_flat;
		new_flat = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
		if (unlikely(!new_flat))
			return false;
		*result_flat = da_array_flat(new_flat);
		data_dereference(array);
		*w->ptr = pointer_data(new_flat);
		try_join_both_siblings_flat(w, result_flat);

	} else if (!index_ge_int(w->idx, 1)) {

		ret = leaf_prepend_flat(w, type, da(array,array_same)->n_entries, result_flat, err);
		if (unlikely(!ret))
			return false;
		index_sub_int(&da(array,array_same)->n_entries, 1);

	} else if (index_copy(&n_entries_1, da(array,array_same)->n_entries), index_sub_int(&n_entries_1, 1), index_ge_index(w->idx, n_entries_1)) {

		ret = leaf_append_flat(w, type, da(array,array_same)->n_entries, result_flat, err);
		if (unlikely(!ret)) {
			index_free(&n_entries_1);
			return false;
		}
		index_free(&da(array,array_same)->n_entries);
		da(array,array_same)->n_entries = n_entries_1;

	} else {
		struct btree_level *split;
		array_index_t prev_idx;

		index_sub(&n_entries_1, w->idx);
		right = data_alloc_array_same_mayfail(n_entries_1, err pass_file_line);
		if (unlikely(!right))
			return false;
		da(right,array_same)->pointer = ptr;

		single = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
		if (unlikely(!single))
			goto free_right_ret_err;

		*result_flat = da_array_flat(single);

		split = expand_parent(w, 3, &prev_idx, err);
		if (unlikely(!split))
			goto free_single_right_ret_err;

		split[0].node = pointer_data(array);
		index_add3(&split[0].end_index, prev_idx, w->idx);
		split[1].node = pointer_data(single);
		index_copy(&split[1].end_index, split[0].end_index);
		index_add_int(&split[1].end_index, 1);
		split[2].node = pointer_data(right);
		split[2].end_index = prev_idx;
		index_add(&split[2].end_index, da(array,array_same)->n_entries);

		index_free(&da(array,array_same)->n_entries);
		index_copy(&da(array,array_same)->n_entries, w->idx);

		pointer_reference_owned(ptr);
	}

	return true;

free_single_right_ret_err:
	data_free_r1(single);
free_right_ret_err:
	data_free_r1(right);
	return false;
}
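/*
 * Prepare the element at index idx for modification and return a writable
 * location for it: either a flat byte pointer (*result_flat) or a pointer_t
 * slot (*result_ptr), converting between the flat and boxed representations
 * as requested by ARRAY_MODIFY_NEED_PTR / ARRAY_MODIFY_NEED_FLAT.  On error
 * the whole array is replaced with an error pointer.
 */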
bool attr_fastcall array_modify(pointer_t *root, array_index_t idx, unsigned flags, pointer_t **result_ptr, unsigned char **result_flat, const struct type **flat_type, frame_s *fp, const code_t *ip)
{
	struct walk_context w;
	struct data *array;
	ajla_error_t err;

	*result_flat = NULL;

	walk_init_context(&w, root, idx);

try_again:
	array = get_writable(w.ptr, &err);
	if (unlikely(!array))
		goto ret_err;
	switch (da_tag(array)) {
	case DATA_TAG_array_slice: {
		array = array_clone(w.ptr, &err);
		if (unlikely(!array))
			goto ret_err;
	}
		/*-fallthrough*/
	case DATA_TAG_array_flat: {
		if (unlikely(index_ge_int(w.idx, da(array,array_flat)->n_used_entries)))
			goto ret_err_out_of_range;

		if (unlikely(flags & ARRAY_MODIFY_NEED_PTR)) {
			if (unlikely(!flat_to_ptr(&w, result_ptr, &err)))
				goto ret_err;
			break;
		}
		*flat_type = da(array,array_flat)->type;
		*result_flat = da_array_flat(array) + index_to_int(w.idx) * da_array_flat_element_size(array);
		break;
	}
	case DATA_TAG_array_pointers: {
		if (unlikely(index_ge_int(w.idx, da(array,array_pointers)->n_used_entries)))
			goto ret_err_out_of_range;

		if (unlikely(flags & ARRAY_MODIFY_NEED_FLAT)) {
			if (unlikely(!ptr_to_flat(&w, *flat_type, result_flat, &err)))
				goto ret_err;
			break;
		}
		*result_ptr = &da(array,array_pointers)->pointer[index_to_int(w.idx)];
		break;
	}
	case DATA_TAG_array_same: {
		if (unlikely(index_ge_index(w.idx, da(array,array_same)->n_entries)))
			goto ret_err_out_of_range;
		if (!(flags & ARRAY_MODIFY_NEED_FLAT)) {
			if (unlikely(!same_to_ptr(&w, result_ptr, &err)))
				goto ret_err;
		} else {
			if (unlikely(!same_to_flat(&w, *flat_type, result_flat, &err)))
				goto ret_err;
		}
		break;
	}
	case DATA_TAG_array_btree: {
		if (unlikely(!walk_for_write(&w, &err)))
			goto ret_err;
		goto try_again;
	}
	default:
		internal(file_line, "array_modify: invalid array tag %u", da_tag(array));
	}
	walk_free_context(&w);
	return true;

ret_err_out_of_range:
	err = error_ajla(EC_SYNC, AJLA_ERROR_INDEX_OUT_OF_RANGE);
ret_err:
	walk_free_context(&w);
	pointer_dereference(*root);
	*root = pointer_error(err, fp, ip pass_file_line);
	return false;
}
array_index_t attr_fastcall array_len(struct data *array)
{
	array_index_t result, tmp;
	switch (da_tag(array)) {
	case DATA_TAG_array_flat:
		index_from_int(&result, da(array,array_flat)->n_used_entries);
		break;
	case DATA_TAG_array_slice:
		index_from_int(&result, da(array,array_slice)->n_entries);
		break;
	case DATA_TAG_array_pointers:
		index_from_int(&result, da(array,array_pointers)->n_used_entries);
		break;
	case DATA_TAG_array_same:
		index_copy(&result, da(array,array_same)->n_entries);
		break;
	case DATA_TAG_array_btree:
		index_copy(&result, da(array,array_btree)->btree[da(array,array_btree)->n_used_btree_entries - 1].end_index);
		break;
	case DATA_TAG_array_incomplete:
		index_from_int(&result, 0);
		follow_incomplete_chain:
		tmp = array_len(pointer_get_data(da(array,array_incomplete)->first));
		ajla_assert_lo(index_ge_int(tmp, 1), (file_line, "array_len: the first part is empty"));
		index_add(&result, tmp);
		index_free(&tmp);
		ajla_assert_lo(!pointer_is_thunk(da(array,array_incomplete)->next), (file_line, "array_len: incomplete thunk is not evaluated"));
		array = pointer_get_data(da(array,array_incomplete)->next);
		if (da_tag(array) != DATA_TAG_array_incomplete) {
			tmp = array_len(array);
			index_add(&result, tmp);
			index_free(&tmp);
			break;
		}
		goto follow_incomplete_chain;
	default:
		internal(file_line, "array_len: invalid array tag %u", da_tag(array));
	}
	return result;
}
bool attr_fastcall array_is_empty(struct data *array)
{
	switch (da_tag(array)) {
	case DATA_TAG_array_flat:
		return !da(array,array_flat)->n_used_entries;
	case DATA_TAG_array_pointers:
		return !da(array,array_pointers)->n_used_entries;
	case DATA_TAG_array_slice:
		return !da(array,array_slice)->n_entries;
	}
	return false;
}
static bool array_greater(struct data *array1, struct data *array2)
{
	/*
	 * Note: the code will try to run rebalance_btree_nodes on the shallower
	 * array.  If both arrays have equal depth, we must rebalance the one
	 * with fewer entries.
	 */
	if (da_array_depth(array1) > da_array_depth(array2))
		return true;
	if (da_array_depth(array1) < da_array_depth(array2))
		return false;
	if (da_tag(array1) == DATA_TAG_array_btree) {
		if (da(array1,array_btree)->n_used_btree_entries > da(array2,array_btree)->n_used_btree_entries)
			return true;
		if (da(array1,array_btree)->n_used_btree_entries < da(array2,array_btree)->n_used_btree_entries)
			return false;
	}
	return true;
}
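/*
 * Concatenate two arrays.  Small flat or pointer leaves are joined directly;
 * otherwise the smaller (shallower) array is inserted as a new child at the
 * appropriate edge of the larger one, the end indices on the path down are
 * fixed up, and the affected nodes are joined or rebalanced afterwards.
 */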
struct data * attr_fastcall array_join(struct data *array1, struct data *array2, ajla_error_t *err)
{
	struct walk_context w;
	array_index_t len1, len2, total_len, new_len, len_array;
	struct data *array, *new_array;
	pointer_t result;
	bool append;
	tag_t new_tag;
	struct btree_level *split;
	array_index_t prev_idx;
	struct data *fix_idx;

	tag_t tag1 = da_tag(array1);
	tag_t tag2 = da_tag(array2);
	if (tag1 == DATA_TAG_array_flat && tag2 == DATA_TAG_array_flat) {
		new_array = try_join_flat(array1, array2);
		if (likely(new_array != NULL))
			return new_array;
	}
	if (tag1 == DATA_TAG_array_pointers && tag2 == DATA_TAG_array_pointers) {
		new_array = try_join_ptr(array1, array2);
		if (likely(new_array != NULL))
			return new_array;
	}

	len1 = array_len(array1);
	if (unlikely(!index_ge_int(len1, 1))) {
		index_free(&len1);
		data_dereference(array1);
		return array2;
	}
	len2 = array_len(array2);
	if (unlikely(!index_ge_int(len2, 1))) {
		index_free(&len2);
		index_free(&len1);
		data_dereference(array2);
		return array1;
	}

	if (unlikely(!index_add3_(&total_len, len1, len2, err pass_file_line))) {
		index_free(&len1);
		index_free(&len2);
		data_dereference(array1);
		data_dereference(array2);
		return NULL;
	}
	index_free(&total_len);

	if (array_greater(array1, array2)) {
		array_index_t search_for;
		result = pointer_data(array1);
		new_array = array2;
		new_len = len2;
		append = true;
		search_for = len1;
		index_sub_int(&search_for, 1);
		walk_init_context(&w, &result, search_for);
	} else {
		array_index_t search_for;
		result = pointer_data(array2);
		new_array = array1;
		new_len = len1;
		append = false;
		index_free(&len2);
		index_from_int(&search_for, 0);
		walk_init_context(&w, &result, search_for);
	}

	array = pointer_get_data(*w.ptr);

	if (da_array_depth(array) > da_array_depth(new_array)) {
		array = get_writable(w.ptr, err);
		if (unlikely(!array))
			goto ret_err_balance;
		if (unlikely(!walk_for_write(&w, err)))
			goto ret_err_balance;
	}

	split = expand_parent(&w, 2, &prev_idx, err);
	if (unlikely(!split))
		goto ret_err_balance;

	len_array = array_len(array);
	if (!append) {
		split[0].node = pointer_data(new_array);
		split[0].end_index = prev_idx;	/* 0 */
		split[1].node = pointer_data(array);
		split[1].end_index = len_array;
	} else {
		split[0].node = pointer_data(array);
		split[0].end_index = prev_idx;
		index_add(&split[0].end_index, len_array);
		split[1].node = pointer_data(new_array);
		index_copy(&split[1].end_index, split[0].end_index);
		index_free(&len_array);
	}

	fix_idx = pointer_get_data(result);
	while (1) {
		btree_entries_t i, s;
		s = !append ? 0 : da(fix_idx,array_btree)->n_used_btree_entries - 1;
		for (i = s; i < da(fix_idx,array_btree)->n_used_btree_entries; i++)
			index_add(&da(fix_idx,array_btree)->btree[i].end_index, new_len);
		if (fix_idx == w.upper_level) {
			w.upper_level_pos = s;
			w.ptr = &da(fix_idx,array_btree)->btree[s].node;
			index_from_int(&w.idx, 0);
			break;
		}
		fix_idx = pointer_get_data(da(fix_idx,array_btree)->btree[s].node);
	}

	new_tag = da_tag(new_array);
	if (new_tag == DATA_TAG_array_btree) {
		struct data *ac;
		/* test it in get_writable instead of here */
		ac = get_writable(w.ptr, err);
		if (unlikely(!ac))
			goto ret_err_balance;
		if (unlikely(!walk_for_write(&w, err))) {
			goto ret_err_balance;
		}
	} else if (new_tag == DATA_TAG_array_flat) {
		unsigned char *ptr = da_array_flat(new_array);
		w.upper_level_pos--;
		try_join_siblings_flat(&w, &ptr, append);
	} else if (new_tag == DATA_TAG_array_pointers) {
		pointer_t *ptr = &da(new_array,array_pointers)->pointer[0];
		w.upper_level_pos--;
		try_join_siblings_ptr(&w, &ptr, append);
	}

	index_free(&new_len);
	walk_free_context(&w);

	return pointer_get_data(result);

ret_err_balance:
	data_dereference(new_array);
	index_free(&new_len);
	walk_free_context(&w);
	pointer_dereference(result);
	return NULL;
}
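
/*
 * array_sub extracts the subrange [start, start + len) of an array. The
 * start and len indices are consumed, and the input array is dereferenced
 * when deref is true. Depending on the representation the result is a
 * fresh flat copy, a slice sharing the original flat data, an in-place
 * shrink of a uniquely owned pointer array, or a join of the relevant
 * btree children.
 */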
struct data * attr_fastcall array_sub(struct data *array, array_index_t start, array_index_t len, bool deref, ajla_error_t *err)
{
	struct data *result;
	bool can_modify = deref;

go_down:
	if (!data_is_writable(array))
		can_modify = false;

	switch (da_tag(array)) {
		case DATA_TAG_array_flat: {
			if (unlikely(index_eq_int(len, da(array,array_flat)->n_used_entries)))
				goto ret_array;
			if (unlikely(index_eq_int(len, 0))) {
				result = data_alloc_array_flat_mayfail(da(array,array_flat)->type, 0, 0, false, err pass_file_line);
				goto ret_free_start_len;
			}
			if (!can_modify) {
				result = data_alloc_array_slice_mayfail(array, da_array_flat(array), index_to_int(start), index_to_int(len), err pass_file_line);
				goto ret_free_start_len;
			}
			result = data_alloc_array_flat_mayfail(da(array,array_flat)->type, index_to_int(len), index_to_int(len), false, err pass_file_line);
			if (unlikely(!result))
				goto ret_free_start_len;
			memcpy(da_array_flat(result), da_array_flat(array) + index_to_int(start) * da(array,array_flat)->type->size, index_to_int(len) * da(array,array_flat)->type->size);
			goto ret_free_start_len;
		}
		case DATA_TAG_array_slice: {
			struct data *base;
			if (unlikely(index_eq_int(len, da(array,array_slice)->n_entries)))
				goto ret_array;
			if (unlikely(index_eq_int(len, 0))) {
				result = data_alloc_array_flat_mayfail(da(array,array_slice)->type, 0, 0, false, err pass_file_line);
				goto ret_free_start_len;
			}
			base = pointer_get_data(da(array,array_slice)->reference);
			result = data_alloc_array_slice_mayfail(base, da(array,array_slice)->flat_data_minus_data_array_offset + data_array_offset, index_to_int(start), index_to_int(len), err pass_file_line);
			goto ret_free_start_len;
		}
		case DATA_TAG_array_pointers: {
			int_default_t st, l, i;
			if (unlikely(index_eq_int(len, da(array,array_pointers)->n_used_entries)))
				goto ret_array;
			if (unlikely(index_eq_int(len, 0))) {
				result = data_alloc_array_pointers_mayfail(0, 0, err pass_file_line);
				goto ret_free_start_len;
			}
			st = index_to_int(start);
			l = index_to_int(len);
			if (can_modify) {
				int_default_t total = da(array,array_pointers)->pointer + da(array,array_pointers)->n_allocated_entries - da(array,array_pointers)->pointer_array;
				if (total / 2 > da(array,array_pointers)->n_used_entries)
					goto pointers_do_copy;
				for (i = 0; i < st; i++)
					pointer_dereference(da(array,array_pointers)->pointer[i]);
				for (i = st + l; i < da(array,array_pointers)->n_used_entries; i++)
					pointer_dereference(da(array,array_pointers)->pointer[i]);
				da(array,array_pointers)->pointer += st;
				da(array,array_pointers)->n_used_entries = l;
				da(array,array_pointers)->n_allocated_entries -= st;
				result = array;
				deref = false;
				goto ret_free_start_len;
			}
pointers_do_copy:
			result = data_alloc_array_pointers_mayfail(l, l, err pass_file_line);
			if (unlikely(!result))
				goto ret_free_start_len;
			for (i = 0; i < l; i++) {
				da(result,array_pointers)->pointer[i] = pointer_reference(&da(array,array_pointers)->pointer[st + i]);
			}
			goto ret_free_start_len;
		}
		case DATA_TAG_array_same: {
			if (unlikely(index_eq_index(len, da(array,array_same)->n_entries)))
				goto ret_array;
			if (unlikely(index_eq_int(len, 0))) {
				result = data_alloc_array_pointers_mayfail(0, 0, err pass_file_line);
				goto ret_free_start_len;
			}
			result = data_alloc_array_same_mayfail(len, err pass_file_line);
			if (likely(result != NULL)) {
				da(result,array_same)->pointer = pointer_reference(&da(array,array_same)->pointer);
			}
			index_free(&start);
			if (deref)
				data_dereference(array);
			return result;
		}
		case DATA_TAG_array_btree: {
			array_index_t max_step;
			btree_entries_t bt_pos;
			struct btree_level *levels = da(array,array_btree)->btree;
			if (unlikely(index_eq_index(len, levels[da(array,array_btree)->n_used_btree_entries - 1].end_index)))
				goto ret_array;
			if (unlikely(index_eq_int(len, 0))) {
				result = data_alloc_array_pointers_mayfail(0, 0, err pass_file_line);
				goto ret_free_start_len;
			}
			binary_search(btree_entries_t, da(array,array_btree)->n_used_btree_entries, bt_pos, false, index_ge_index(start, levels[bt_pos].end_index), break);

			index_sub3(&max_step, levels[bt_pos].end_index, start);
			if (bt_pos != 0) {
				struct btree_level *bt_prev = &levels[bt_pos - 1];
				index_sub(&start, bt_prev->end_index);
			}
			if (index_ge_index(max_step, len)) {
				pointer_t down_ptr;
				index_free(&max_step);
				down_ptr = levels[bt_pos].node;
				if (deref) {
					pointer_reference_owned(down_ptr);
					data_dereference(array);
				}
				array = pointer_get_data(down_ptr);
				goto go_down;
			}
			index_sub(&len, max_step);
			result = array_sub(pointer_get_data(levels[bt_pos].node), start, max_step, false, err);
			if (unlikely(!result)) {
				index_free(&len);
				return NULL;
			}
			do {
				array_index_t zero0;
				struct data *xa;
				bt_pos++;
				index_sub3(&max_step, levels[bt_pos].end_index, levels[bt_pos - 1].end_index);
				if (!index_ge_index(len, max_step)) {
					index_free(&max_step);
					index_copy(&max_step, len);
				}
				index_sub(&len, max_step);
				index_from_int(&zero0, 0);
				xa = array_sub(pointer_get_data(levels[bt_pos].node), zero0, max_step, false, err);
				result = array_join(result, xa, err);
				if (unlikely(!result)) {
					index_free(&len);
					return NULL;
				}
			} while (index_ge_int(len, 1));
			index_free(&len);
			if (deref)
				data_dereference(array);
			return result;
		}
		default:
			internal(file_line, "array_sub: invalid array tag %u", da_tag(array));
	}

ret_array:
	index_free(&start);
	index_free(&len);
	if (!deref)
		data_reference(array);
	return array;

ret_free_start_len:
	index_free(&start);
	index_free(&len);
	if (deref)
		data_dereference(array);
	return result;
}
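
/*
 * estimate_length clamps an array index to the int_default_t range; it is
 * used to size the chunks that array_create allocates in one step.
 */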
static int_default_t estimate_length(array_index_t length)
{
	if (likely(index_is_int(length)))
		return index_to_int(length);
	return signed_maximum(int_default_t);
}
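
/*
 * array_create builds an array of `length` elements. With a flat type it
 * fills the flat data from the template element `flat`; otherwise it
 * fills a pointer array with references to `ptr`. Very long arrays are
 * built in chunks of at most `est` elements and glued together with
 * array_join.
 *
 * The flat fill copies the template element one by one only for the first
 * few kilobytes and then replicates the already initialized prefix with
 * larger block copies. For example, with est = 10000 one-byte elements,
 * first_est is 4096: elements 0..4095 are written individually and the
 * rest is filled by two block copies of 4096 and 1808 elements taken from
 * the start of the array.
 */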
pointer_t array_create(array_index_t length, const struct type *flat_type, const unsigned char *flat, pointer_t ptr)
{
	pointer_t result = pointer_empty();
	int_default_t est, i;
	struct data *array;
	unsigned char *flat_ptr;
	ajla_error_t err, *err_ptr;

next_chunk:
	est = estimate_length(length);

	err_ptr = &err;
	if (likely(est > 1))
		err_ptr = MEM_DONT_TRY_TO_FREE;

	if (flat_type) {
		flat_size_t element_size;
		bool cnst = data_element_is_const(flat, flat_type->size);
		bool clear = cnst && !flat[0];
		array = data_alloc_array_flat_mayfail(flat_type, est, est, clear, err_ptr pass_file_line);
		if (unlikely(!array))
			goto alloc_error;

		flat_ptr = da_array_flat(array);
		element_size = flat_type->size;
		if (cnst) {
			memset(flat_ptr, flat[0], element_size * est);
		} else {
			int_default_t first_est = minimum(est, 4096 / element_size);
			for (i = 0; i < first_est; i++) {
				flat_ptr = mempcpy(flat_ptr, flat, element_size);
			}
			while (i < est) {
				int_default_t this_step = minimum(est - i, first_est);
				flat_ptr = mempcpy(flat_ptr, da_array_flat(array), this_step * element_size);
				i += this_step;
			}
		}
	} else {
		array = data_alloc_array_pointers_mayfail(est, est, err_ptr pass_file_line);
		if (unlikely(!array))
			goto alloc_error;
		if (unlikely(!est))
			pointer_dereference(ptr);
		else
			pointer_reference_owned_multiple(ptr, est - 1);
		for (i = 0; i < est; i++) {
			da(array,array_pointers)->pointer[i] = ptr;
		}
		ptr = pointer_empty();
	}

	if (likely(pointer_is_empty(result))) {
		result = pointer_data(array);
	} else {
		struct data *jr;
		jr = array_join(pointer_get_data(result), array, &err);
		if (unlikely(!jr)) {
			result = pointer_error(err, NULL, NULL pass_file_line);
			goto ret;
		}
		result = pointer_data(jr);
	}

	index_sub_int(&length, est);
	if (unlikely(index_ge_int(length, 1)))
		goto next_chunk;

ret:
	if (!pointer_is_empty(ptr))
		pointer_dereference(ptr);
	index_free(&length);
	return result;

alloc_error:
	if (!pointer_is_empty(result))
		pointer_dereference(result);
	result = pointer_error(err, NULL, NULL pass_file_line);
	goto ret;
}
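
/*
 * array_create_sparse creates an array of `length` elements that all
 * share the single value `ptr`, using the array_same representation. A
 * length below 1 degenerates to an ordinary empty array.
 */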
pointer_t array_create_sparse(array_index_t length, pointer_t ptr)
{
	struct data *array;
	ajla_error_t err;

	if (unlikely(!index_ge_int(length, 1))) {
		array_index_t z;
		index_free(&length);
		index_from_int(&z, 0);
		return array_create(z, NULL, NULL, ptr);
	}

	array = data_alloc_array_same_mayfail(length, &err pass_file_line);
	if (unlikely(!array)) {
		pointer_dereference(ptr);
		return pointer_error(err, NULL, NULL pass_file_line);
	}
	da(array,array_same)->pointer = ptr;

	return pointer_data(array);
}
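
/*
 * array_string creates a flat array of `length` elements of `flat_type`
 * initialized from the buffer `flat`; on allocation failure it returns an
 * error pointer instead.
 */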
pointer_t attr_fastcall array_string(int_default_t length, const struct type *flat_type, const unsigned char *flat)
{
	struct data *array;
	ajla_error_t err;

	array = data_alloc_array_flat_mayfail(flat_type, length, length, false, &err pass_file_line);
	if (unlikely(!array)) {
		return pointer_error(err, NULL, NULL pass_file_line);
	}
	memcpy(da_array_flat(array), flat, flat_type->size * length);

	return pointer_data(array);
}
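
/*
 * array_incomplete_decompose splits an incomplete array node into its
 * finished first part and the pointer to the rest. If the node is
 * uniquely owned it is freed in place; otherwise the parts are
 * re-referenced and the node is dereferenced.
 */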
void attr_fastcall array_incomplete_decompose(struct data *array, struct data **first, pointer_t *last)
{
	*first = pointer_get_data(da(array,array_incomplete)->first);
	ajla_assert_lo(!array_is_empty(*first), (file_line, "array_incomplete_decompose: the first part is empty"));
	if (data_is_writable(array)) {
		*last = da(array,array_incomplete)->next;
		data_free_r1(array);
	} else {
		data_reference(*first);
		*last = pointer_reference(&da(array,array_incomplete)->next);
		data_dereference(array);
	}
}
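
/*
 * array_incomplete_collapse repeatedly joins the finished first part of
 * an incomplete array with the finished part of its successor, rewriting
 * *ptr as it goes. It stops when the next part is still an unevaluated
 * thunk and returns whether it managed to collapse anything.
 */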
bool attr_fastcall array_incomplete_collapse(pointer_t *ptr)
{
	struct data *array, *first, *next, *next_first;
	pointer_t next_next;
	tag_t next_tag;
	ajla_error_t err;
	bool changed = false;

	array = pointer_get_data(*ptr);

try_to_join_again:
	pointer_follow(&da(array,array_incomplete)->next, false, next, PF_NOEVAL, NULL, NULL,
		/* ... */);

	next_tag = da_tag(next);
	if (next_tag != DATA_TAG_array_incomplete) {
		struct data *d;
		ajla_assert_lo(DATA_TAG_is_array(next_tag), (file_line, "array_incomplete_collapse: invalid tag %u", next_tag));
		array_incomplete_decompose(array, &first, &next_next);
		d = array_join(first, next, &err);
		if (unlikely(!d))
			*ptr = pointer_error(err, NULL, NULL pass_file_line);
		else
			*ptr = pointer_data(d);
		return true;
	} else {
		struct data *joined;
		array_incomplete_decompose(array, &first, &next_next);
		array_incomplete_decompose(next, &next_first, &next_next);
		joined = array_join(first, next_first, &err);
		if (unlikely(!joined)) {
			pointer_dereference(next_next);
			*ptr = pointer_error(err, NULL, NULL pass_file_line);
			return true;
		}
		array = data_alloc_array_incomplete(joined, next_next, &err pass_file_line);
		if (unlikely(!array)) {
			data_dereference(joined);
			pointer_dereference(next_next);
			*ptr = pointer_error(err, NULL, NULL pass_file_line);
			return true;
		}
		*ptr = pointer_data(array);
		changed = true;
		goto try_to_join_again;
	}
}
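
/*
 * array_from_flat_mem wraps a plain memory buffer of n_elements flat
 * elements in array data nodes, splitting the buffer into chunks when the
 * element count exceeds int_default_t and joining the chunks with
 * array_join.
 */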
struct data * attr_fastcall array_from_flat_mem(const struct type *type, const char *mem, size_t n_elements, ajla_error_t *mayfail)
{
	struct data *r = NULL;
	struct data *b;
	int_default_t s;

again:
	if (unlikely(n_elements > signed_maximum(int_default_t)))
		s = signed_maximum(int_default_t);
	else
		s = (int_default_t)n_elements;
	b = data_alloc_array_flat_mayfail(type, s, s, false, mayfail pass_file_line);
	if (unlikely(!b))
		goto ret_err;
	memcpy(da_array_flat(b), mem, s * type->size);
	if (!r) {
		r = b;
	} else {
		r = array_join(r, b, mayfail);
		if (unlikely(!r))
			return NULL;
	}
	if ((size_t)s < n_elements) {
		mem += s * type->size;
		n_elements -= s;
		goto again;
	}

	return r;

ret_err:
	if (r)
		data_dereference(r);
	return NULL;
}
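
/*
 * Debugging aid: when ARRAY_INDEX_T_COUNT_INDICES is defined, every live
 * array index is counted in n_indices so that array_index_done can report
 * leaked indices at shutdown.
 */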
#ifdef ARRAY_INDEX_T_COUNT_INDICES

shared_var refcount_t n_indices;

void index_increment_count(void)
{
	refcount_inc(&n_indices);
}

void index_decrement_count(void)
{
	if (unlikely(refcount_dec(&n_indices)))
		internal(file_line, "index_decrement_count: index refcount underflowed");
}

#endif

void name(array_index_init)(void)
{
#ifdef ARRAY_INDEX_T_COUNT_INDICES
	refcount_init(&n_indices);
#endif
}

void name(array_index_done)(void)
{
#ifdef ARRAY_INDEX_T_COUNT_INDICES
	if (unlikely(!refcount_is_one(&n_indices)))
		internal(file_line, "array_index_done: leaked %"PRIuMAX" array indices", (uintmax_t)refcount_get_nonatomic(&n_indices
) - 1);