1 /*
2 * Copyright (C) 2024 Mikulas Patocka
4 * This file is part of Ajla.
6 * Ajla is free software: you can redistribute it and/or modify it under the
7 * terms of the GNU General Public License as published by the Free Software
8 * Foundation, either version 3 of the License, or (at your option) any later
9 * version.
11 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
13 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License along with
16 * Ajla. If not, see <https://www.gnu.org/licenses/>.
19 #include "ajla.h"
21 #ifndef FILE_OMIT
23 #include "data.h"
25 #include "array.h"
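/*
 * Convert an mpint index to int_default_t. Returns false (discarding the
 * error) if the value does not fit in the native integer type.
 */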
28 bool index_export_to_int(const mpint_t *t, int_default_t *result)
30 ajla_error_t sink;
31 return cat(mpint_export_to_,int_default_t)(t, result, &sink);
34 static inline mpint_t *index_alloc_mp_(unsigned long bits argument_position)
36 mpint_t *result;
37 result = mem_alloc_compressed_mayfail(mpint_t *, sizeof(mpint_t), NULL);
38 mpint_alloc_mayfail(result, bits, NULL);
39 return result;
42 void index_free_mp_(array_index_t idx argument_position)
44 mpint_t *mp = index_get_mp_(idx pass_position);
45 mpint_free(mp);
46 mem_free_compressed(mp);
49 attr_noinline array_index_t index_copy_mp_(array_index_t idx argument_position)
51 mpint_t *mp;
52 array_index_t result;
53 mp = mem_alloc_compressed_mayfail(mpint_t *, sizeof(mpint_t), NULL);
54 mpint_alloc_copy_mayfail(mp, index_get_mp_(idx pass_position), NULL);
55 index_set_mp_(&result, mp pass_position);
56 return result;
59 attr_noinline bool index_ge_index_mp_(array_index_t idx1, array_index_t idx2 argument_position)
61 ajla_flat_option_t result;
62 mpint_less_equal(index_get_mp_(idx2 pass_position), index_get_mp_(idx1 pass_position), &result, NULL);
63 return result;
66 attr_noinline bool index_eq_index_mp_(array_index_t idx1, array_index_t idx2 argument_position)
68 ajla_flat_option_t result;
69 mpint_equal(index_get_mp_(idx2 pass_position), index_get_mp_(idx1 pass_position), &result, NULL);
70 return result;
73 attr_noinline array_index_t index_op_mp_(array_index_t idx2, array_index_t idx3, unsigned flags, ajla_error_t *err argument_position)
75 mpint_t mp2, mp3;
76 mpint_t *mp2_p, *mp3_p;
77 mpint_t *mp;
78 bool succeeded;
79 array_index_t result;
80 if (!index_is_mp_(idx2 pass_position)) {
81 if (flags & INDEX_OP_MP_ARG3_) {
82 mp2_p = &mp2;
83 } else {
84 mp2_p = mem_alloc_compressed_mayfail(mpint_t *, sizeof(mpint_t), NULL);
86 index_init_from_int(mp2_p, index_to_int_(idx2 pass_position));
87 } else {
88 mp2_p = index_get_mp_(idx2 pass_position);
90 if (!index_is_mp_(idx3 pass_position)) {
91 index_init_from_int(&mp3, index_to_int_(idx3 pass_position));
92 mp3_p = &mp3;
93 } else {
94 mp3_p = index_get_mp_(idx3 pass_position);
96 if (flags & INDEX_OP_MP_ARG3_) {
97 mp = index_alloc_mp_(maximum(mpint_estimate_bits(mp2_p), mpint_estimate_bits(mp3_p)) pass_position);
98 if (!(flags & INDEX_OP_MP_SUB_))
99 succeeded = mpint_add(mp2_p, mp3_p, mp, err);
100 else
101 succeeded = mpint_subtract(mp2_p, mp3_p, mp, err);
102 index_set_mp_(&result, mp pass_position);
103 index_alloc_leak_(&result pass_position);
104 } else {
105 if (!(flags & INDEX_OP_MP_SUB_))
106 succeeded = mpint_add(mp2_p, mp3_p, mp2_p, err);
107 else
108 succeeded = mpint_subtract(mp2_p, mp3_p, mp2_p, err);
109 index_set_mp_(&result, mp2_p pass_position);
110 index_free_leak_(&idx2 pass_position);
111 index_alloc_leak_(&result pass_position);
113 if (mp2_p == &mp2)
114 mpint_free(&mp2);
115 if (mp3_p == &mp3)
116 mpint_free(&mp3);
117 if (unlikely(!succeeded)) {
118 index_free_(&result pass_file_line);
119 return index_invalid();
121 #ifndef UNUSUAL_MPINT_ARRAY_INDICES
122 if (flags & INDEX_OP_MP_SUB_) {
123 int_default_t id;
124 if (index_export_to_int(index_get_mp_(result pass_position), &id)) {
125 index_free_mp_(result pass_position);
126 ajla_assert_lo(id >= 0, (caller_file_line, "index_op_mp_: the result is negative: %"PRIdMAX"", (intmax_t)id));
127 index_set_value_(&result, (uint_default_t)id);
130 #endif
131 return result;
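/*
 * Round an array length up to the next power of two for allocation.
 * On overflow of int_default_t the result is clamped to the largest
 * representable value; a length of 0 stays 0.
 */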
135 static int_default_t array_align_alloc(int_default_t len)
137 #if defined(__IBMC__) && INT_DEFAULT_BITS > 32
138 /* compiler bug - causes internal error */
139 volatile
140 #endif
141 uint_default_t val = (uint_default_t)len;
142 #if defined(HAVE_BUILTIN_CLZ)
143 if (is_power_of_2(sizeof(int_default_t)) && sizeof(int_default_t) == sizeof(unsigned)) {
144 val = (uint_default_t)1 << ((unsigned)(sizeof(uint_default_t) * 8 - 1) CLZ_BSR_OP __builtin_clz(val + val - 1));
145 if (unlikely((int_default_t)val < 0))
146 return likely(!len) ? 0 : val - 1;
147 else
148 return val;
149 } else if (is_power_of_2(sizeof(int_default_t)) && sizeof(int_default_t) == sizeof(unsigned long long)) {
150 val = (uint_default_t)1 << ((unsigned)(sizeof(uint_default_t) * 8 - 1) CLZ_BSR_OP __builtin_clzll(val + val - 1));
151 if (unlikely((int_default_t)val < 0))
152 return likely(!len) ? 0 : val - 1;
153 else
154 return val;
155 } else
156 #endif
158 val--;
159 val |= val >> 1;
160 val |= val >> 2;
161 val |= val >> 4;
162 val |= val >> 8;
163 val |= val >> 15 >> 1;
164 val |= val >> 15 >> 15 >> 2;
165 val |= val >> 15 >> 15 >> 15 >> 15 >> 4;
166 val++;
167 if (unlikely((int_default_t)val < 0))
168 return val - 1;
169 else
170 return val;
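/*
 * Locate element idx in an array. On success exactly one of *result_ptr
 * (a pointer slot) or *result_flat (a flat element, described by *flat_type)
 * is set. If run is non-NULL, it receives the number of consecutive elements
 * available in the same leaf (negative for array_same leaves, whose elements
 * all share a single pointer). idx is consumed; returns false when the index
 * is out of range.
 */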
175 bool attr_fastcall array_read(struct data *array, array_index_t idx, pointer_t **result_ptr, unsigned char **result_flat, const struct type **flat_type, int_default_t *run)
177 go_down:
178 switch (da_tag(array)) {
179 case DATA_TAG_array_flat: {
180 if (unlikely(index_ge_int(idx, da(array,array_flat)->n_used_entries)))
181 goto invalid_index;
182 if (run)
183 *run = da(array,array_flat)->n_used_entries - index_to_int(idx);
184 *result_ptr = NULL;
185 *flat_type = da(array,array_flat)->type;
186 *result_flat = da_array_flat(array) + index_to_int(idx) * (size_t)(*flat_type)->size;
187 break;
189 case DATA_TAG_array_slice: {
190 if (unlikely(index_ge_int(idx, da(array,array_slice)->n_entries)))
191 goto invalid_index;
192 if (run)
193 *run = da(array,array_slice)->n_entries - index_to_int(idx);
194 *result_ptr = NULL;
195 *flat_type = da(array,array_slice)->type;
196 *result_flat = da(array,array_slice)->flat_data_minus_data_array_offset + data_array_offset + index_to_int(idx) * (size_t)(*flat_type)->size;
197 break;
199 case DATA_TAG_array_pointers: {
200 if (unlikely(index_ge_int(idx, da(array,array_pointers)->n_used_entries)))
201 goto invalid_index;
202 if (run)
203 *run = da(array,array_pointers)->n_used_entries - index_to_int(idx);
204 *result_ptr = &da(array,array_pointers)->pointer[index_to_int(idx)];
205 *result_flat = NULL;
206 break;
208 case DATA_TAG_array_same: {
209 if (unlikely(index_ge_index(idx, da(array,array_same)->n_entries)))
210 goto invalid_index;
211 if (run) {
212 array_index_t idx_diff;
213 index_sub3(&idx_diff, da(array,array_same)->n_entries, idx);
214 if (likely(index_is_int(idx_diff))) {
215 *run = -index_to_int(idx_diff);
216 } else {
217 *run = sign_bit(int_default_t);
219 index_free(&idx_diff);
221 *result_ptr = &da(array,array_same)->pointer;
222 *result_flat = NULL;
223 break;
225 case DATA_TAG_array_btree: {
226 btree_entries_t bt_pos;
227 pointer_t *new_array_ptr;
228 struct data *new_array;
229 binary_search(btree_entries_t, da(array,array_btree)->n_used_btree_entries, bt_pos, false, index_ge_index(idx, da(array,array_btree)->btree[bt_pos].end_index), break);
230 if (unlikely(bt_pos == da(array,array_btree)->n_used_btree_entries))
231 goto invalid_index;
232 if (bt_pos != 0) {
233 struct btree_level *bt_prev = &da(array,array_btree)->btree[bt_pos - 1];
234 index_sub(&idx, bt_prev->end_index);
236 new_array_ptr = &da(array,array_btree)->btree[bt_pos].node;
237 new_array = pointer_get_data(*new_array_ptr);
238 da_array_assert_son(array, new_array);
239 array = new_array;
240 goto go_down;
242 default:
243 internal(file_line, "array_read: invalid array tag %u", da_tag(array));
246 index_free(&idx);
247 return true;
249 invalid_index:
250 index_free(&idx);
251 return false;
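/*
 * State for descending an array btree for modification: the tree root,
 * the pointer slot currently being visited, its parent btree node with the
 * position inside it, and the index relative to the current subtree.
 */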
255 struct walk_context {
256 pointer_t *root;
257 pointer_t *ptr;
258 struct data *upper_level;
259 btree_entries_t upper_level_pos;
260 array_index_t idx;
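/*
 * The array_clone_* helpers replace *ptr with a freshly allocated copy that
 * has room for n_allocated entries, duplicate (or re-reference) the used
 * entries and drop the reference to the original node.
 */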
263 static struct data *array_clone_flat(pointer_t *ptr, int_default_t n_allocated, ajla_error_t *err)
265 struct data *orig, *clone;
267 orig = pointer_get_data(*ptr);
269 ajla_assert(n_allocated >= da(orig,array_flat)->n_used_entries, (file_line, "array_clone_flat: trimming btree from %"PRIdMAX" to %"PRIdMAX"", (intmax_t)da(orig,array_flat)->n_used_entries, (intmax_t)n_allocated));
271 clone = data_alloc_array_flat_mayfail(da(orig,array_flat)->type, n_allocated, da(orig,array_flat)->n_used_entries, false, err pass_file_line);
272 if (unlikely(!clone))
273 return NULL;
275 memcpy(da_array_flat(clone), da_array_flat(orig), da(orig,array_flat)->n_used_entries * da_array_flat_element_size(orig));
277 pointer_dereference(*ptr);
278 *ptr = pointer_data(clone);
279 return clone;
282 static struct data *array_clone_pointers(pointer_t *ptr, int_default_t n_allocated, ajla_error_t *err)
284 struct data *orig, *clone;
285 int_default_t i, n_used;
287 orig = pointer_get_data(*ptr);
289 ajla_assert(n_allocated >= da(orig,array_pointers)->n_used_entries, (file_line, "array_clone_pointers: trimming btree from %"PRIdMAX" to %"PRIdMAX"", (intmax_t)da(orig,array_pointers)->n_used_entries, (intmax_t)n_allocated));
291 clone = data_alloc_array_pointers_mayfail(n_allocated, da(orig,array_pointers)->n_used_entries, err pass_file_line);
292 if (unlikely(!clone))
293 return NULL;
295 n_used = da(clone,array_pointers)->n_used_entries;
296 for (i = 0; i < n_used; i++)
297 da(clone,array_pointers)->pointer[i] = pointer_reference(&da(orig,array_pointers)->pointer[i]);
299 pointer_dereference(*ptr);
300 *ptr = pointer_data(clone);
301 return clone;
304 static struct data *array_clone_btree(pointer_t *ptr, btree_entries_t n_allocated, ajla_error_t *err)
306 struct data *orig, *clone;
307 btree_entries_t i, n;
309 orig = pointer_get_data(*ptr);
311 ajla_assert(n_allocated >= da(orig,array_btree)->n_used_btree_entries, (file_line, "array_clone_btree: trimming btree from %"PRIuMAX" to %"PRIuMAX"", (uintmax_t)da(orig,array_btree)->n_used_btree_entries, (uintmax_t)n_allocated));
313 clone = data_alloc_flexible(array_btree, btree, n_allocated, err);
314 if (unlikely(!clone))
315 return NULL;
316 n = da(clone,array_btree)->n_used_btree_entries = da(orig,array_btree)->n_used_btree_entries;
317 da(clone,array_btree)->n_allocated_btree_entries = n_allocated;
318 da(clone,array_btree)->depth = da(orig,array_btree)->depth;
319 for (i = 0; i < n; i++) {
320 index_copy(&da(clone,array_btree)->btree[i].end_index, da(orig,array_btree)->btree[i].end_index);
321 pointer_reference_owned(da(clone,array_btree)->btree[i].node = da(orig,array_btree)->btree[i].node);
324 pointer_dereference(*ptr);
325 *ptr = pointer_data(clone);
326 return clone;
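/*
 * Make a writable copy of the node referenced by *ptr. Slices are
 * materialized as flat arrays; the other representations are cloned with
 * their current allocation size.
 */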
329 struct data *array_clone(pointer_t *ptr, ajla_error_t *err)
331 struct data *orig, *clone;
333 orig = pointer_get_data(*ptr);
334 switch (da_tag(orig)) {
335 case DATA_TAG_array_flat: {
336 return array_clone_flat(ptr, da(orig,array_flat)->n_allocated_entries, err);
338 case DATA_TAG_array_slice: {
339 clone = data_alloc_array_flat_mayfail(da(orig,array_slice)->type, da(orig,array_slice)->n_entries, da(orig,array_slice)->n_entries, false, err pass_file_line);
340 if (unlikely(!clone))
341 return NULL;
342 memcpy(da_array_flat(clone), da(orig,array_slice)->flat_data_minus_data_array_offset + data_array_offset, da(orig,array_slice)->n_entries * da_array_flat_element_size(clone));
343 break;
345 case DATA_TAG_array_pointers: {
346 return array_clone_pointers(ptr, da(orig,array_pointers)->n_allocated_entries, err);
348 case DATA_TAG_array_same: {
349 clone = data_alloc(array_same, err);
350 if (unlikely(!clone))
351 return NULL;
352 index_copy(&da(clone,array_same)->n_entries, da(orig,array_same)->n_entries);
353 da(clone,array_same)->pointer = pointer_reference(&da(orig,array_same)->pointer);
354 break;
356 case DATA_TAG_array_btree: {
357 return array_clone_btree(ptr, da(orig,array_btree)->n_allocated_btree_entries, err);
359 default:
360 internal(file_line, "array_clone: invalid array tag %u", da_tag(orig));
362 pointer_dereference(*ptr);
363 *ptr = pointer_data(clone);
364 return clone;
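/*
 * Return the node behind *ptr if it is exclusively owned, otherwise clone it
 * so that the caller may modify it.
 */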
367 static struct data *get_writable(pointer_t *ptr, ajla_error_t *err)
369 struct data *array = pointer_get_data(*ptr);
370 if (unlikely(!data_is_writable(array)))
371 return array_clone(ptr, err);
372 else
373 return array;
376 enum copy_t {
377 copy,
378 copy_add,
379 copy_sub,
380 move_sub
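/*
 * Copy n btree entries from source to target. 'copy' duplicates the end
 * indices unchanged, 'copy_add'/'copy_sub' shift them by idx_shift, and
 * 'move_sub' transfers ownership of the index and then subtracts idx_shift.
 */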
383 static void copy_btree_nodes_(struct btree_level *target, const struct btree_level *source, btree_entries_t n, enum copy_t cp, array_index_t idx_shift argument_position)
385 btree_entries_t i;
386 for (i = 0; i < n; i++) {
387 switch (cp) {
388 case copy:
389 index_copy_(&target->end_index, source->end_index pass_position);
390 break;
391 case copy_add:
392 index_add3_(&target->end_index, source->end_index, idx_shift, NULL pass_position);
393 break;
394 case copy_sub:
395 index_sub3_(&target->end_index, source->end_index, idx_shift pass_position);
396 break;
397 case move_sub:
398 target->end_index = source->end_index;
399 index_sub(&target->end_index, idx_shift);
400 break;
402 target->node = source->node;
403 target++;
404 source++;
407 #define copy_btree_nodes(target, source, n, cp, idx_shift) copy_btree_nodes_(target, source, n, cp, idx_shift pass_file_line)
409 static void free_indices_range(struct btree_level *l, btree_entries_t n)
411 while (n--)
412 index_free(&l++->end_index);
415 static void free_btree_indices(struct data *array)
417 free_indices_range(da(array,array_btree)->btree, da(array,array_btree)->n_used_btree_entries);
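/*
 * Split a full btree node into two halves. *left_idx receives the end index
 * of the left half and *right_idx the end index of the whole node; the right
 * half's entries are rebased by subtracting *left_idx. The original node is
 * left for the caller to free.
 */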
420 static bool split_btree_node(struct data *array, struct data **left, array_index_t *left_idx, struct data **right, array_index_t *right_idx, ajla_error_t *err)
422 struct data *r1, *r2;
423 btree_entries_t boundary = (da(array,array_btree)->n_used_btree_entries + 1) >> 1;
425 index_copy(left_idx, da(array,array_btree)->btree[boundary - 1].end_index);
426 index_copy(right_idx, da(array,array_btree)->btree[da(array,array_btree)->n_used_btree_entries - 1].end_index);
428 r1 = data_alloc_flexible(array_btree, btree, BTREE_MAX_SIZE, err);
429 if (unlikely(!r1))
430 goto fail3;
431 da(r1,array_btree)->n_allocated_btree_entries = BTREE_MAX_SIZE;
432 da(r1,array_btree)->n_used_btree_entries = boundary;
433 da(r1,array_btree)->depth = da(array,array_btree)->depth;
434 copy_btree_nodes(da(r1,array_btree)->btree, da(array,array_btree)->btree, boundary, copy, *left_idx);
435 r2 = data_alloc_flexible(array_btree, btree, BTREE_MAX_SIZE, err);
436 if (unlikely(!r2))
437 goto fail5;
438 da(r2,array_btree)->n_allocated_btree_entries = BTREE_MAX_SIZE;
439 da(r2,array_btree)->n_used_btree_entries = da(array,array_btree)->n_used_btree_entries - boundary;
440 da(r2,array_btree)->depth = da(array,array_btree)->depth;
442 copy_btree_nodes(da(r2,array_btree)->btree, da(array,array_btree)->btree + boundary, da(r2,array_btree)->n_used_btree_entries, copy_sub, *left_idx);
444 *left = r1;
445 *right = r2;
447 return true;
449 fail5:
450 free_btree_indices(r1);
451 data_free_r1(r1);
452 fail3:
453 *left = array; /* avoid warning */
454 *right = array; /* avoid warning */
455 index_free(right_idx);
456 index_free(left_idx);
457 return false;
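/*
 * Open up n consecutive child slots in the parent btree node at the current
 * walk position, allocating a new, one level deeper root when there is no
 * parent. Returns a pointer to the first slot; *prev_idx is set to the end
 * index of the preceding sibling (0 if there is none).
 */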
460 static struct btree_level *expand_parent(struct walk_context *w, btree_entries_t n, array_index_t *prev_idx, ajla_error_t *err)
462 struct btree_level *ret;
463 if (unlikely(!w->upper_level)) {
464 struct data *root;
465 index_from_int(prev_idx, 0);
466 w->upper_level = root = data_alloc_flexible(array_btree, btree, n + BTREE_MAX_NODE_EXPAND, err);
467 if (unlikely(!root)) {
468 index_free(prev_idx);
469 return NULL;
471 da(root,array_btree)->n_allocated_btree_entries = n + BTREE_MAX_NODE_EXPAND;
472 da(root,array_btree)->n_used_btree_entries = n;
473 da(root,array_btree)->depth = da_array_depth(pointer_get_data(*w->root)) + 1;
474 *w->root = pointer_data(root);
475 ret = da(root,array_btree)->btree;
476 } else {
477 ajla_assert_lo(da(w->upper_level,array_btree)->n_used_btree_entries + n - 1 <= da(w->upper_level,array_btree)->n_allocated_btree_entries, (file_line, "expand_parent: parent node too small: %"PRIuMAX", %"PRIuMAX", %"PRIuMAX"", (uintmax_t)da(w->upper_level,array_btree)->n_used_btree_entries, (uintmax_t)n, (uintmax_t)da(w->upper_level,array_btree)->n_allocated_btree_entries));
478 if (!w->upper_level_pos) {
479 index_from_int(prev_idx, 0);
480 } else {
481 index_copy(prev_idx, da(w->upper_level,array_btree)->btree[w->upper_level_pos - 1].end_index);
483 index_free(&da(w->upper_level,array_btree)->btree[w->upper_level_pos].end_index);
484 memmove(da(w->upper_level,array_btree)->btree + w->upper_level_pos + n, da(w->upper_level,array_btree)->btree + w->upper_level_pos + 1, (da(w->upper_level,array_btree)->n_used_btree_entries - w->upper_level_pos - 1) * sizeof(struct btree_level));
485 da(w->upper_level,array_btree)->n_used_btree_entries += n - 1;
486 ret = da(w->upper_level,array_btree)->btree + w->upper_level_pos;
488 #ifdef DEBUG
489 (void)memset(ret, 0x01, n * sizeof(struct btree_level));
490 #endif
491 return ret;
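/*
 * Make sure the current btree node can take BTREE_MAX_NODE_EXPAND more
 * entries: grow it by cloning into a larger allocation, or, when it is
 * already close to BTREE_MAX_SIZE, split it and link both halves into the
 * parent. Returns the node covering w->idx, adjusting w->idx when the walk
 * continues in the right half.
 */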
494 static attr_noinline struct data *expand_btree_node(struct walk_context *w, ajla_error_t *err)
496 struct data *array = pointer_get_data(*w->ptr);
497 btree_entries_t old_size = da(array,array_btree)->n_allocated_btree_entries;
499 if (old_size <= BTREE_MAX_SIZE - BTREE_MAX_NODE_EXPAND) {
500 size_t new_size = (size_t)old_size * 2;
501 if (unlikely(new_size < da(array,array_btree)->n_used_btree_entries + (size_t)BTREE_MAX_NODE_EXPAND))
502 new_size = da(array,array_btree)->n_used_btree_entries + (size_t)BTREE_MAX_NODE_EXPAND;
503 if (unlikely(new_size > BTREE_MAX_SIZE))
504 new_size = BTREE_MAX_SIZE;
505 array = array_clone_btree(w->ptr, new_size, err);
506 if (unlikely(!array))
507 return NULL;
508 } else {
509 struct data *left, *right;
510 array_index_t prev_idx, left_idx, right_idx;
511 struct btree_level *split;
513 if (unlikely(!split_btree_node(array, &left, &left_idx, &right, &right_idx, err)))
514 return NULL;
515 split = expand_parent(w, 2, &prev_idx, err);
516 if (unlikely(!split)) {
517 index_free(&left_idx);
518 index_free(&right_idx);
519 free_btree_indices(left);
520 free_btree_indices(right);
521 data_free_r1(left);
522 data_free_r1(right);
523 return NULL;
526 free_btree_indices(array);
527 data_free_r1(array);
529 split[0].node = pointer_data(left);
530 index_add3(&split[0].end_index, prev_idx, left_idx);
531 split[1].node = pointer_data(right);
532 index_add3(&split[1].end_index, prev_idx, right_idx);
533 if (!index_ge_index(w->idx, left_idx)) {
534 array = left;
535 } else {
536 index_sub(&w->idx, left_idx);
537 array = right;
539 index_free(&left_idx);
540 index_free(&right_idx);
541 index_free(&prev_idx);
544 return array;
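/*
 * The current node has dropped below BTREE_MIN_SIZE + BTREE_MAX_NODE_COLLAPSE
 * entries: merge it with a neighbouring sibling, or redistribute entries so
 * that both siblings stay adequately filled. w->idx is adjusted to stay
 * relative to the node that is returned (the one to descend into next).
 */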
547 static attr_noinline struct data *rebalance_btree_nodes(struct walk_context *w, ajla_error_t *err)
549 struct data *left, *right;
550 struct data *upper = w->upper_level;
551 bool merge_previous = !!w->upper_level_pos;
552 btree_entries_t pos = w->upper_level_pos - (int)merge_previous;
554 left = get_writable(&da(upper,array_btree)->btree[pos].node, err);
555 if (unlikely(!left))
556 return NULL;
557 right = get_writable(&da(upper,array_btree)->btree[pos + 1].node, err);
558 if (unlikely(!right))
559 return NULL;
560 if (da(left,array_btree)->n_used_btree_entries + da(right,array_btree)->n_used_btree_entries <= BTREE_MAX_SIZE - BTREE_MAX_NODE_EXPAND) {
561 btree_entries_t boundary;
562 boundary = da(left,array_btree)->n_used_btree_entries;
563 if (da(left,array_btree)->n_allocated_btree_entries < boundary + da(right,array_btree)->n_used_btree_entries + BTREE_MAX_NODE_EXPAND) {
564 left = array_clone_btree(&da(upper,array_btree)->btree[pos].node, BTREE_MAX_SIZE, err);
565 if (unlikely(!left))
566 return NULL;
568 if (merge_previous) {
569 index_add(&w->idx, da(left,array_btree)->btree[boundary - 1].end_index);
571 copy_btree_nodes(da(left,array_btree)->btree + boundary, da(right,array_btree)->btree, da(right,array_btree)->n_used_btree_entries, copy_add, da(left,array_btree)->btree[boundary - 1].end_index);
572 da(left,array_btree)->n_used_btree_entries += da(right,array_btree)->n_used_btree_entries;
573 free_btree_indices(right);
574 data_free_r1(right);
575 index_free(&da(upper,array_btree)->btree[pos].end_index);
576 da(upper,array_btree)->btree[pos].end_index = da(upper,array_btree)->btree[pos + 1].end_index;
577 memmove(da(upper,array_btree)->btree + pos + 1, da(upper,array_btree)->btree + pos + 2, (da(upper,array_btree)->n_used_btree_entries - pos - 2) * sizeof(struct btree_level));
578 da(upper,array_btree)->n_used_btree_entries--;
579 if (unlikely(da(upper,array_btree)->n_used_btree_entries == 1)) {
580 *w->root = da(upper,array_btree)->btree[0].node;
581 free_btree_indices(upper);
582 data_free_r1(upper);
584 return left;
585 } else {
586 struct data *retn;
587 array_index_t split_index;
588 btree_entries_t half = (da(left,array_btree)->n_used_btree_entries + da(right,array_btree)->n_used_btree_entries + 1) >> 1;
589 if (da(left,array_btree)->n_allocated_btree_entries < half + BTREE_MAX_NODE_EXPAND) {
590 left = array_clone_btree(&da(upper,array_btree)->btree[pos].node, BTREE_MAX_SIZE, err);
591 if (unlikely(!left))
592 return NULL;
594 if (da(right,array_btree)->n_allocated_btree_entries < half + BTREE_MAX_NODE_EXPAND) {
595 right = array_clone_btree(&da(upper,array_btree)->btree[pos + 1].node, BTREE_MAX_SIZE, err);
596 if (unlikely(!right))
597 return NULL;
599 if (da(left,array_btree)->n_used_btree_entries < half) {
600 array_index_t sub_idx;
601 btree_entries_t diff = half - da(left,array_btree)->n_used_btree_entries;
602 if (!merge_previous) {
603 retn = left;
604 } else {
605 if (!index_ge_index(w->idx, da(right,array_btree)->btree[diff - 1].end_index)) {
606 index_add(&w->idx, da(left,array_btree)->btree[da(left,array_btree)->n_used_btree_entries - 1].end_index);
607 retn = left;
608 } else {
609 index_sub(&w->idx, da(right,array_btree)->btree[diff - 1].end_index);
610 retn = right;
613 copy_btree_nodes(da(left,array_btree)->btree + da(left,array_btree)->n_used_btree_entries, da(right,array_btree)->btree, diff, copy_add, da(left,array_btree)->btree[da(left,array_btree)->n_used_btree_entries - 1].end_index);
614 index_copy(&sub_idx, da(right,array_btree)->btree[diff - 1].end_index);
615 free_indices_range(da(right,array_btree)->btree, diff);
616 copy_btree_nodes(da(right,array_btree)->btree, da(right,array_btree)->btree + diff, da(right,array_btree)->n_used_btree_entries - diff, move_sub, sub_idx);
617 index_free(&sub_idx);
618 da(left,array_btree)->n_used_btree_entries = half;
619 da(right,array_btree)->n_used_btree_entries -= diff;
620 } else {
621 btree_entries_t diff = da(left,array_btree)->n_used_btree_entries - half;
622 array_index_t idx_diff;
623 struct btree_level *l;
625 l = mem_alloc_array_mayfail(mem_alloc_mayfail, struct btree_level *, 0, 0, da(right,array_btree)->n_used_btree_entries + diff, sizeof(struct btree_level), err);
626 if (unlikely(!l))
627 return NULL;
629 index_sub3(&idx_diff, da(left,array_btree)->btree[da(left,array_btree)->n_used_btree_entries - 1].end_index, da(left,array_btree)->btree[half - 1].end_index);
630 copy_btree_nodes(l, da(left,array_btree)->btree + half, diff, copy_sub, da(left,array_btree)->btree[half - 1].end_index);
631 copy_btree_nodes(l + diff, da(right,array_btree)->btree, da(right,array_btree)->n_used_btree_entries, copy_add, idx_diff);
632 free_btree_indices(right);
633 memcpy(da(right,array_btree)->btree, l, (da(right,array_btree)->n_used_btree_entries + diff) * sizeof(struct btree_level));
634 mem_free(l);
635 index_free(&idx_diff);
636 free_indices_range(da(left,array_btree)->btree + half, da(left,array_btree)->n_used_btree_entries - half);
637 da(left,array_btree)->n_used_btree_entries = half;
638 da(right,array_btree)->n_used_btree_entries += diff;
639 if (!merge_previous) {
640 if (!index_ge_index(w->idx, da(left,array_btree)->btree[half - 1].end_index)) {
641 retn = left;
642 } else {
643 index_sub(&w->idx, da(left,array_btree)->btree[half - 1].end_index);
644 retn = right;
646 } else {
647 index_add(&w->idx, da(right,array_btree)->btree[diff - 1].end_index);
648 retn = right;
651 index_copy(&split_index, da(left,array_btree)->btree[half - 1].end_index);
652 if (pos)
653 index_add(&split_index, da(upper,array_btree)->btree[pos - 1].end_index);
654 index_free(&da(upper,array_btree)->btree[pos].end_index);
655 da(upper,array_btree)->btree[pos].end_index = split_index;
656 return retn;
660 static void walk_init_context(struct walk_context *w, pointer_t *root, array_index_t idx)
662 w->root = root;
663 w->ptr = root;
664 w->upper_level = NULL;
665 w->upper_level_pos = 0;
666 w->idx = idx;
669 static void walk_free_context(struct walk_context *w)
671 index_free(&w->idx);
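/*
 * Descend one btree level for modification: make the node writable, expand
 * or rebalance it if needed, find the child covering w->idx, subtract the
 * preceding end index from w->idx and move the context down to that child.
 * Fails with AJLA_ERROR_INDEX_OUT_OF_RANGE when the index is past the end.
 */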
674 static bool walk_for_write(struct walk_context *w, ajla_error_t *err)
676 struct data *array;
677 btree_entries_t bt_pos;
679 array = pointer_get_data(*w->ptr);
681 if (unlikely(da(array,array_btree)->n_allocated_btree_entries - da(array,array_btree)->n_used_btree_entries < BTREE_MAX_NODE_EXPAND)) {
682 array = get_writable(w->ptr, err);
683 if (unlikely(!array))
684 return false;
685 array = expand_btree_node(w, err);
686 if (unlikely(!array))
687 return false;
689 if (w->upper_level && unlikely(da(array,array_btree)->n_used_btree_entries < BTREE_MIN_SIZE + BTREE_MAX_NODE_COLLAPSE)) {
690 array = get_writable(w->ptr, err);
691 if (unlikely(!array))
692 return false;
693 array = rebalance_btree_nodes(w, err);
694 if (unlikely(!array))
695 return false;
698 binary_search(btree_entries_t, da(array,array_btree)->n_used_btree_entries, bt_pos, false, index_ge_index(w->idx, da(array,array_btree)->btree[bt_pos].end_index), break);
699 if (unlikely(bt_pos == da(array,array_btree)->n_used_btree_entries))
700 goto invalid_index;
701 if (bt_pos != 0) {
702 struct btree_level *bt_prev = &da(array,array_btree)->btree[bt_pos - 1];
703 index_sub(&w->idx, bt_prev->end_index);
705 w->upper_level = array;
706 w->upper_level_pos = bt_pos;
707 w->ptr = &da(array,array_btree)->btree[bt_pos].node;
708 da_array_assert_son(w->upper_level, pointer_get_data(*w->ptr));
709 return true;
711 invalid_index:
712 fatal_mayfail(error_ajla(EC_SYNC, AJLA_ERROR_INDEX_OUT_OF_RANGE), err, "index out of range");
713 return false;
716 static struct data *alloc_one_ptr(pointer_t target, ajla_error_t *err)
718 struct data *pointer = data_alloc_array_pointers_mayfail(1, 1, err pass_file_line);
719 if (unlikely(!pointer))
720 return NULL;
721 da(pointer,array_pointers)->pointer[0] = target;
722 return pointer;
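/*
 * Used when the first element of the current leaf is moved out as a pointer:
 * store ptr either at the end of the preceding pointer leaf (growing it up to
 * SCALAR_SPLIT_SIZE) or in a new one-element node linked before the current
 * leaf, and shift the boundary index by one; the caller then removes the
 * element from the current leaf. *result_ptr points at the stored pointer.
 * leaf_append_ptr below is the mirror image for the last element.
 */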
725 static bool leaf_prepend_ptr(struct walk_context *w, array_index_t existing_size, pointer_t ptr, pointer_t **result_ptr, ajla_error_t *err)
727 struct btree_level *split;
728 array_index_t prev_idx;
729 pointer_t *sibling_pointer;
730 struct data *sibling;
731 pointer_t orig = *w->ptr;
733 if (unlikely(!w->upper_level) || unlikely(!w->upper_level_pos))
734 goto expand;
736 sibling_pointer = &da(w->upper_level,array_btree)->btree[w->upper_level_pos - 1].node;
737 sibling = pointer_get_data(*sibling_pointer);
738 if (da_tag(sibling) == DATA_TAG_array_pointers) {
739 if (da(sibling,array_pointers)->n_used_entries == da(sibling,array_pointers)->n_allocated_entries) {
740 uint_default_t new_size;
741 if (unlikely(da(sibling,array_pointers)->n_allocated_entries >= SCALAR_SPLIT_SIZE))
742 goto expand;
743 new_size = (uint_default_t)da(sibling,array_pointers)->n_allocated_entries * 2;
744 if (new_size > SCALAR_SPLIT_SIZE)
745 new_size = SCALAR_SPLIT_SIZE;
746 sibling = array_clone_pointers(sibling_pointer, (int_default_t)new_size, err);
747 if (unlikely(!sibling))
748 return false;
750 sibling = get_writable(sibling_pointer, err);
751 if (unlikely(!sibling))
752 return false;
753 da(sibling,array_pointers)->pointer[da(sibling,array_pointers)->n_used_entries] = ptr;
754 *result_ptr = &da(sibling,array_pointers)->pointer[da(sibling,array_pointers)->n_used_entries];
755 da(sibling,array_pointers)->n_used_entries++;
756 index_add_int(&get_struct(sibling_pointer, struct btree_level, node)->end_index, 1);
757 return true;
760 expand:
761 sibling = alloc_one_ptr(ptr, err);
762 if (unlikely(!sibling))
763 return false;
764 *result_ptr = &da(sibling,array_pointers)->pointer[0];
765 split = expand_parent(w, 2, &prev_idx, err);
766 if (unlikely(!split)) {
767 data_free_r1(sibling);
768 return false;
770 index_copy(&split[0].end_index, prev_idx);
771 index_add_int(&split[0].end_index, 1);
772 split[0].node = pointer_data(sibling);
773 split[1].end_index = prev_idx;
774 index_add(&split[1].end_index, existing_size);
775 split[1].node = orig;
776 return true;
779 static bool leaf_append_ptr(struct walk_context *w, array_index_t existing_size, pointer_t ptr, pointer_t **result_ptr, ajla_error_t *err)
781 struct btree_level *split;
782 array_index_t prev_idx;
783 pointer_t *sibling_pointer;
784 struct data *sibling;
785 pointer_t orig = *w->ptr;
787 if (unlikely(!w->upper_level) || unlikely(w->upper_level_pos == da(w->upper_level,array_btree)->n_used_btree_entries - 1))
788 goto expand;
790 sibling_pointer = &da(w->upper_level,array_btree)->btree[w->upper_level_pos + 1].node;
791 sibling = pointer_get_data(*sibling_pointer);
792 if (da_tag(sibling) == DATA_TAG_array_pointers) {
793 if (da(sibling,array_pointers)->n_used_entries == da(sibling,array_pointers)->n_allocated_entries) {
794 uint_default_t new_size;
795 if (unlikely(da(sibling,array_pointers)->n_allocated_entries >= SCALAR_SPLIT_SIZE))
796 goto expand;
797 new_size = (uint_default_t)da(sibling,array_pointers)->n_allocated_entries * 2;
798 if (new_size > SCALAR_SPLIT_SIZE)
799 new_size = SCALAR_SPLIT_SIZE;
800 sibling = array_clone_pointers(sibling_pointer, (int_default_t)new_size, err);
801 if (unlikely(!sibling))
802 return false;
804 sibling = get_writable(sibling_pointer, err);
805 if (unlikely(!sibling))
806 return false;
807 if (da(sibling,array_pointers)->pointer == da(sibling,array_pointers)->pointer_array) {
808 memmove(da(sibling,array_pointers)->pointer + 1, da(sibling,array_pointers)->pointer, da(sibling,array_pointers)->n_used_entries * sizeof(pointer_t));
809 } else {
810 da(sibling,array_pointers)->pointer--;
811 da(sibling,array_pointers)->n_allocated_entries++;
813 da(sibling,array_pointers)->pointer[0] = ptr;
814 *result_ptr = &da(sibling,array_pointers)->pointer[0];
815 da(sibling,array_pointers)->n_used_entries++;
816 index_sub_int(&get_struct(sibling_pointer, struct btree_level, node)[-1].end_index, 1);
817 return true;
820 expand:
821 sibling = alloc_one_ptr(ptr, err);
822 if (unlikely(!sibling))
823 return false;
824 *result_ptr = &da(sibling,array_pointers)->pointer[0];
825 split = expand_parent(w, 2, &prev_idx, err);
826 if (unlikely(!split)) {
827 data_free_r1(sibling);
828 return false;
830 index_copy(&split[0].end_index, prev_idx);
831 index_add(&split[0].end_index, existing_size);
832 index_sub_int(&split[0].end_index, 1);
833 split[0].node = orig;
834 split[1].end_index = prev_idx;
835 index_add(&split[1].end_index, existing_size);
836 split[1].node = pointer_data(sibling);
837 return true;
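/*
 * Try to concatenate two pointer leaves into 'left' (cloning it when it is
 * shared or too small). Returns NULL, leaving both nodes untouched, if the
 * result would be too large or memory cannot be obtained; on success the
 * right node is freed or dereferenced.
 */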
840 static struct data *try_join_ptr(struct data *left, struct data *right)
842 int_default_t sum;
843 bool quick_copy;
845 sum = (int_default_t)((uint_default_t)da(left,array_pointers)->n_used_entries + (uint_default_t)da(right,array_pointers)->n_used_entries);
846 /*debug("join: %lu, %lu, max %lu, ref %lu", (uint_default_t)da(left,array_pointers)->n_used_entries, (uint_default_t)da(right,array_pointers)->n_used_entries, da(left,array_pointers)->n_allocated_entries, refcount_get_nonatomic(&left->refcount));*/
847 if (unlikely(sum < 0))
848 return NULL;
850 if (!data_is_writable(left)) {
851 if (sum > BTREE_MAX_SIZE)
852 return NULL;
853 goto clon;
855 if (da(left,array_pointers)->n_allocated_entries < sum) {
856 pointer_t ptr;
857 clon:
858 ptr = pointer_data(left);
859 left = array_clone_pointers(&ptr, array_align_alloc(sum), MEM_DONT_TRY_TO_FREE);
860 if (unlikely(!left))
861 return NULL;
864 quick_copy = data_is_writable(right);
866 if (quick_copy) {
867 memcpy(da(left,array_pointers)->pointer + da(left,array_pointers)->n_used_entries, da(right,array_pointers)->pointer, da(right,array_pointers)->n_used_entries * sizeof(pointer_t));
868 } else {
869 int_default_t i, offs, total;
870 offs = da(left,array_pointers)->n_used_entries;
871 total = da(right,array_pointers)->n_used_entries;
872 for (i = 0; i < total; i++)
873 da(left,array_pointers)->pointer[offs + i] = pointer_reference(&da(right,array_pointers)->pointer[i]);
876 da(left,array_pointers)->n_used_entries += da(right,array_pointers)->n_used_entries;
878 if (quick_copy)
879 data_free_r1(right);
880 else
881 data_dereference(right);
883 return left;
886 static void try_join_siblings_ptr(struct walk_context *w, pointer_t **result_ptr, bool ptr_right)
888 struct data *ul = w->upper_level;
889 btree_entries_t ul_pos = w->upper_level_pos;
890 struct data *left, *right;
891 int_default_t ptr_pos;
893 left = pointer_get_data(da(ul,array_btree)->btree[ul_pos].node);
894 right = pointer_get_data(da(ul,array_btree)->btree[ul_pos + 1].node);
896 if (da_tag(left) != DATA_TAG_array_pointers || da_tag(right) != DATA_TAG_array_pointers)
897 return;
899 if (!ptr_right) {
900 ptr_pos = (int_default_t)(*result_ptr - da(left,array_pointers)->pointer);
901 ajla_assert((uint_default_t)ptr_pos < (uint_default_t)da(left,array_pointers)->n_used_entries, (file_line, "try_join_siblings_ptr: ptr is outside left node"));
902 } else {
903 ptr_pos = (int_default_t)(*result_ptr - da(right,array_pointers)->pointer);
904 ajla_assert((uint_default_t)ptr_pos < (uint_default_t)da(right,array_pointers)->n_used_entries, (file_line, "try_join_siblings_ptr: ptr is outside right node"));
905 ptr_pos += da(left,array_pointers)->n_used_entries;
908 left = try_join_ptr(left, right);
909 if (unlikely(!left))
910 return;
912 *result_ptr = &da(left,array_pointers)->pointer[ptr_pos];
914 da(ul,array_btree)->btree[ul_pos].node = pointer_data(left);
915 index_free(&da(ul,array_btree)->btree[ul_pos].end_index);
916 da(ul,array_btree)->btree[ul_pos].end_index = da(ul,array_btree)->btree[ul_pos + 1].end_index;
917 memmove(da(ul,array_btree)->btree + ul_pos + 1, da(ul,array_btree)->btree + ul_pos + 2, (da(ul,array_btree)->n_used_btree_entries - ul_pos - 2) * sizeof(struct btree_level));
918 da(ul,array_btree)->n_used_btree_entries--;
919 if (unlikely(da(ul,array_btree)->n_used_btree_entries == 1)) {
920 free_btree_indices(ul);
921 data_free_r1(ul);
922 *w->root = pointer_data(left);
926 static void try_join_both_siblings_ptr(struct walk_context *w, pointer_t **result_ptr)
928 if (w->upper_level) {
929 if (w->upper_level_pos < da(w->upper_level,array_btree)->n_used_btree_entries - 1) {
930 try_join_siblings_ptr(w, result_ptr, false);
932 if (w->upper_level_pos) {
933 w->upper_level_pos--;
934 try_join_siblings_ptr(w, result_ptr, true);
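/*
 * Convert element w->idx of a flat leaf into a boxed pointer. A one-element
 * leaf is replaced outright, the first or last element is moved into a
 * neighbouring pointer node, and a middle element splits the leaf into
 * flat / single pointer / flat. *result_ptr receives the new pointer slot.
 */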
939 static bool flat_to_ptr(struct walk_context *w, pointer_t **result_ptr, ajla_error_t *err)
941 struct data *array, *right, *single;
942 pointer_t ptr;
944 array = pointer_get_data(*w->ptr);
945 ptr = flat_to_data(da(array,array_flat)->type, da_array_flat(array) + index_to_int(w->idx) * da_array_flat_element_size(array));
947 if (unlikely(da(array,array_flat)->n_used_entries == 1)) {
948 single = alloc_one_ptr(ptr, err);
949 if (unlikely(!single))
950 goto ret_err;
951 *result_ptr = &da(single,array_pointers)->pointer[0];
952 data_dereference(array);
953 *w->ptr = pointer_data(single);
954 try_join_both_siblings_ptr(w, result_ptr);
955 return true;
956 } else if (!index_to_int(w->idx)) {
957 array_index_t existing_size;
958 bool ret;
959 index_from_int(&existing_size, da(array,array_flat)->n_used_entries);
960 ret = leaf_prepend_ptr(w, existing_size, ptr, result_ptr, err);
961 index_free(&existing_size);
962 if (unlikely(!ret))
963 goto ret_err;
964 memmove(da_array_flat(array), da_array_flat(array) + da_array_flat_element_size(array), (da(array,array_flat)->n_used_entries - 1) * da_array_flat_element_size(array));
965 da(array,array_flat)->n_used_entries--;
966 return true;
967 } else if (index_to_int(w->idx) == da(array,array_flat)->n_used_entries - 1) {
968 array_index_t existing_size;
969 bool ret;
970 index_from_int(&existing_size, da(array,array_flat)->n_used_entries);
971 ret = leaf_append_ptr(w, existing_size, ptr, result_ptr, err);
972 index_free(&existing_size);
973 if (unlikely(!ret))
974 goto ret_err;
975 da(array,array_flat)->n_used_entries--;
976 return true;
977 } else {
978 struct btree_level *split;
979 array_index_t prev_idx;
980 int_default_t n_allocated = da(array,array_flat)->n_used_entries - 1 - index_to_int(w->idx);
982 right = data_alloc_array_flat_mayfail(da(array,array_flat)->type, n_allocated, da(array,array_flat)->n_used_entries - 1 - index_to_int(w->idx), false, err pass_file_line);
983 if (unlikely(!right))
984 goto ret_err;
985 memcpy(da_array_flat(right), da_array_flat(array) + (index_to_int(w->idx) + 1) * da_array_flat_element_size(array), da(right,array_flat)->n_used_entries * da_array_flat_element_size(array));
987 single = alloc_one_ptr(ptr, err);
988 if (unlikely(!single))
989 goto free_right_ret_err;
990 *result_ptr = &da(single,array_pointers)->pointer[0];
992 split = expand_parent(w, 3, &prev_idx, err);
993 if (unlikely(!split))
994 goto free_single_right_ret_err;
996 split[0].node = pointer_data(array);
997 index_add3(&split[0].end_index, prev_idx, w->idx);
998 split[1].node = pointer_data(single);
999 index_add3(&split[1].end_index, prev_idx, w->idx);
1000 index_add_int(&split[1].end_index, 1);
1001 split[2].node = pointer_data(right);
1002 split[2].end_index = prev_idx;
1003 index_add_int(&split[2].end_index, da(array,array_flat)->n_used_entries);
1005 da(array,array_flat)->n_used_entries = index_to_int(w->idx);
1007 return true;
1010 free_single_right_ret_err:
1011 data_free_r1(single);
1012 free_right_ret_err:
1013 data_free_r1(right);
1014 ret_err:
1015 pointer_dereference(ptr);
1016 return false;
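/*
 * Flat-leaf counterparts of leaf_prepend_ptr / leaf_append_ptr: reserve one
 * flat element of the given type just before (or, in leaf_append_flat, just
 * after) the current leaf and return its address in *result_flat.
 */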
1019 static bool leaf_prepend_flat(struct walk_context *w, const struct type *type, array_index_t existing_size, unsigned char **result_flat, ajla_error_t *err)
1021 struct btree_level *split;
1022 array_index_t prev_idx;
1023 pointer_t *sibling_pointer;
1024 struct data *sibling;
1025 pointer_t orig = *w->ptr;
1027 if (unlikely(!w->upper_level) || unlikely(!w->upper_level_pos))
1028 goto expand;
1030 sibling_pointer = &da(w->upper_level,array_btree)->btree[w->upper_level_pos - 1].node;
1031 sibling = pointer_get_data(*sibling_pointer);
1032 if (da_tag(sibling) == DATA_TAG_array_flat) {
1033 if (da(sibling,array_flat)->n_used_entries == da(sibling,array_flat)->n_allocated_entries) {
1034 uint_default_t new_size;
1035 if (unlikely(da(sibling,array_flat)->n_allocated_entries >= SCALAR_SPLIT_SIZE))
1036 goto expand;
1037 new_size = (uint_default_t)da(sibling,array_flat)->n_allocated_entries * 2;
1038 if (new_size > SCALAR_SPLIT_SIZE)
1039 new_size = SCALAR_SPLIT_SIZE;
1040 sibling = array_clone_flat(sibling_pointer, (int_default_t)new_size, err);
1041 if (unlikely(!sibling))
1042 return false;
1044 sibling = get_writable(sibling_pointer, err);
1045 if (unlikely(!sibling))
1046 return false;
1047 *result_flat = da_array_flat(sibling) + (size_t)type->size * da(sibling,array_flat)->n_used_entries;
1048 da(sibling,array_flat)->n_used_entries++;
1049 index_add_int(&get_struct(sibling_pointer, struct btree_level, node)->end_index, 1);
1050 return true;
1053 expand:
1054 sibling = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
1055 if (unlikely(!sibling))
1056 return false;
1057 *result_flat = da_array_flat(sibling);
1058 split = expand_parent(w, 2, &prev_idx, err);
1059 if (unlikely(!split)) {
1060 data_free_r1(sibling);
1061 return false;
1063 index_copy(&split[0].end_index, prev_idx);
1064 index_add_int(&split[0].end_index, 1);
1065 split[0].node = pointer_data(sibling);
1066 split[1].end_index = prev_idx;
1067 index_add(&split[1].end_index, existing_size);
1068 split[1].node = orig;
1069 return true;
1072 static bool leaf_append_flat(struct walk_context *w, const struct type *type, array_index_t existing_size, unsigned char **result_flat, ajla_error_t *err)
1074 struct btree_level *split;
1075 array_index_t prev_idx;
1076 pointer_t *sibling_pointer;
1077 struct data *sibling;
1078 pointer_t orig = *w->ptr;
1080 if (unlikely(!w->upper_level) || unlikely(w->upper_level_pos == da(w->upper_level,array_btree)->n_used_btree_entries - 1))
1081 goto expand;
1083 sibling_pointer = &da(w->upper_level,array_btree)->btree[w->upper_level_pos + 1].node;
1084 sibling = pointer_get_data(*sibling_pointer);
1085 if (da_tag(sibling) == DATA_TAG_array_flat) {
1086 if (da(sibling,array_flat)->n_used_entries == da(sibling,array_flat)->n_allocated_entries) {
1087 uint_default_t new_size;
1088 if (unlikely(da(sibling,array_flat)->n_allocated_entries >= SCALAR_SPLIT_SIZE))
1089 goto expand;
1090 new_size = (uint_default_t)da(sibling,array_flat)->n_allocated_entries * 2;
1091 if (new_size > SCALAR_SPLIT_SIZE)
1092 new_size = SCALAR_SPLIT_SIZE;
1093 sibling = array_clone_flat(sibling_pointer, (int_default_t)new_size, err);
1094 if (unlikely(!sibling))
1095 return false;
1097 sibling = get_writable(sibling_pointer, err);
1098 if (unlikely(!sibling))
1099 return false;
1100 memmove(da_array_flat(sibling) + type->size, da_array_flat(sibling), (size_t)type->size * da(sibling,array_flat)->n_used_entries);
1101 *result_flat = da_array_flat(sibling);
1102 da(sibling,array_flat)->n_used_entries++;
1103 index_sub_int(&get_struct(sibling_pointer, struct btree_level, node)[-1].end_index, 1);
1104 return true;
1107 expand:
1108 sibling = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
1109 if (unlikely(!sibling))
1110 return false;
1111 *result_flat = da_array_flat(sibling);
1112 split = expand_parent(w, 2, &prev_idx, err);
1113 if (unlikely(!split)) {
1114 data_free_r1(sibling);
1115 return false;
1117 index_copy(&split[0].end_index, prev_idx);
1118 index_add(&split[0].end_index, existing_size);
1119 index_sub_int(&split[0].end_index, 1);
1120 split[0].node = orig;
1121 split[1].end_index = prev_idx;
1122 index_add(&split[1].end_index, existing_size);
1123 split[1].node = pointer_data(sibling);
1124 return true;
1127 static struct data *try_join_flat(struct data *left, struct data *right)
1129 int_default_t sum;
1130 size_t element_size;
1132 sum = (int_default_t)((uint_default_t)da(left,array_flat)->n_used_entries + (uint_default_t)da(right,array_flat)->n_used_entries);
1133 if (unlikely(sum < 0))
1134 return NULL;
1136 if (!data_is_writable(left) || da(left,array_flat)->n_allocated_entries < sum) {
1137 pointer_t ptr = pointer_data(left);
1138 left = array_clone_flat(&ptr, array_align_alloc(sum), MEM_DONT_TRY_TO_FREE);
1139 if (unlikely(!left))
1140 return NULL;
1143 element_size = da_array_flat_element_size(left);
1144 memcpy(da_array_flat(left) + da(left,array_flat)->n_used_entries * element_size, da_array_flat(right), da(right,array_flat)->n_used_entries * element_size);
1145 da(left,array_flat)->n_used_entries += da(right,array_flat)->n_used_entries;
1147 data_dereference(right);
1149 return left;
1152 static void try_join_siblings_flat(struct walk_context *w, unsigned char **result_flat, bool ptr_right)
1154 struct data *ul = w->upper_level;
1155 btree_entries_t ul_pos = w->upper_level_pos;
1156 struct data *left, *right;
1157 size_t ptr_pos;
1158 size_t element_size;
1160 left = pointer_get_data(da(ul,array_btree)->btree[ul_pos].node);
1161 right = pointer_get_data(da(ul,array_btree)->btree[ul_pos + 1].node);
1163 if (da_tag(left) != DATA_TAG_array_flat || da_tag(right) != DATA_TAG_array_flat)
1164 return;
1166 element_size = da_array_flat_element_size(left);
1167 if (!ptr_right) {
1168 ptr_pos = *result_flat - da_array_flat(left);
1169 ajla_assert((uint_default_t)ptr_pos < (uint_default_t)da(left,array_flat)->n_used_entries * element_size, (file_line, "try_join_siblings_flat: ptr is outside left node"));
1170 } else {
1171 ptr_pos = *result_flat - da_array_flat(right);
1172 ajla_assert((uint_default_t)ptr_pos < (uint_default_t)da(right,array_flat)->n_used_entries * element_size, (file_line, "try_join_siblings_flat: ptr is outside right node"));
1173 ptr_pos += da(left,array_flat)->n_used_entries * element_size;
1176 left = try_join_flat(left, right);
1177 if (unlikely(!left))
1178 return;
1180 *result_flat = da_array_flat(left) + ptr_pos;
1182 da(ul,array_btree)->btree[ul_pos].node = pointer_data(left);
1183 index_free(&da(ul,array_btree)->btree[ul_pos].end_index);
1184 da(ul,array_btree)->btree[ul_pos].end_index = da(ul,array_btree)->btree[ul_pos + 1].end_index;
1185 memmove(da(ul,array_btree)->btree + ul_pos + 1, da(ul,array_btree)->btree + ul_pos + 2, (da(ul,array_btree)->n_used_btree_entries - ul_pos - 2) * sizeof(struct btree_level));
1186 da(ul,array_btree)->n_used_btree_entries--;
1187 if (unlikely(da(ul,array_btree)->n_used_btree_entries == 1)) {
1188 free_btree_indices(ul);
1189 data_free_r1(ul);
1190 *w->root = pointer_data(left);
1194 static void try_join_both_siblings_flat(struct walk_context *w, unsigned char **result_flat)
1196 if (w->upper_level) {
1197 if (w->upper_level_pos < da(w->upper_level,array_btree)->n_used_btree_entries - 1) {
1198 try_join_siblings_flat(w, result_flat, false);
1200 if (w->upper_level_pos) {
1201 w->upper_level_pos--;
1202 try_join_siblings_flat(w, result_flat, true);
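/*
 * Convert element w->idx of a pointer leaf into flat storage of the given
 * type (the inverse of flat_to_ptr). The old pointer is dereferenced and
 * *result_flat points at the reserved flat slot.
 */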
1207 static bool ptr_to_flat(struct walk_context *w, const struct type *type, unsigned char **result_flat, ajla_error_t *err)
1209 struct data *array, *right, *single;
1211 array = pointer_get_data(*w->ptr);
1213 if (unlikely(da(array,array_pointers)->n_used_entries == 1)) {
1214 struct data *new_flat;
1215 new_flat = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
1216 if (unlikely(!new_flat))
1217 goto ret_err;
1218 *result_flat = da_array_flat(new_flat);
1219 data_dereference(array);
1220 *w->ptr = pointer_data(new_flat);
1221 try_join_both_siblings_flat(w, result_flat);
1222 return true;
1223 } else if (!index_to_int(w->idx)) {
1224 array_index_t existing_size;
1225 bool ret;
1226 index_from_int(&existing_size, da(array,array_pointers)->n_used_entries);
1227 ret = leaf_prepend_flat(w, type, existing_size, result_flat, err);
1228 index_free(&existing_size);
1229 if (unlikely(!ret))
1230 goto ret_err;
1231 pointer_dereference(da(array,array_pointers)->pointer[0]);
1232 da(array,array_pointers)->n_used_entries--;
1233 da(array,array_pointers)->n_allocated_entries--;
1234 da(array,array_pointers)->pointer++;
1235 return true;
1236 } else if (index_to_int(w->idx) == da(array,array_pointers)->n_used_entries - 1) {
1237 array_index_t existing_size;
1238 bool ret;
1239 index_from_int(&existing_size, da(array,array_pointers)->n_used_entries);
1240 ret = leaf_append_flat(w, type, existing_size, result_flat, err);
1241 index_free(&existing_size);
1242 if (unlikely(!ret))
1243 goto ret_err;
1244 pointer_dereference(da(array,array_pointers)->pointer[da(array,array_pointers)->n_used_entries - 1]);
1245 da(array,array_pointers)->n_used_entries--;
1246 return true;
1247 } else {
1248 struct btree_level *split;
1249 array_index_t prev_idx;
1250 int_default_t n_allocated = da(array,array_pointers)->n_used_entries - 1 - index_to_int(w->idx);
1252 right = data_alloc_array_pointers_mayfail(n_allocated, da(array,array_pointers)->n_used_entries - 1 - index_to_int(w->idx), err pass_file_line);
1253 if (unlikely(!right))
1254 goto ret_err;
1255 memcpy(da(right,array_pointers)->pointer, da(array,array_pointers)->pointer + index_to_int(w->idx) + 1, da(right,array_pointers)->n_used_entries * sizeof(pointer_t));
1257 single = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
1258 if (unlikely(!single))
1259 goto free_right_ret_err;
1261 *result_flat = da_array_flat(single);
1263 split = expand_parent(w, 3, &prev_idx, err);
1264 if (unlikely(!split))
1265 goto free_single_right_ret_err;
1267 split[0].node = pointer_data(array);
1268 index_add3(&split[0].end_index, prev_idx, w->idx);
1269 split[1].node = pointer_data(single);
1270 index_copy(&split[1].end_index, split[0].end_index);
1271 index_add_int(&split[1].end_index, 1);
1272 split[2].node = pointer_data(right);
1273 split[2].end_index = prev_idx;
1274 index_add_int(&split[2].end_index, da(array,array_pointers)->n_used_entries);
1276 da(array,array_pointers)->n_used_entries = index_to_int(w->idx);
1277 pointer_dereference(da(array,array_pointers)->pointer[index_to_int(w->idx)]);
1279 return true;
1282 free_single_right_ret_err:
1283 data_free_r1(single);
1284 free_right_ret_err:
1285 data_free_r1(right);
1286 ret_err:
1287 return false;
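/*
 * Give element w->idx of an array_same node (many repetitions of a single
 * pointer) its own pointer slot, splitting the node when the element lies
 * in the middle.
 */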
1290 static bool same_to_ptr(struct walk_context *w, pointer_t **result_ptr, ajla_error_t *err)
1292 struct data *array, *right, *single;
1293 pointer_t ptr;
1294 array_index_t n_entries_1;
1296 array = pointer_get_data(*w->ptr);
1297 ptr = da(array,array_same)->pointer;
1299 if (unlikely(!index_ge_int(da(array,array_same)->n_entries, 2))) {
1300 single = alloc_one_ptr(ptr, err);
1301 if (unlikely(!single))
1302 goto ret_err;
1303 *result_ptr = &da(single,array_pointers)->pointer[0];
1304 index_free(&da(array,array_same)->n_entries);
1305 data_free_r1(array);
1306 *w->ptr = pointer_data(single);
1307 try_join_both_siblings_ptr(w, result_ptr);
1308 return true;
1309 } else if (!index_ge_int(w->idx, 1)) {
1310 bool ret;
1311 ret = leaf_prepend_ptr(w, da(array,array_same)->n_entries, ptr, result_ptr, err);
1312 if (unlikely(!ret))
1313 goto ret_err;
1314 index_sub_int(&da(array,array_same)->n_entries, 1);
1315 pointer_reference_owned(ptr);
1316 return true;
1317 } else if (index_copy(&n_entries_1, da(array,array_same)->n_entries), index_sub_int(&n_entries_1, 1), index_ge_index(w->idx, n_entries_1)) {
1318 bool ret;
1319 ret = leaf_append_ptr(w, da(array,array_same)->n_entries, ptr, result_ptr, err);
1320 if (unlikely(!ret)) {
1321 index_free(&n_entries_1);
1322 goto ret_err;
1324 index_free(&da(array,array_same)->n_entries);
1325 da(array,array_same)->n_entries = n_entries_1;
1326 pointer_reference_owned(ptr);
1327 return true;
1328 } else {
1329 struct btree_level *split;
1330 array_index_t prev_idx;
1332 index_sub(&n_entries_1, w->idx);
1333 right = data_alloc_array_same_mayfail(n_entries_1, err pass_file_line);
1334 if (unlikely(!right))
1335 goto ret_err;
1336 da(right,array_same)->pointer = ptr;
1338 single = alloc_one_ptr(ptr, err);
1339 if (unlikely(!single))
1340 goto free_right_ret_err;
1341 *result_ptr = &da(single,array_pointers)->pointer[0];
1343 split = expand_parent(w, 3, &prev_idx, err);
1344 if (unlikely(!split))
1345 goto free_single_right_ret_err;
1347 split[0].node = pointer_data(array);
1348 index_add3(&split[0].end_index, prev_idx, w->idx);
1349 split[1].node = pointer_data(single);
1350 index_copy(&split[1].end_index, split[0].end_index);
1351 index_add_int(&split[1].end_index, 1);
1352 split[2].node = pointer_data(right);
1353 split[2].end_index = prev_idx;
1354 index_add(&split[2].end_index, da(array,array_same)->n_entries);
1356 index_free(&da(array,array_same)->n_entries);
1357 index_copy(&da(array,array_same)->n_entries, w->idx);
1359 pointer_reference_owned(ptr);
1360 pointer_reference_owned(ptr);
1362 return true;
1365 free_single_right_ret_err:
1366 data_free_r1(single);
1367 free_right_ret_err:
1368 data_free_r1(right);
1369 ret_err:
1370 return false;
1373 static bool same_to_flat(struct walk_context *w, const struct type *type, unsigned char **result_flat, ajla_error_t *err)
1375 struct data *array, *right, *single;
1376 pointer_t ptr;
1377 array_index_t n_entries_1;
1379 array = pointer_get_data(*w->ptr);
1380 ptr = da(array,array_same)->pointer;
1382 if (unlikely(!index_ge_int(da(array,array_same)->n_entries, 2))) {
1383 struct data *new_flat;
1384 new_flat = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
1385 if (unlikely(!new_flat))
1386 goto ret_err;
1387 *result_flat = da_array_flat(new_flat);
1388 data_dereference(array);
1389 *w->ptr = pointer_data(new_flat);
1390 try_join_both_siblings_flat(w, result_flat);
1391 return true;
1392 } else if (!index_ge_int(w->idx, 1)) {
1393 bool ret;
1394 ret = leaf_prepend_flat(w, type, da(array,array_same)->n_entries, result_flat, err);
1395 if (unlikely(!ret))
1396 goto ret_err;
1397 index_sub_int(&da(array,array_same)->n_entries, 1);
1398 return true;
1399 } else if (index_copy(&n_entries_1, da(array,array_same)->n_entries), index_sub_int(&n_entries_1, 1), index_ge_index(w->idx, n_entries_1)) {
1400 bool ret;
1401 ret = leaf_append_flat(w, type, da(array,array_same)->n_entries, result_flat, err);
1402 if (unlikely(!ret)) {
1403 index_free(&n_entries_1);
1404 goto ret_err;
1406 index_free(&da(array,array_same)->n_entries);
1407 da(array,array_same)->n_entries = n_entries_1;
1408 return true;
1409 } else {
1410 struct btree_level *split;
1411 array_index_t prev_idx;
1413 index_sub(&n_entries_1, w->idx);
1414 right = data_alloc_array_same_mayfail(n_entries_1, err pass_file_line);
1415 if (unlikely(!right))
1416 goto ret_err;
1417 da(right,array_same)->pointer = ptr;
1419 single = data_alloc_array_flat_mayfail(type, 1, 1, false, err pass_file_line);
1420 if (unlikely(!single))
1421 goto free_right_ret_err;
1423 *result_flat = da_array_flat(single);
1425 split = expand_parent(w, 3, &prev_idx, err);
1426 if (unlikely(!split))
1427 goto free_single_right_ret_err;
1429 split[0].node = pointer_data(array);
1430 index_add3(&split[0].end_index, prev_idx, w->idx);
1431 split[1].node = pointer_data(single);
1432 index_copy(&split[1].end_index, split[0].end_index);
1433 index_add_int(&split[1].end_index, 1);
1434 split[2].node = pointer_data(right);
1435 split[2].end_index = prev_idx;
1436 index_add(&split[2].end_index, da(array,array_same)->n_entries);
1438 index_free(&da(array,array_same)->n_entries);
1439 index_copy(&da(array,array_same)->n_entries, w->idx);
1441 pointer_reference_owned(ptr);
1443 return true;
1446 free_single_right_ret_err:
1447 data_free_r1(single);
1448 free_right_ret_err:
1449 data_free_r1(right);
1450 ret_err:
1451 return false;
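/*
 * Find element idx for in-place modification, cloning shared nodes on the
 * way down. Depending on flags the leaf is converted between flat and
 * pointer representation; on success exactly one of *result_ptr and
 * *result_flat is set. On failure the whole array is replaced by an error
 * pointer and false is returned.
 */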
1454 bool attr_fastcall array_modify(pointer_t *root, array_index_t idx, unsigned flags, pointer_t **result_ptr, unsigned char **result_flat, const struct type **flat_type, frame_s *fp, const code_t *ip)
1456 struct walk_context w;
1457 ajla_error_t err;
1458 struct data *array;
1460 *result_ptr = NULL;
1461 *result_flat = NULL;
1463 walk_init_context(&w, root, idx);
1465 next_level:
1466 array = get_writable(w.ptr, &err);
1467 if (unlikely(!array))
1468 goto ret_err;
1469 switch (da_tag(array)) {
1470 case DATA_TAG_array_slice: {
1471 array = array_clone(w.ptr, &err);
1472 if (unlikely(!array))
1473 goto ret_err;
1475 /*-fallthrough*/
1476 case DATA_TAG_array_flat: {
1477 if (unlikely(index_ge_int(w.idx, da(array,array_flat)->n_used_entries)))
1478 goto ret_err_out_of_range;
1480 if (unlikely(flags & ARRAY_MODIFY_NEED_PTR)) {
1481 if (unlikely(!flat_to_ptr(&w, result_ptr, &err)))
1482 goto ret_err;
1483 break;
1486 *flat_type = da(array,array_flat)->type;
1487 *result_flat = da_array_flat(array) + index_to_int(w.idx) * da_array_flat_element_size(array);
1488 break;
1490 case DATA_TAG_array_pointers: {
1491 if (unlikely(index_ge_int(w.idx, da(array,array_pointers)->n_used_entries)))
1492 goto ret_err_out_of_range;
1494 if (unlikely(flags & ARRAY_MODIFY_NEED_FLAT)) {
1495 if (unlikely(!ptr_to_flat(&w, *flat_type, result_flat, &err)))
1496 goto ret_err;
1497 break;
1500 *result_ptr = &da(array,array_pointers)->pointer[index_to_int(w.idx)];
1501 break;
1503 case DATA_TAG_array_same: {
1504 if (unlikely(index_ge_index(w.idx, da(array,array_same)->n_entries)))
1505 goto ret_err_out_of_range;
1506 if (!(flags & ARRAY_MODIFY_NEED_FLAT)) {
1507 if (unlikely(!same_to_ptr(&w, result_ptr, &err)))
1508 goto ret_err;
1509 break;
1510 } else {
1511 if (unlikely(!same_to_flat(&w, *flat_type, result_flat, &err)))
1512 goto ret_err;
1513 break;
1516 case DATA_TAG_array_btree: {
1517 if (unlikely(!walk_for_write(&w, &err)))
1518 goto ret_err;
1519 goto next_level;
1521 default:
1522 internal(file_line, "array_modify: invalid array tag %u", da_tag(array));
1525 walk_free_context(&w);
1527 return true;
1529 ret_err_out_of_range:
1530 err = error_ajla(EC_SYNC, AJLA_ERROR_INDEX_OUT_OF_RANGE);
1531 ret_err:
1532 walk_free_context(&w);
1533 pointer_dereference(*root);
1534 *root = pointer_error(err, fp, ip pass_file_line);
1535 *result_ptr = root;
1536 return false;
1537 }
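/*
 * array_len: return the number of elements in an array node as an
 * array_index_t. For btree nodes this is the end_index of the last used
 * entry; for incomplete arrays the lengths of the finished parts in the
 * chain are summed (the chain must already be evaluated, see the
 * assertions below).
 */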
1539 array_index_t attr_fastcall array_len(struct data *array)
1541 array_index_t result, tmp;
1542 switch (da_tag(array)) {
1543 case DATA_TAG_array_flat:
1544 index_from_int(&result, da(array,array_flat)->n_used_entries);
1545 break;
1546 case DATA_TAG_array_slice:
1547 index_from_int(&result, da(array,array_slice)->n_entries);
1548 break;
1549 case DATA_TAG_array_pointers:
1550 index_from_int(&result, da(array,array_pointers)->n_used_entries);
1551 break;
1552 case DATA_TAG_array_same:
1553 index_copy(&result, da(array,array_same)->n_entries);
1554 break;
1555 case DATA_TAG_array_btree:
1556 index_copy(&result, da(array,array_btree)->btree[da(array,array_btree)->n_used_btree_entries - 1].end_index);
1557 break;
1558 case DATA_TAG_array_incomplete:
1559 index_from_int(&result, 0);
1560 follow_incomplete_chain:
1561 tmp = array_len(pointer_get_data(da(array,array_incomplete)->first));
1562 ajla_assert_lo(index_ge_int(tmp, 1), (file_line, "array_len: the first part is empty"));
1563 index_add(&result, tmp);
1564 index_free(&tmp);
1566 ajla_assert_lo(!pointer_is_thunk(da(array,array_incomplete)->next), (file_line, "array_len: incomplete thunk is not evaluated"));
1567 array = pointer_get_data(da(array,array_incomplete)->next);
1568 if (da_tag(array) != DATA_TAG_array_incomplete) {
1569 tmp = array_len(array);
1570 index_add(&result, tmp);
1571 index_free(&tmp);
1572 } else {
1573 goto follow_incomplete_chain;
1574 }
1575 break;
1576 default:
1577 internal(file_line, "array_len: invalid array tag %u", da_tag(array));
1578 }
1579 return result;
1580 }
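/*
 * array_is_empty: only flat, pointer and slice nodes can represent an empty
 * array; for those the entry count is tested, every other tag reports
 * non-empty.
 */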
1582 bool attr_fastcall array_is_empty(struct data *array)
1584 switch (da_tag(array)) {
1585 case DATA_TAG_array_flat:
1586 return !da(array,array_flat)->n_used_entries;
1587 case DATA_TAG_array_pointers:
1588 return !da(array,array_pointers)->n_used_entries;
1589 case DATA_TAG_array_slice:
1590 return !da(array,array_slice)->n_entries;
1591 }
1592 return false;
1593 }
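/*
 * array_greater: decide which operand of a join stays at the root.
 * Returning true means array1 is kept and array2 is appended to it;
 * returning false means the join descends into array2 and prepends array1.
 */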
1595 static bool array_greater(struct data *array1, struct data *array2)
1596 {
1597 /*
1598  * Note: the code will try to run rebalance_btree_nodes on the shallower
1599  * array. If both arrays have equal depth, we must rebalance the one
1600  * with fewer entries.
1601  */
1602 if (da_array_depth(array1) > da_array_depth(array2))
1603 return true;
1604 if (da_array_depth(array1) < da_array_depth(array2))
1605 return false;
1606 if (da_tag(array1) == DATA_TAG_array_btree) {
1607 if (da(array1,array_btree)->n_used_btree_entries > da(array2,array_btree)->n_used_btree_entries)
1608 return true;
1609 if (da(array1,array_btree)->n_used_btree_entries < da(array2,array_btree)->n_used_btree_entries)
1610 return false;
1612 return true;
1613 }
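/*
 * array_join: concatenate array1 and array2; both references are consumed.
 * Two flat or two pointer arrays are merged directly when possible;
 * otherwise the shallower (or smaller) operand is inserted as a new sibling
 * at one edge of the other operand's btree, the end indices on that path
 * are adjusted, and adjacent leaves are re-joined. Returns NULL (with *err
 * set) on failure.
 *
 * Illustrative call sequence (a sketch, not taken from this file): a caller
 * that wants to keep array1 alive must take an extra reference first:
 *	data_reference(array1);
 *	joined = array_join(array1, array2, &err);
 */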
1615 struct data * attr_fastcall array_join(struct data *array1, struct data *array2, ajla_error_t *err)
1617 struct walk_context w;
1618 array_index_t len1, len2, total_len, new_len, len_array;
1619 struct data *array, *new_array;
1620 tag_t new_tag;
1621 bool append;
1622 struct btree_level *split;
1623 array_index_t prev_idx;
1624 struct data *fix_idx;
1625 pointer_t result;
1627 #if 1
1628 tag_t tag1 = da_tag(array1);
1629 tag_t tag2 = da_tag(array2);
1630 if (tag1 == DATA_TAG_array_flat && tag2 == DATA_TAG_array_flat) {
1631 new_array = try_join_flat(array1, array2);
1632 if (likely(new_array != NULL)) {
1633 return new_array;
1634 }
1635 }
1636 if (tag1 == DATA_TAG_array_pointers && tag2 == DATA_TAG_array_pointers) {
1637 new_array = try_join_ptr(array1, array2);
1638 if (likely(new_array != NULL)) {
1639 return new_array;
1640 }
1641 }
1642 #endif
1644 len1 = array_len(array1);
1645 if (unlikely(!index_ge_int(len1, 1))) {
1646 index_free(&len1);
1647 data_dereference(array1);
1648 return array2;
1649 }
1650 len2 = array_len(array2);
1651 if (unlikely(!index_ge_int(len2, 1))) {
1652 index_free(&len2);
1653 index_free(&len1);
1654 data_dereference(array2);
1655 return array1;
1656 }
1658 if (unlikely(!index_add3_(&total_len, len1, len2, err pass_file_line))) {
1659 index_free(&len1);
1660 index_free(&len2);
1661 data_dereference(array1);
1662 data_dereference(array2);
1663 return NULL;
1664 }
1665 index_free(&total_len);
1667 if (array_greater(array1, array2)) {
1668 array_index_t search_for;
1669 result = pointer_data(array1);
1670 new_array = array2;
1671 new_len = len2;
1672 search_for = len1;
1673 index_sub_int(&search_for, 1);
1674 walk_init_context(&w, &result, search_for);
1675 append = true;
1676 } else {
1677 array_index_t search_for;
1678 result = pointer_data(array2);
1679 new_array = array1;
1680 new_len = len1;
1681 index_free(&len2);
1682 index_from_int(&search_for, 0);
1683 walk_init_context(&w, &result, search_for);
1684 append = false;
1685 }
1687 next_level:
1688 array = pointer_get_data(*w.ptr);
1690 if (da_array_depth(array) > da_array_depth(new_array)) {
1691 array = get_writable(w.ptr, err);
1692 if (unlikely(!array))
1693 goto ret_err;
1694 if (unlikely(!walk_for_write(&w, err)))
1695 goto ret_err;
1696 goto next_level;
1697 }
1699 split = expand_parent(&w, 2, &prev_idx, err);
1700 if (unlikely(!split))
1701 goto ret_err;
1703 len_array = array_len(array);
1704 if (!append) {
1705 split[0].node = pointer_data(new_array);
1706 split[0].end_index = prev_idx; /* 0 */
1707 split[1].node = pointer_data(array);
1708 split[1].end_index = len_array;
1709 } else {
1710 split[0].node = pointer_data(array);
1711 split[0].end_index = prev_idx;
1712 index_add(&split[0].end_index, len_array);
1713 split[1].node = pointer_data(new_array);
1714 index_copy(&split[1].end_index, split[0].end_index);
1715 index_free(&len_array);
1716 }
1718 fix_idx = pointer_get_data(result);
1719 while (1) {
1720 btree_entries_t i, s;
1721 s = !append ? 0 : da(fix_idx,array_btree)->n_used_btree_entries - 1;
1722 for (i = s; i < da(fix_idx,array_btree)->n_used_btree_entries; i++)
1723 index_add(&da(fix_idx,array_btree)->btree[i].end_index, new_len);
1724 if (fix_idx == w.upper_level) {
1725 w.upper_level_pos = s;
1726 w.ptr = &da(fix_idx,array_btree)->btree[s].node;
1727 index_free(&w.idx);
1728 index_from_int(&w.idx, 0);
1729 break;
1730 }
1731 fix_idx = pointer_get_data(da(fix_idx,array_btree)->btree[s].node);
1732 }
1734 new_tag = da_tag(new_array);
1735 if (new_tag == DATA_TAG_array_btree) {
1736 #if 0
1737 /* test it in get_writable instead of here */
1738 struct data *ac;
1739 ac = get_writable(w.ptr, err);
1740 if (unlikely(!ac))
1741 goto ret_err_balance;
1742 #endif
1743 if (unlikely(!walk_for_write(&w, err))) {
1744 goto ret_err_balance;
1745 }
1746 } else if (new_tag == DATA_TAG_array_flat) {
1747 unsigned char *ptr = da_array_flat(new_array);
1748 if (append)
1749 w.upper_level_pos--;
1750 try_join_siblings_flat(&w, &ptr, append);
1751 } else if (new_tag == DATA_TAG_array_pointers) {
1752 pointer_t *ptr = &da(new_array,array_pointers)->pointer[0];
1753 if (append)
1754 w.upper_level_pos--;
1755 try_join_siblings_ptr(&w, &ptr, append);
1756 }
1758 index_free(&new_len);
1759 walk_free_context(&w);
1761 return pointer_get_data(result);
1763 ret_err:
1764 data_dereference(new_array);
1765 ret_err_balance:
1766 index_free(&new_len);
1767 walk_free_context(&w);
1768 pointer_dereference(result);
1769 return NULL;
1770 }
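/*
 * array_sub: return the subarray of 'array' starting at 'start' with 'len'
 * elements. 'start' and 'len' are consumed; when 'deref' is true the
 * caller's reference to 'array' is consumed as well (which also allows
 * trimming a writable pointer array in place). Flat arrays are returned as
 * slices that reference the original data; btree arrays are cut child by
 * child and the pieces are glued back together with array_join.
 *
 * Sketch of a call that keeps the original array (illustration only;
 * 'prefix' and 'k' are hypothetical):
 *	index_from_int(&start, 0);
 *	index_from_int(&len, k);
 *	prefix = array_sub(array, start, len, false, &err);
 */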
1773 struct data * attr_fastcall array_sub(struct data *array, array_index_t start, array_index_t len, bool deref, ajla_error_t *err)
1775 struct data *result;
1776 bool can_modify = deref;
1778 go_down:
1779 if (!data_is_writable(array))
1780 can_modify = false;
1782 switch (da_tag(array)) {
1783 case DATA_TAG_array_flat: {
1784 if (unlikely(index_eq_int(len, da(array,array_flat)->n_used_entries)))
1785 goto ret_array;
1786 if (unlikely(index_eq_int(len, 0))) {
1787 result = data_alloc_array_flat_mayfail(da(array,array_flat)->type, 0, 0, false, err pass_file_line);
1788 goto ret_free_start_len;
1789 }
1790 #if 1
1791 result = data_alloc_array_slice_mayfail(array, da_array_flat(array), index_to_int(start), index_to_int(len), err pass_file_line);
1792 goto ret_free_start_len;
1793 #else
1794 result = data_alloc_array_flat_mayfail(da(array,array_flat)->type, index_to_int(len), index_to_int(len), false, err pass_file_line);
1795 if (!result)
1796 goto ret_free_start_len;
1797 memcpy(da_array_flat(result), da_array_flat(array) + index_to_int(start) * da(array,array_flat)->type->size, index_to_int(len) * da(array,array_flat)->type->size);
1798 goto ret_free_start_len;
1799 #endif
1800 }
1801 case DATA_TAG_array_slice: {
1802 struct data *base;
1803 if (unlikely(index_eq_int(len, da(array,array_slice)->n_entries)))
1804 goto ret_array;
1805 if (unlikely(index_eq_int(len, 0))) {
1806 result = data_alloc_array_flat_mayfail(da(array,array_slice)->type, 0, 0, false, err pass_file_line);
1807 goto ret_free_start_len;
1808 }
1809 base = pointer_get_data(da(array,array_slice)->reference);
1810 result = data_alloc_array_slice_mayfail(base, da(array,array_slice)->flat_data_minus_data_array_offset + data_array_offset, index_to_int(start), index_to_int(len), err pass_file_line);
1811 goto ret_free_start_len;
1812 }
1813 case DATA_TAG_array_pointers: {
1814 int_default_t st, l, i;
1815 if (unlikely(index_eq_int(len, da(array,array_pointers)->n_used_entries)))
1816 goto ret_array;
1817 if (unlikely(index_eq_int(len, 0))) {
1818 result = data_alloc_array_pointers_mayfail(0, 0, err pass_file_line);
1819 goto ret_free_start_len;
1820 }
1821 st = index_to_int(start);
1822 l = index_to_int(len);
1823 if (can_modify) {
1824 int_default_t total = da(array,array_pointers)->pointer + da(array,array_pointers)->n_allocated_entries - da(array,array_pointers)->pointer_array;
1825 if (total / 2 > da(array,array_pointers)->n_used_entries)
1826 goto pointers_do_copy;
1827 for (i = 0; i < st; i++)
1828 pointer_dereference(da(array,array_pointers)->pointer[i]);
1829 for (i = st + l; i < da(array,array_pointers)->n_used_entries; i++)
1830 pointer_dereference(da(array,array_pointers)->pointer[i]);
1831 da(array,array_pointers)->pointer += st;
1832 da(array,array_pointers)->n_used_entries = l;
1833 da(array,array_pointers)->n_allocated_entries -= st;
1834 result = array;
1835 deref = false;
1836 goto ret_free_start_len;
1837 }
1838 pointers_do_copy:
1839 result = data_alloc_array_pointers_mayfail(l, l, err pass_file_line);
1840 if (unlikely(!result))
1841 goto ret_free_start_len;
1842 for (i = 0; i < l; i++) {
1843 da(result,array_pointers)->pointer[i] = pointer_reference(&da(array,array_pointers)->pointer[st + i]);
1844 }
1845 goto ret_free_start_len;
1846 }
1847 case DATA_TAG_array_same: {
1848 if (unlikely(index_eq_index(len, da(array,array_same)->n_entries)))
1849 goto ret_array;
1850 if (unlikely(index_eq_int(len, 0))) {
1851 result = data_alloc_array_pointers_mayfail(0, 0, err pass_file_line);
1852 goto ret_free_start_len;
1853 }
1854 result = data_alloc_array_same_mayfail(len, err pass_file_line);
1855 if (likely(result != NULL)) {
1856 da(result,array_same)->pointer = pointer_reference(&da(array,array_same)->pointer);
1857 }
1858 index_free(&start);
1859 goto ret;
1860 }
1861 case DATA_TAG_array_btree: {
1862 array_index_t max_step;
1863 btree_entries_t bt_pos;
1864 struct btree_level *levels = da(array,array_btree)->btree;
1865 if (unlikely(index_eq_index(len, levels[da(array,array_btree)->n_used_btree_entries - 1].end_index)))
1866 goto ret_array;
1868 if (unlikely(index_eq_int(len, 0))) {
1869 result = data_alloc_array_pointers_mayfail(0, 0, err pass_file_line);
1870 goto ret_free_start_len;
1871 }
1873 binary_search(btree_entries_t, da(array,array_btree)->n_used_btree_entries, bt_pos, false, index_ge_index(start, levels[bt_pos].end_index), break);
1875 index_sub3(&max_step, levels[bt_pos].end_index, start);
1876 if (bt_pos != 0) {
1877 struct btree_level *bt_prev = &levels[bt_pos - 1];
1878 index_sub(&start, bt_prev->end_index);
1879 }
1880 if (index_ge_index(max_step, len)) {
1881 pointer_t down_ptr;
1882 index_free(&max_step);
1883 down_ptr = levels[bt_pos].node;
1884 if (deref) {
1885 pointer_reference_owned(down_ptr);
1886 data_dereference(array);
1887 }
1888 array = pointer_get_data(down_ptr);
1889 goto go_down;
1890 }
1891 index_sub(&len, max_step);
1892 result = array_sub(pointer_get_data(levels[bt_pos].node), start, max_step, false, err);
1893 if (unlikely(!result))
1894 goto ret;
1895 do {
1896 array_index_t zero0;
1897 struct data *xa;
1898 bt_pos++;
1899 index_sub3(&max_step, levels[bt_pos].end_index, levels[bt_pos - 1].end_index);
1900 if (!index_ge_index(len, max_step)) {
1901 index_free(&max_step);
1902 index_copy(&max_step, len);
1903 }
1904 index_sub(&len, max_step);
1905 index_from_int(&zero0, 0);
1906 xa = array_sub(pointer_get_data(levels[bt_pos].node), zero0, max_step, false, err);
1908 result = array_join(result, xa, err);
1909 if (unlikely(!result)) {
1910 index_free(&len);
1911 goto ret;
1912 }
1913 } while (index_ge_int(len, 1));
1914 index_free(&len);
1915 goto ret;
1916 }
1917 default:
1918 internal(file_line, "array_sub: invalid array tag %u", da_tag(array));
1919 }
1921 ret_array:
1922 if (deref)
1923 deref = false;
1924 else
1925 data_reference(array);
1926 result = array;
1928 ret_free_start_len:
1929 index_free(&start);
1930 index_free(&len);
1932 ret:
1933 if (deref)
1934 data_dereference(array);
1935 return result;
1936 }
1939 static int_default_t estimate_length(array_index_t length)
1941 if (likely(index_is_int(length)))
1942 return index_to_int(length);
1943 return signed_maximum(int_default_t);
1944 }
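/*
 * array_create: build an array of 'length' identical elements. If
 * 'flat_type' is non-NULL the element is the flat value at 'flat';
 * otherwise every element is the pointer 'ptr' (referenced once per
 * element). The array is allocated in chunks of at most estimate_length()
 * elements; on allocation failure the chunk size is halved and retried,
 * and the chunks are concatenated with array_join. 'length' and 'ptr' are
 * consumed.
 */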
1946 pointer_t array_create(array_index_t length, const struct type *flat_type, const unsigned char *flat, pointer_t ptr)
1948 pointer_t result = pointer_empty();
1949 int_default_t est, i;
1950 struct data *array;
1951 unsigned char *flat_ptr;
1952 ajla_error_t err, *err_ptr;
1954 try_again:
1955 est = estimate_length(length);
1957 if (false) {
1958 oom:
1959 est >>= 1;
1960 if (!est) {
1961 if (!pointer_is_empty(result))
1962 pointer_dereference(result);
1963 result = pointer_error(err, NULL, NULL pass_file_line);
1964 goto ret_result;
1965 }
1966 }
1967 err_ptr = &err;
1968 if (likely(est > 1))
1969 err_ptr = MEM_DONT_TRY_TO_FREE;
1970 if (flat_type) {
1971 flat_size_t element_size;
1972 bool cnst = data_element_is_const(flat, flat_type->size);
1973 bool clear = cnst && !flat[0];
1974 array = data_alloc_array_flat_mayfail(flat_type, est, est, clear, err_ptr pass_file_line);
1975 if (unlikely(!array))
1976 goto oom;
1977 if (!clear) {
1978 flat_ptr = da_array_flat(array);
1979 element_size = flat_type->size;
1980 if (cnst) {
1981 memset(flat_ptr, flat[0], element_size * est);
1982 } else {
1983 int_default_t first_est = minimum(est, 4096 / element_size);
1984 for (i = 0; i < first_est; i++) {
1985 flat_ptr = mempcpy(flat_ptr, flat, element_size);
1986 }
1987 while (i < est) {
1988 int_default_t this_step = minimum(est - i, first_est);
1989 flat_ptr = mempcpy(flat_ptr, da_array_flat(array), this_step * element_size);
1990 i += this_step;
1991 }
1992 }
1993 }
1994 } else {
1995 array = data_alloc_array_pointers_mayfail(est, est, err_ptr pass_file_line);
1996 if (unlikely(!array))
1997 goto oom;
1998 pointer_reference_owned_multiple(ptr, est);
1999 for (i = 0; i < est; i++) {
2000 da(array,array_pointers)->pointer[i] = ptr;
2001 }
2002 }
2004 if (likely(pointer_is_empty(result))) {
2005 result = pointer_data(array);
2006 } else {
2007 struct data *jr;
2008 jr = array_join(pointer_get_data(result), array, &err);
2009 if (!jr) {
2010 result = pointer_error(err, NULL, NULL pass_file_line);
2011 goto ret_result;
2012 }
2013 result = pointer_data(jr);
2014 }
2016 index_sub_int(&length, est);
2017 if (unlikely(index_ge_int(length, 1)))
2018 goto try_again;
2020 ret_result:
2021 if (!pointer_is_empty(ptr))
2022 pointer_dereference(ptr);
2023 index_free(&length);
2024 return result;
2025 }
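/*
 * array_create_sparse: like array_create() with a pointer element, but the
 * run is represented by a single DATA_TAG_array_same node instead of
 * materializing every element; a zero length falls back to array_create().
 */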
2027 pointer_t array_create_sparse(array_index_t length, pointer_t ptr)
2029 struct data *array;
2030 ajla_error_t err;
2032 if (unlikely(!index_ge_int(length, 1))) {
2033 array_index_t z;
2034 index_free(&length);
2035 index_from_int(&z, 0);
2036 return array_create(z, NULL, NULL, ptr);
2037 }
2039 array = data_alloc_array_same_mayfail(length, &err pass_file_line);
2040 if (unlikely(!array)) {
2041 pointer_dereference(ptr);
2042 return pointer_error(err, NULL, NULL pass_file_line);
2043 }
2044 da(array,array_same)->pointer = ptr;
2046 return pointer_data(array);
2047 }
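/*
 * array_string: allocate a flat array of 'length' elements of 'flat_type'
 * and copy the element bytes from 'flat'; returns an error pointer if the
 * allocation fails.
 */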
2049 pointer_t attr_fastcall array_string(int_default_t length, const struct type *flat_type, const unsigned char *flat)
2051 ajla_error_t err;
2052 struct data *array;
2054 array = data_alloc_array_flat_mayfail(flat_type, length, length, false, &err pass_file_line);
2055 if (unlikely(!array)) {
2056 return pointer_error(err, NULL, NULL pass_file_line);
2057 }
2059 memcpy(da_array_flat(array), flat, flat_type->size * length);
2061 return pointer_data(array);
2062 }
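/*
 * array_incomplete_decompose: split an incomplete-array node into its
 * finished first part and the pointer to the rest. If the node is writable
 * it is freed and its references are handed over; otherwise new references
 * are taken and the node is dereferenced.
 */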
2065 void attr_fastcall array_incomplete_decompose(struct data *array, struct data **first, pointer_t *last)
2067 *first = pointer_get_data(da(array,array_incomplete)->first);
2068 ajla_assert_lo(!array_is_empty(*first), (file_line, "array_incomplete_decompose: the first part is empty"));
2069 if (data_is_writable(array)) {
2070 *last = da(array,array_incomplete)->next;
2071 data_free_r1(array);
2072 } else {
2073 data_reference(*first);
2074 *last = pointer_reference(&da(array,array_incomplete)->next);
2075 data_dereference(array);
2076 }
2077 }
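/*
 * array_incomplete_collapse: repeatedly merge the already-evaluated
 * continuation of an incomplete array into its first part, joining
 * adjacent parts with array_join(), until the next pointer is an
 * unevaluated thunk or the chain ends. Returns true if anything was
 * merged.
 */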
2079 bool attr_fastcall array_incomplete_collapse(pointer_t *ptr)
2081 struct data *array, *first, *next, *next_first;
2082 tag_t next_tag;
2083 pointer_t next_next;
2084 ajla_error_t err;
2085 bool changed = false;
2087 array = pointer_get_data(*ptr);
2089 try_to_join_again:
2090 pointer_follow(&da(array,array_incomplete)->next, false, next, PF_NOEVAL, NULL, NULL,
2091 return changed,
2092 return changed
2093 );
2095 changed = true;
2097 next_tag = da_tag(next);
2098 if (next_tag != DATA_TAG_array_incomplete) {
2099 struct data *d;
2100 ajla_assert_lo(DATA_TAG_is_array(next_tag), (file_line, "array_incomplete_collapse: invalid tag %u", next_tag));
2101 array_incomplete_decompose(array, &first, &next_next);
2102 d = array_join(first, next, &err);
2103 if (!d)
2104 *ptr = pointer_error(err, NULL, NULL pass_file_line);
2105 else
2106 *ptr = pointer_data(d);
2107 return changed;
2108 } else {
2109 struct data *joined;
2110 array_incomplete_decompose(array, &first, &next_next);
2111 array_incomplete_decompose(next, &next_first, &next_next);
2112 joined = array_join(first, next_first, &err);
2113 if (!joined) {
2114 pointer_dereference(next_next);
2115 *ptr = pointer_error(err, NULL, NULL pass_file_line);
2116 return changed;
2117 }
2118 array = data_alloc_array_incomplete(joined, next_next, &err pass_file_line);
2119 if (unlikely(!array)) {
2120 data_dereference(joined);
2121 pointer_dereference(next_next);
2122 *ptr = pointer_error(err, NULL, NULL pass_file_line);
2123 return changed;
2124 }
2125 *ptr = pointer_data(array);
2126 goto try_to_join_again;
2127 }
2128 }
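/*
 * array_from_flat_mem: build an array from a plain C buffer of 'n_elements'
 * elements of 'type'. The buffer is copied in flat chunks of at most
 * signed_maximum(int_default_t) elements and the chunks are joined with
 * array_join(); returns NULL with *mayfail set on failure.
 */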
2130 struct data * attr_fastcall array_from_flat_mem(const struct type *type, const char *mem, size_t n_elements, ajla_error_t *mayfail)
2132 struct data *b;
2133 struct data *r = NULL;
2134 int_default_t s;
2136 again:
2137 if (unlikely(n_elements > signed_maximum(int_default_t)))
2138 s = signed_maximum(int_default_t);
2139 else
2140 s = (int_default_t)n_elements;
2141 b = data_alloc_array_flat_mayfail(type, s, s, false, mayfail pass_file_line);
2142 if (unlikely(!b))
2143 goto ret_null;
2144 memcpy(da_array_flat(b), mem, s * type->size);
2145 if (!r) {
2146 r = b;
2147 } else {
2148 r = array_join(r, b, mayfail);
2149 if (unlikely(!r))
2150 goto ret_null;
2151 }
2152 if ((size_t)s < n_elements) {
2153 mem += s * type->size;
2154 n_elements -= s;
2155 goto again;
2156 }
2157 return r;
2159 ret_null:
2160 if (r)
2161 data_dereference(r);
2162 return NULL;
2163 }
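/*
 * Optional index accounting: when ARRAY_INDEX_T_COUNT_INDICES is defined,
 * n_indices counts live array_index_t instances so that array_index_done()
 * can report a leak at shutdown.
 */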
2167 #ifdef ARRAY_INDEX_T_COUNT_INDICES
2169 shared_var refcount_t n_indices;
2171 void index_increment_count(void)
2173 refcount_inc(&n_indices);
2176 void index_decrement_count(void)
2178 if (unlikely(refcount_dec(&n_indices)))
2179 internal(file_line, "index_decrement_count: index refcount underflowed");
2182 #endif
2184 void name(array_index_init)(void)
2186 #ifdef ARRAY_INDEX_T_COUNT_INDICES
2187 refcount_init(&n_indices);
2188 #endif
2191 void name(array_index_done)(void)
2193 #ifdef ARRAY_INDEX_T_COUNT_INDICES
2194 if (unlikely(!refcount_is_one(&n_indices)))
2195 internal(file_line, "array_index_done: leaked %"PRIuMAX" array indices", (uintmax_t)refcount_get_nonatomic(&n_indices) - 1);
2196 #endif
2199 #endif