/*
 * Extraction provenance (git web view residue, kept as a comment):
 * commit subject: "x86-64: hack the ABI of cg_upcall_ipret_copy_variable_to_pointer"
 * [ajla.git] / save.c
 * blob 0f946238716b689150221f564e56abc128d692b3
 */
/*
 * Copyright (C) 2024 Mikulas Patocka
 *
 * This file is part of Ajla.
 *
 * Ajla is free software: you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * Ajla. If not, see <https://www.gnu.org/licenses/>.
 */
19 #include "ajla.h"
21 #ifndef FILE_OMIT
23 #include "str.h"
24 #include "tree.h"
25 #include "arindex.h"
26 #include "module.h"
27 #include "os.h"
28 #include "os_util.h"
29 #include "amalloc.h"
30 #include "thread.h"
31 #include "ipfn.h"
33 #include "save.h"
35 #include <fcntl.h>
37 #if defined(OS_HAS_MMAP) && defined(USE_AMALLOC) && !((defined(OS_CYGWIN) || defined(OS_WIN32)) && defined(POINTER_COMPRESSION))
38 #define USE_MMAP
39 #endif
41 shared_var bool save_disable shared_init(false);
43 static const char id[] = "AJLA" " " __DATE__ " " __TIME__;
45 static bool save_ok;
46 static char *save_data;
47 static size_t save_len;
49 static size_t last_md;
51 struct position_map {
52 struct tree_entry entry;
53 uintptr_t old_position;
54 uintptr_t new_position;
55 size_t size;
58 static struct tree position_tree;
60 static pointer_t *pointers;
61 static size_t pointers_len;
63 static struct function_descriptor *fn_descs;
64 static size_t fn_descs_len;
66 struct file_descriptor {
67 struct function_descriptor *fn_descs;
68 size_t fn_descs_len;
69 char *dependencies;
70 size_t dependencies_l;
71 void *base;
72 cpu_feature_mask_t cpu_feature_flags;
73 unsigned char privileged;
74 unsigned char profiling;
75 char ajla_id[sizeof(id)];
78 static char *loaded_data;
79 static size_t loaded_data_len;
80 #ifdef USE_MMAP
81 static bool loaded_data_mapped;
82 static bool loaded_data_amalloc;
83 #endif
84 #define loaded_file_descriptor cast_ptr(struct file_descriptor *, loaded_data + loaded_data_len - sizeof(struct file_descriptor))
86 static size_t loaded_fn_idx;
87 static size_t loaded_fn_cache;
90 struct dependence {
91 struct tree_entry entry;
92 char *fingerprint;
93 size_t fingerprint_l;
94 char path_name[FLEXIBLE_ARRAY];
97 static struct tree dependencies;
98 static uchar_efficient_t dependencies_failed;
99 static mutex_t dependencies_mutex;
102 static int function_compare(const struct module_designator *md1, const struct function_designator *fd1, struct function_descriptor *fd2);
103 static void save_one_entry(arg_t n_arguments, arg_t n_return_values, pointer_t *arguments, pointer_t *returns);
104 static void save_finish_one(const struct module_designator *md, const struct function_designator *fd, arg_t n_arguments, arg_t n_return_values, code_t *code, ip_t code_size, const struct local_variable_flags *local_variables_flags, frame_t n_slots, struct data *types, struct line_position *lp, size_t lp_size, void *unoptimized_code_base, size_t unoptimized_code_size, size_t *entries, size_t n_entries, struct trap_record *trap_records, size_t trap_records_size);
105 static bool dep_get_stream(char **result, size_t *result_l);
108 static bool align_output(size_t align)
110 ajla_error_t sink;
111 while (unlikely(save_len & (align - 1)) != 0) {
112 if (unlikely(!array_add_mayfail(char, &save_data, &save_len, 0, NULL, &sink))) {
113 save_ok = false;
114 return false;
117 return true;
120 static pointer_t offset_to_ptr(size_t offset)
122 tag_t tag = da_thunk_tag(save_data + offset);
123 if (unlikely(!tag_is_thunk(tag))) {
124 return pointer_data(data_pointer_tag(num_to_ptr(offset), tag));
125 } else {
126 return pointer_thunk(thunk_pointer_tag(num_to_ptr(offset)));
130 static int position_tree_compare(const struct tree_entry *t1, uintptr_t p2)
132 struct position_map *pm = get_struct(t1, struct position_map, entry);
133 if (pm->old_position + pm->size <= p2)
134 return -1;
135 if (pm->old_position > p2)
136 return 1;
137 return 0;
140 static void free_position_tree(struct tree *t)
142 while (!tree_is_empty(t)) {
143 struct position_map *pm = get_struct(tree_any(t), struct position_map, entry);
144 tree_delete(&pm->entry);
145 mem_free(pm);
149 static size_t save_range(const void *ptr, size_t align, size_t size, struct stack_entry *subptrs, size_t subptrs_l)
151 ajla_error_t sink;
152 size_t data_offset, payload_offset, i;
153 struct data *d;
154 if (unlikely(!align_output(SAVED_DATA_ALIGN)))
155 return (size_t)-1;
156 data_offset = save_len;
157 d = data_alloc_flexible(saved, offsets, subptrs_l, &sink);
158 if (unlikely(!d)) {
159 save_ok = false;
160 return (size_t)-1;
162 refcount_set_read_only(&d->refcount_);
163 da(d,saved)->n_offsets = subptrs_l;
165 if (unlikely(!array_add_multiple_mayfail(char, &save_data, &save_len, d, offsetof(struct data, u_.saved.offsets[subptrs_l]), NULL, &sink))) {
166 save_ok = false;
167 data_free(d);
168 return (size_t)-1;
170 data_free(d);
172 if (unlikely(!align_output(align)))
173 return (size_t)-1;
174 payload_offset = save_len;
176 if (unlikely(!array_add_multiple_mayfail(char, &save_data, &save_len, ptr, size, NULL, &sink))) {
177 save_ok = false;
178 return (size_t)-1;
181 d = cast_ptr(struct data *, save_data + data_offset);
182 d = data_pointer_tag(d, DATA_TAG_saved);
183 da(d,saved)->total_size = save_len - data_offset;
184 for (i = 0; i < subptrs_l; i++) {
185 da(d,saved)->offsets[i] = payload_offset - data_offset + (cast_ptr(const char *, subptrs[i].ptr) - cast_ptr(const char *, ptr));
186 /*debug("offsets: %zx - %zx (%zx %zx %p %p)", i, da(d,saved)->offsets[i], payload_offset, data_offset, subptrs[i].ptr, ptr);*/
189 return payload_offset;
192 static size_t save_pointer(pointer_t *xptr, bool verify_only)
194 ajla_error_t sink;
195 struct stack_entry *subptrs;
196 size_t subptrs_len;
197 struct stack_entry *stk;
198 size_t stk_l;
199 uintptr_t *sps;
200 size_t ret = (size_t)-1; /* avoid warning */
202 struct tree processed;
203 tree_init(&processed);
205 if (unlikely(!data_save_init_stack(xptr, &stk, &stk_l))) {
206 save_ok = false;
207 goto err;
210 cont:
211 do {
212 size_t align, size, i, data_pos;
213 struct stack_entry ste;
214 const char *p1;
215 uintptr_t p1_num;
216 struct tree_entry *e;
217 struct tree_insert_position ins;
218 bool need_sub;
219 struct position_map *pm;
221 ste = stk[stk_l - 1];
222 p1 = ste.t->get_ptr(&ste);
223 p1_num = ptr_to_num(p1);
224 e = tree_find_for_insert(&position_tree, position_tree_compare, p1_num, &ins);
226 if (verify_only && !e) {
227 e = tree_find_for_insert(&processed, position_tree_compare, p1_num, &ins);
229 if (e) {
230 pm = get_struct(e, struct position_map, entry);
231 ret = p1_num - pm->old_position + pm->new_position;
232 goto pop_stk;
235 if (unlikely(!ste.t->get_properties(&ste, &align, &size, &subptrs, &subptrs_len)))
236 goto err;
238 ajla_assert_lo(size != 0, (file_line, "save_pointer: size == 0"));
240 sps = mem_alloc_array_mayfail(mem_calloc_mayfail, uintptr_t *, 0, 0, subptrs_len, sizeof(uintptr_t), &sink);
241 if (unlikely(!sps)) {
242 save_ok = false;
243 goto err_free_subptrs;
246 need_sub = false;
247 for (i = 0; i < subptrs_len; i++) {
248 struct stack_entry *subptr;
249 const char *p2;
250 uintptr_t p2_num;
251 struct tree_entry *e2;
253 subptr = &subptrs[i];
254 if (!subptr->t->get_ptr) {
255 sps[i] = sps[i - 1];
256 continue;
258 p2 = subptr->t->get_ptr(subptr);
259 p2_num = ptr_to_num(p2);
260 e2 = tree_find(&position_tree, position_tree_compare, p2_num);
261 if (verify_only && !e2) {
262 e2 = tree_find(&processed, position_tree_compare, p2_num);
264 if (!e2) {
265 if (unlikely(!array_add_mayfail(struct stack_entry, &stk, &stk_l, *subptr, NULL, &sink))) {
266 save_ok = false;
267 goto err_free_sps;
269 need_sub = true;
270 } else {
271 struct position_map *subpm = get_struct(e2, struct position_map, entry);
272 sps[i] = subpm->new_position - subpm->old_position;
275 if (need_sub) {
276 if (subptrs)
277 mem_free(subptrs);
278 mem_free(sps);
279 goto cont;
282 if (!verify_only) {
283 if (!ste.t->wrap_on_save) {
284 if (unlikely(!align_output(align)))
285 goto err_free_sps;
286 data_pos = save_len;
287 if (unlikely(!array_add_multiple_mayfail(char, &save_data, &save_len, p1, size, NULL, &sink))) {
288 save_ok = false;
289 goto err_free_sps;
291 } else {
292 data_pos = save_range(p1, align, size, subptrs, subptrs_len);
293 if (unlikely(data_pos == (size_t)-1)) {
294 goto err_free_sps;
297 ste.t->fixup_after_copy(save_data + data_pos);
299 for (i = 0; i < subptrs_len; i++) {
300 size_t offset = cast_ptr(char *, subptrs[i].ptr) - p1;
301 subptrs[i].t->fixup_sub_ptr(save_data + data_pos + offset, sps[i]);
303 } else {
304 data_pos = 0;
306 if (subptrs)
307 mem_free(subptrs);
308 mem_free(sps);
310 pm = mem_alloc_mayfail(struct position_map *, sizeof(struct position_map), &sink);
311 if (unlikely(!pm)) {
312 save_ok = false;
313 goto err;
315 pm->old_position = p1_num;
316 pm->new_position = data_pos;
317 pm->size = size;
318 tree_insert_after_find(&pm->entry, &ins);
319 ret = data_pos;
321 pop_stk:;
322 } while (--stk_l);
324 mem_free(stk);
325 free_position_tree(&processed);
326 return ret;
328 err_free_sps:
329 mem_free(sps);
330 err_free_subptrs:
331 if (subptrs)
332 mem_free(subptrs);
333 err:
334 if (stk)
335 mem_free(stk);
336 free_position_tree(&processed);
337 return (size_t)-1;
340 void save_prepare(void)
342 ajla_error_t sink;
343 save_data = NULL;
344 save_ok = !save_disable;
345 last_md = (size_t)-1;
346 tree_init(&position_tree);
347 pointers = NULL;
348 pointers_len = 0;
349 loaded_fn_idx = 0;
350 loaded_fn_cache = (size_t)-1;
351 if (unlikely(!array_init_mayfail(char, &save_data, &save_len, &sink))) {
352 save_ok = false;
353 return;
355 if (unlikely(!array_init_mayfail(struct function_descriptor, &fn_descs, &fn_descs_len, &sink))) {
356 save_ok = false;
357 return;
361 static int compare_arguments(arg_t n_arguments, pointer_t *ptr1, pointer_t *ptr2)
363 ajla_error_t sink;
364 arg_t ai;
365 for (ai = 0; ai < n_arguments; ai++) {
366 int c = data_compare(ptr1[ai], ptr2[ai], &sink);
367 if (c)
368 return c;
370 return 0;
373 static void save_entries_until(pointer_t *arguments)
375 struct function_descriptor *fn_desc;
376 struct data *dsc;
377 if (unlikely(!save_ok))
378 return;
379 if (loaded_fn_cache == (size_t)-1)
380 return;
381 fn_desc = &loaded_file_descriptor->fn_descs[loaded_fn_idx];
382 dsc = fn_desc->data_saved_cache;
383 while (loaded_fn_cache < da(dsc,saved_cache)->n_entries) {
384 pointer_t *dsc_arguments = da(dsc,saved_cache)->pointers + loaded_fn_cache * ((size_t)da(dsc,saved_cache)->n_arguments + (size_t)da(dsc,saved_cache)->n_return_values);
385 if (arguments) {
386 int c = compare_arguments(da(dsc,saved_cache)->n_arguments, arguments, dsc_arguments);
387 if (unlikely(c == DATA_COMPARE_OOM)) {
388 save_ok = false;
389 return;
391 if (unlikely(!c))
392 internal(file_line, "save_entries_until: data already present in loaded cache");
393 if (c < 0)
394 return;
396 save_one_entry(da(dsc,saved_cache)->n_arguments, da(dsc,saved_cache)->n_return_values, dsc_arguments, dsc_arguments + da(dsc,saved_cache)->n_arguments);
397 if (!save_ok)
398 return;
399 loaded_fn_cache++;
403 static void save_loaded_function(struct function_descriptor *fn_desc)
405 struct data *dsc;
406 ajla_error_t sink;
407 size_t i, k;
408 if (unlikely(!array_init_mayfail(pointer_t, &pointers, &pointers_len, &sink))) {
409 save_ok = false;
410 return;
412 dsc = fn_desc->data_saved_cache;
413 /*debug("saving ld: %p, %lu", fn_desc, fn_desc - loaded_file_descriptor->fn_descs);*/
414 k = (size_t)da(dsc,saved_cache)->n_arguments + (size_t)da(dsc,saved_cache)->n_return_values;
415 for (i = 0; i < da(dsc,saved_cache)->n_entries; i++) {
416 pointer_t *base = da(dsc,saved_cache)->pointers + k * i;
417 save_one_entry(da(dsc,saved_cache)->n_arguments, da(dsc,saved_cache)->n_return_values, base, base + da(dsc,saved_cache)->n_arguments);
418 if (unlikely(!save_ok))
419 return;
421 save_finish_one(fn_desc->md,
422 fn_desc->fd,
423 da(dsc,saved_cache)->n_arguments,
424 da(dsc,saved_cache)->n_return_values,
425 fn_desc->code,
426 fn_desc->code_size,
427 fn_desc->local_variables_flags,
428 fn_desc->n_slots,
429 fn_desc->types,
430 fn_desc->lp,
431 fn_desc->lp_size,
432 fn_desc->unoptimized_code_base,
433 fn_desc->unoptimized_code_size,
434 fn_desc->entries,
435 fn_desc->n_entries,
436 fn_desc->trap_records,
437 fn_desc->trap_records_size);
440 static void save_functions_until(struct data *d)
442 loaded_fn_cache = (size_t)-1;
443 if (unlikely(!save_ok))
444 return;
445 if (!loaded_data)
446 return;
447 /*debug("save_functions_until: %lu, %lu", loaded_fn_idx, loaded_file_descriptor->fn_descs_len);*/
448 while (loaded_fn_idx < loaded_file_descriptor->fn_descs_len) {
449 struct function_descriptor *fn_desc = &loaded_file_descriptor->fn_descs[loaded_fn_idx];
450 /*debug("test loaded: %lu", loaded_fn_idx);*/
451 if (d) {
452 int c = function_compare(da(d,function)->module_designator, da(d,function)->function_designator, fn_desc);
453 if (c <= 0 && c != DATA_COMPARE_OOM) {
454 if (!c) {
455 loaded_fn_cache = 0;
457 return;
460 save_loaded_function(fn_desc);
461 if (!save_ok)
462 return;
463 loaded_fn_idx++;
467 static void save_one_entry(arg_t n_arguments, arg_t n_return_values, pointer_t *arguments, pointer_t *returns)
469 ajla_error_t sink;
470 arg_t i;
471 for (i = 0; i < n_arguments; i++) {
472 pointer_t ptr;
473 size_t st = save_pointer(&arguments[i], false);
474 if (unlikely(st == (size_t)-1)) {
475 save_ok = false;
476 return;
478 ptr = offset_to_ptr(st);
479 if (unlikely(!array_add_mayfail(pointer_t, &pointers, &pointers_len, ptr, NULL, &sink))) {
480 save_ok = false;
481 return;
484 for (i = 0; i < n_return_values; i++) {
485 pointer_t ptr;
486 size_t st = save_pointer(&returns[i], false);
487 if (unlikely(st == (size_t)-1)) {
488 save_ok = false;
489 return;
491 ptr = offset_to_ptr(st);
492 if (unlikely(!array_add_mayfail(pointer_t, &pointers, &pointers_len, ptr, NULL, &sink))) {
493 save_ok = false;
494 return;
499 void save_start_function(struct data *d, bool new_cache)
501 if (!da(d,function)->n_return_values)
502 return;
503 if (!da(d,function)->is_saved || new_cache) {
504 ajla_error_t sink;
505 /*const struct module_designator *md = da(d,function)->module_designator;
506 const struct function_designator *fd = da(d,function)->function_designator;
507 debug("save_start_function: %u:%.*s:%u (%lu) - %s", md->path_idx, (int)md->path_len, md->path, fd->entries[0], fd->n_entries, da(d,function)->function_name);*/
508 save_functions_until(d);
509 if (unlikely(!save_ok))
510 return;
511 if (unlikely(!array_init_mayfail(pointer_t, &pointers, &pointers_len, &sink))) {
512 save_ok = false;
513 return;
518 void save_cache_entry(struct data *d, struct cache_entry *ce)
520 arg_t i;
521 pointer_t *returns;
522 ajla_error_t sink;
524 ajla_assert_lo(!ce->n_pending, (file_line, "save_cache_entry: evaluation is in progress: %lu", (unsigned long)ce->n_pending));
525 if (unlikely(!save_ok))
526 return;
528 /*debug("save cache entry: %s", da(d,function)->function_name);*/
529 for (i = 0; i < da(d,function)->n_arguments; i++) {
530 if (unlikely(save_pointer(&ce->arguments[i], true) == (size_t)-1)) {
531 /*debug("failed arg %d", i);*/
532 return;
535 for (i = 0; i < da(d,function)->n_return_values; i++) {
536 if (unlikely(save_pointer(&ce->returns[i].ptr, true) == (size_t)-1)) {
537 /*debug("failed return %d", i);*/
538 return;
541 save_entries_until(ce->arguments);
542 if (!save_ok)
543 return;
544 returns = mem_alloc_array_mayfail(mem_alloc_mayfail, pointer_t *, 0, 0, da(d,function)->n_return_values, sizeof(pointer_t), &sink);
545 if (unlikely(!returns)) {
546 save_ok = false;
547 return;
549 for (i = 0; i < da(d,function)->n_return_values; i++) {
550 returns[i] = ce->returns[i].ptr;
552 save_one_entry(da(d,function)->n_arguments, da(d,function)->n_return_values, ce->arguments, returns);
553 mem_free(returns);
556 static void save_finish_one(const struct module_designator *md, const struct function_designator *fd, arg_t n_arguments, arg_t n_return_values, code_t *code, ip_t code_size, const struct local_variable_flags *local_variables_flags, frame_t n_slots, struct data *types, struct line_position *lp, size_t lp_size, void *unoptimized_code_base, size_t unoptimized_code_size, size_t *entries, size_t n_entries, struct trap_record *trap_records, size_t trap_records_size)
558 ajla_error_t sink;
559 size_t saved_pos;
560 struct function_descriptor fn_desc;
561 struct data *dsc;
562 size_t code_off, lvf_off, lp_off, uc_off, en_off, tr_off;
563 size_t last_fd;
564 pointer_t types_ptr = pointer_data(types);
565 size_t saved_types;
566 if (!n_return_values)
567 goto free_it;
568 if (!pointers)
569 goto free_it;
570 /*debug("save_finish_one: %u:%.*s:%u (%lu)", md->path_idx, (int)md->path_len, md->path, fd->entries[0], fd->n_entries);*/
571 dsc = data_alloc_flexible(saved_cache, pointers, pointers_len, &sink);
572 if (unlikely(!dsc)) {
573 save_ok = false;
574 goto free_it;
576 refcount_set_read_only(&dsc->refcount_);
577 da(dsc,saved_cache)->n_entries = pointers_len / ((size_t)n_arguments + (size_t)n_return_values);
578 da(dsc,saved_cache)->n_arguments = n_arguments;
579 da(dsc,saved_cache)->n_return_values = n_return_values;
580 memcpy(da(dsc,saved_cache)->pointers, pointers, pointers_len * sizeof(pointer_t));
581 if (unlikely(!align_output(SAVED_DATA_ALIGN)))
582 goto free_it_2;
584 saved_pos = save_len;
585 if (unlikely(!array_add_multiple_mayfail(char, &save_data, &save_len, dsc, offsetof(struct data, u_.saved_cache.pointers[pointers_len]), NULL, &sink))) {
586 save_ok = false;
587 goto free_it_2;
590 code_off = save_range(code, align_of(code_t), (size_t)code_size * sizeof(code_t), NULL, 0);
591 if (unlikely(code_off == (size_t)-1))
592 goto free_it_2;
594 lvf_off = save_range(local_variables_flags, align_of(struct local_variable_flags), (size_t)n_slots * sizeof(struct local_variable_flags), NULL, 0);
595 if (unlikely(lvf_off == (size_t)-1))
596 goto free_it_2;
598 saved_types = save_pointer(&types_ptr, false);
599 if (unlikely(saved_types == (size_t)-1)) {
600 save_ok = false;
601 goto free_it_2;
604 lp_off = save_range(lp, align_of(struct line_position), (size_t)lp_size * sizeof(struct line_position), NULL, 0);
605 if (unlikely(lp_off == (size_t)-1))
606 goto free_it_2;
608 uc_off = save_range(unoptimized_code_base, CODE_ALIGNMENT, unoptimized_code_size, NULL, 0);
609 if (unlikely(uc_off == (size_t)-1))
610 goto free_it_2;
612 en_off = save_range(entries, align_of(size_t), n_entries * sizeof(size_t), NULL, 0);
613 if (unlikely(en_off == (size_t)-1))
614 goto free_it_2;
616 #ifdef HAVE_CODEGEN_TRAPS
617 tr_off = save_range(trap_records, align_of(struct trap_record), trap_records_size * sizeof(struct trap_record), NULL, 0);
618 #else
619 tr_off = save_range(trap_records, 1, 0, NULL, 0);
620 #endif
621 if (unlikely(tr_off == (size_t)-1))
622 goto free_it_2;
624 if (!(last_md != (size_t)-1 && !module_designator_compare(cast_ptr(struct module_designator *, save_data + last_md), md))) {
625 last_md = save_range(md, align_of(struct module_designator), module_designator_length(md), NULL, 0);
626 if (unlikely(last_md == (size_t)-1))
627 goto free_it_2;
630 last_fd = save_range(fd, align_of(struct function_designator), function_designator_length(fd), NULL, 0);
631 if (unlikely(last_fd == (size_t)-1))
632 goto free_it_2;
634 fn_desc.data_saved_cache = num_to_ptr(saved_pos);
635 fn_desc.data_saved_cache = data_pointer_tag(fn_desc.data_saved_cache, DATA_TAG_saved_cache);
636 fn_desc.code = num_to_ptr(code_off);
637 fn_desc.code_size = code_size;
638 fn_desc.local_variables_flags = num_to_ptr(lvf_off);
639 fn_desc.n_slots = n_slots;
640 fn_desc.types = num_to_ptr(saved_types);
641 fn_desc.types = data_pointer_tag(fn_desc.types, DATA_TAG_function_types);
642 fn_desc.lp = num_to_ptr(lp_off);
643 fn_desc.lp_size = lp_size;
644 fn_desc.unoptimized_code_base = num_to_ptr(uc_off);
645 fn_desc.unoptimized_code_size = unoptimized_code_size;
646 fn_desc.entries = num_to_ptr(en_off);
647 fn_desc.n_entries = n_entries;
648 fn_desc.trap_records = num_to_ptr(tr_off);
649 fn_desc.trap_records_size = trap_records_size;
650 fn_desc.md = num_to_ptr(last_md);
651 fn_desc.fd = num_to_ptr(last_fd);
652 if (!unlikely(array_add_mayfail(struct function_descriptor, &fn_descs, &fn_descs_len, fn_desc, NULL, &sink))) {
653 save_ok = false;
654 goto free_it_2;
657 free_it_2:
658 data_free(dsc);
659 free_it:
660 if (pointers)
661 mem_free(pointers);
662 pointers = NULL;
663 pointers_len = 0;
666 void save_finish_function(struct data *d)
668 void *unoptimized_code_base = NULL;
669 size_t unoptimized_code_size = 0;
670 size_t *entries = NULL;
671 size_t n_entries = 0;
672 struct trap_record *trap_records = NULL;
673 size_t trap_records_size = 0;
674 if (loaded_fn_cache != (size_t)-1) {
675 save_entries_until(NULL);
676 if (unlikely(!save_ok))
677 return;
678 loaded_fn_idx++;
679 loaded_fn_cache = (size_t)-1;
681 #ifdef HAVE_CODEGEN
682 if (!pointer_is_thunk(da(d,function)->codegen)) {
683 ajla_error_t sink;
684 size_t i;
685 struct data *codegen = pointer_get_data(da(d,function)->codegen);
686 entries = da(codegen,codegen)->offsets = mem_alloc_array_mayfail(mem_alloc_mayfail, size_t *, 0, 0, da(codegen,codegen)->n_entries, sizeof(size_t), &sink);
687 if (unlikely(!entries)) {
688 save_ok = false;
689 return;
691 n_entries = da(codegen,codegen)->n_entries;
692 for (i = 0; i < n_entries; i++)
693 entries[i] = da(codegen,codegen)->unoptimized_code[i] - cast_ptr(char *, da(codegen,codegen)->unoptimized_code_base);
694 unoptimized_code_base = da(codegen,codegen)->unoptimized_code_base;
695 unoptimized_code_size = da(codegen,codegen)->unoptimized_code_size;
696 #ifdef HAVE_CODEGEN_TRAPS
697 trap_records = da(codegen,codegen)->trap_records;
698 trap_records_size = da(codegen,codegen)->trap_records_size;
699 #endif
701 #endif
702 save_finish_one(da(d,function)->module_designator,
703 da(d,function)->function_designator,
704 da(d,function)->n_arguments,
705 da(d,function)->n_return_values,
706 da(d,function)->code, da(d,function)->code_size,
707 da(d,function)->local_variables_flags,
708 function_n_variables(d),
709 pointer_get_data(da(d,function)->types_ptr),
710 da(d,function)->lp,
711 da(d,function)->lp_size,
712 unoptimized_code_base,
713 unoptimized_code_size,
714 entries,
715 n_entries,
716 trap_records,
717 trap_records_size);
720 static void save_finish_file(void)
722 const int fn_desc_ptrs = 10;
723 ajla_error_t sink;
724 struct stack_entry *subptrs;
725 char *deps;
726 size_t i, deps_l;
727 size_t fn_descs_offset, deps_offset, file_desc_offset;
728 struct file_descriptor file_desc;
730 if (!fn_descs_len) {
731 save_ok = false;
732 return;
735 save_functions_until(NULL);
737 subptrs = mem_alloc_array_mayfail(mem_alloc_mayfail, struct stack_entry *, 0, 0, fn_descs_len, sizeof(struct stack_entry) * fn_desc_ptrs, &sink);
738 if (unlikely(!subptrs)) {
739 save_ok = false;
740 return;
742 for (i = 0; i < fn_descs_len; i++) {
743 subptrs[i * fn_desc_ptrs + 0].ptr = &fn_descs[i].data_saved_cache;
744 subptrs[i * fn_desc_ptrs + 1].ptr = &fn_descs[i].code;
745 subptrs[i * fn_desc_ptrs + 2].ptr = &fn_descs[i].local_variables_flags;
746 subptrs[i * fn_desc_ptrs + 3].ptr = &fn_descs[i].types;
747 subptrs[i * fn_desc_ptrs + 4].ptr = &fn_descs[i].lp;
748 subptrs[i * fn_desc_ptrs + 5].ptr = &fn_descs[i].md;
749 subptrs[i * fn_desc_ptrs + 6].ptr = &fn_descs[i].fd;
750 subptrs[i * fn_desc_ptrs + 7].ptr = &fn_descs[i].unoptimized_code_base;
751 subptrs[i * fn_desc_ptrs + 8].ptr = &fn_descs[i].entries;
752 subptrs[i * fn_desc_ptrs + 9].ptr = &fn_descs[i].trap_records;
753 /*debug("%p %p %zx", fn_descs[i].data_saved_cache, fn_descs[i].md, fn_descs[i].idx);*/
755 fn_descs_offset = save_range(fn_descs, align_of(struct function_descriptor), fn_descs_len * sizeof(struct function_descriptor), subptrs, fn_descs_len * fn_desc_ptrs);
756 mem_free(subptrs);
757 if (unlikely(fn_descs_offset == (size_t)-1))
758 return;
760 file_desc.fn_descs = num_to_ptr(fn_descs_offset);
761 file_desc.fn_descs_len = fn_descs_len;
763 if (unlikely(!dep_get_stream(&deps, &deps_l))) {
764 save_ok = false;
765 return;
767 deps_offset = save_range(deps, 1, deps_l, NULL, 0);
768 mem_free(deps);
769 if (unlikely(deps_offset == (size_t)-1))
770 return;
772 file_desc.dependencies = num_to_ptr(deps_offset);
773 file_desc.dependencies_l = deps_l;
775 file_desc.base = num_to_ptr(0);
776 file_desc.cpu_feature_flags = cpu_feature_flags;
777 file_desc.privileged = ipret_is_privileged;
778 file_desc.profiling = profiling;
779 memcpy(file_desc.ajla_id, id, sizeof(id));
781 subptrs = mem_alloc_mayfail(struct stack_entry *, sizeof(struct stack_entry) * 3, &sink);
782 if (unlikely(!subptrs)) {
783 save_ok = false;
784 return;
786 subptrs[0].ptr = &file_desc.fn_descs;
787 subptrs[1].ptr = &file_desc.dependencies;
788 subptrs[2].ptr = &file_desc.base;
789 file_desc_offset = save_range(&file_desc, align_of(struct file_descriptor), sizeof(struct file_descriptor), subptrs, 3);
790 mem_free(subptrs);
791 if (unlikely(file_desc_offset == (size_t)-1))
792 return;
795 static bool adjust_pointers(char *data, size_t len, uintptr_t offset)
797 size_t pos = 0;
798 while (pos < len) {
799 refcount_t *ref;
800 struct stack_entry *subptrs;
801 size_t align, size, subptrs_l, i;
802 if (unlikely((pos & (SAVED_DATA_ALIGN - 1)) != 0)) {
803 pos++;
804 continue;
806 ref = cast_ptr(refcount_t *, data + pos + offsetof(struct data, refcount_));
807 if (refcount_is_one(ref)) {
808 pos += SAVED_DATA_ALIGN;
809 continue;
811 if (unlikely(!refcount_is_read_only(ref)))
812 internal(file_line, "adjust_pointers: invalid refcount at position %"PRIxMAX"", (uintmax_t)pos);
813 if (unlikely(!data_save(data + pos, offset, &align, &size, &subptrs, &subptrs_l)))
814 return false;
815 for (i = 0; i < subptrs_l; i++) {
816 subptrs[i].t->fixup_sub_ptr(subptrs[i].ptr, offset);
818 if (subptrs)
819 mem_free(subptrs);
820 pos += size;
822 return true;
825 static int function_compare(const struct module_designator *md1, const struct function_designator *fd1, struct function_descriptor *fd2)
827 int x = module_designator_compare(md1, fd2->md);
828 if (x)
829 return x;
830 return function_designator_compare(fd1, fd2->fd);
833 struct function_descriptor *save_find_function_descriptor(const struct module_designator *md, const struct function_designator *fd)
835 struct function_descriptor *fn_descs;
836 size_t fn_descs_len;
837 size_t result;
838 int cmp;
839 if (!loaded_data)
840 return NULL;
841 fn_descs = loaded_file_descriptor->fn_descs;
842 fn_descs_len = loaded_file_descriptor->fn_descs_len;
843 binary_search(size_t, fn_descs_len, result, !(cmp = function_compare(md, fd, &fn_descs[result])), cmp >= 0, return NULL);
844 return &fn_descs[result];
847 static int dep_compare(const struct tree_entry *e1, uintptr_t e2)
849 struct dependence *d1 = get_struct(e1, struct dependence, entry);
850 const char *n2 = num_to_ptr(e2);
851 return strcmp(d1->path_name, n2);
854 static bool dep_fingerprint(const char *path_name, char **result, size_t *result_l)
856 ajla_error_t err;
857 os_stat_t st;
858 if (unlikely(!array_init_mayfail(char, result, result_l, &err)))
859 return false;
860 if (unlikely(!os_stat(dir_none, path_name, false, &st, &err))) {
861 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &err.error_class), sizeof err.error_class, NULL, &err)))
862 return false;
863 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &err.error_type), sizeof err.error_type, NULL, &err)))
864 return false;
865 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &err.error_aux), sizeof err.error_aux, NULL, &err)))
866 return false;
867 return true;
869 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_mode), sizeof st.st_mode, NULL, &err)))
870 return false;
871 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_dev), sizeof st.st_dev, NULL, &err)))
872 return false;
873 #if !defined(OS_DOS)
874 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_ino), sizeof st.st_ino, NULL, &err)))
875 return false;
876 #endif
877 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_size), sizeof st.st_size, NULL, &err)))
878 return false;
879 #if defined(HAVE_STRUCT_STAT_ST_ATIM)
880 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_ctim.tv_sec), sizeof st.st_ctim.tv_sec, NULL, &err)))
881 return false;
882 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_ctim.tv_nsec), sizeof st.st_ctim.tv_nsec, NULL, &err)))
883 return false;
884 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_mtim.tv_sec), sizeof st.st_mtim.tv_sec, NULL, &err)))
885 return false;
886 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_mtim.tv_nsec), sizeof st.st_mtim.tv_nsec, NULL, &err)))
887 return false;
888 #elif defined(HAVE_STRUCT_STAT_ST_ATIMESPEC)
889 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_ctimespec.tv_sec), sizeof st.st_ctimespec.tv_sec, NULL, &err)))
890 return false;
891 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_ctimespec.tv_nsec), sizeof st.st_ctimespec.tv_nsec, NULL, &err)))
892 return false;
893 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_mtimespec.tv_sec), sizeof st.st_mtimespec.tv_sec, NULL, &err)))
894 return false;
895 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_mtimespec.tv_nsec), sizeof st.st_mtimespec.tv_nsec, NULL, &err)))
896 return false;
897 #else
898 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_ctime), sizeof st.st_ctime, NULL, &err)))
899 return false;
900 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, cast_ptr(char *, &st.st_mtime), sizeof st.st_mtime, NULL, &err)))
901 return false;
902 #endif
903 return true;
906 void save_register_dependence(const char *path_name)
908 struct tree_insert_position ins;
909 ajla_error_t sink;
910 size_t path_name_len;
911 struct dependence *dep;
913 mutex_lock(&dependencies_mutex);
914 /*debug("registering dependence: '%s'", path_name);*/
915 if (unlikely(tree_find_for_insert(&dependencies, dep_compare, ptr_to_num(path_name), &ins) != NULL))
916 goto unlock_ret;
918 path_name_len = strlen(path_name) + 1;
919 dep = struct_alloc_array_mayfail(mem_alloc_mayfail, struct dependence, path_name, path_name_len, &sink);
920 if (unlikely(!dep)) {
921 dependencies_failed = true;
922 goto unlock_ret;
924 memcpy(dep->path_name, path_name, path_name_len);
925 if (unlikely(!dep_fingerprint(dep->path_name, &dep->fingerprint, &dep->fingerprint_l))) {
926 mem_free(dep);
927 dependencies_failed = true;
928 goto unlock_ret;
931 tree_insert_after_find(&dep->entry, &ins);
933 unlock_ret:
934 mutex_unlock(&dependencies_mutex);
937 static bool dep_get_stream(char **result, size_t *result_l)
939 ajla_error_t sink;
940 struct tree_entry *e;
941 if (unlikely(!array_init_mayfail(char, result, result_l, &sink)))
942 return false;
943 for (e = tree_first(&dependencies); e; e = tree_next(e)) {
944 struct dependence *dep = get_struct(e, struct dependence, entry);
945 size_t path_name_len = strlen(dep->path_name) + 1;
946 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, dep->path_name, path_name_len, NULL, &sink)))
947 return false;
948 if (unlikely(!array_add_mayfail(char, result, result_l, (char)dep->fingerprint_l, NULL, &sink)))
949 return false;
950 if (unlikely(!array_add_multiple_mayfail(char, result, result_l, dep->fingerprint, dep->fingerprint_l, NULL, &sink)))
951 return false;
953 return true;
/*
 * Verify the dependency stream of the loaded cache file (format as
 * produced by dep_get_stream: NUL-terminated path | 1-byte fingerprint
 * length | fingerprint bytes, repeated).
 *
 * Pass 1: re-fingerprint every recorded file and compare against the
 * stored fingerprint; any mismatch (or fingerprint failure) invalidates
 * the cache and we return false.
 * Pass 2: import each record into the in-memory `dependencies` tree so
 * the current run keeps tracking the same files.
 *
 * Returns true when every dependence is unchanged and imported.
 */
static bool dep_verify(void)
{
	const char *ptr, *end;
	ptr = loaded_file_descriptor->dependencies;
	end = ptr + loaded_file_descriptor->dependencies_l;
	while (ptr < end) {
		char *fp;
		size_t fp_l, l;
		/* fingerprint the file as it exists now */
		if (unlikely(!dep_fingerprint(ptr, &fp, &fp_l)))
			return false;
		ptr += strlen(ptr) + 1;
		/* stored fingerprint length is a single unsigned byte */
		l = (unsigned char)*ptr;
		ptr++;
		/* length check first — memcmp only runs when l == fp_l */
		if (unlikely(l != fp_l) || unlikely(memcmp(ptr, fp, fp_l))) {
			mem_free(fp);
			return false;
		}
		mem_free(fp);
		ptr += fp_l;
	}

	/* second pass: replay the stream into the live dependency tree */
	ptr = loaded_file_descriptor->dependencies;
	end = ptr + loaded_file_descriptor->dependencies_l;
	while (ptr < end) {
		struct tree_insert_position ins;
		ajla_error_t sink;
		struct dependence *dep;

		const char *path_name, *fingerprint;
		size_t path_name_len, fingerprint_len;
		path_name = ptr;
		path_name_len = strlen(ptr) + 1;
		ptr += path_name_len;
		fingerprint = ptr + 1;
		fingerprint_len = (unsigned char)*ptr;
		ptr += 1 + fingerprint_len;

		/* already registered during this run — skip the record */
		if (unlikely(tree_find_for_insert(&dependencies, dep_compare, ptr_to_num(path_name), &ins) != NULL))
			continue;

		dep = struct_alloc_array_mayfail(mem_alloc_mayfail, struct dependence, path_name, path_name_len, &sink);
		if (unlikely(!dep)) {
			return false;
		}
		memcpy(dep->path_name, path_name, path_name_len);
		dep->fingerprint_l = fingerprint_len;
		dep->fingerprint = mem_alloc_mayfail(char *, fingerprint_len, &sink);
		if (unlikely(!dep->fingerprint)) {
			mem_free(dep);
			return false;
		}
		memcpy(dep->fingerprint, fingerprint, fingerprint_len);
		tree_insert_after_find(&dep->entry, &ins);
	}
	/* a well-formed stream is consumed exactly */
	ajla_assert_lo(ptr == end, (file_line, "dep_verify: end mismatch: %p != %p", ptr, end));
	return true;
}
/*
 * Release the loaded cache image, whichever way it was obtained:
 * an mmap'ed file, an amalloc run, or a plain heap allocation.
 * Safe to call when nothing is loaded; resets loaded_data to NULL.
 */
static void unmap_loaded_data(void)
{
	if (loaded_data) {
#ifdef USE_MMAP
		if (likely(loaded_data_mapped)) {
			os_munmap(loaded_data, loaded_data_len, true);
		} else if (loaded_data_amalloc) {
			amalloc_run_free(loaded_data, loaded_data_len);
		} else
#endif
		/* fallback: plain heap allocation (also the only case
		 * when USE_MMAP is not compiled in) */
		{
			mem_free(loaded_data);
		}
		loaded_data = NULL;
	}
}
1030 static char *save_get_file(void)
1032 ajla_error_t sink;
1033 char *pn, *fn, *ext;
1034 size_t pn_l, fn_l;
1035 pn = str_dup(*program_name ? program_name : "ajla", -1, &sink);
1036 if (unlikely(!pn))
1037 return NULL;
1038 pn_l = strlen(pn);
1039 if (pn_l > 5 && !strcasecmp(pn + pn_l - 5, ".ajla"))
1040 pn[pn_l -= 5] = 0;
1041 #ifndef POINTER_COMPRESSION
1042 ext = ".sav";
1043 #else
1044 ext = ".sac";
1045 #endif
1046 if (unlikely(!array_init_mayfail(char, &fn, &fn_l, &sink)))
1047 goto free_ret;
1048 if (unlikely(!array_add_multiple_mayfail(char, &fn, &fn_l, pn, pn_l, NULL, &sink)))
1049 goto free_ret;
1050 if (unlikely(!array_add_multiple_mayfail(char, &fn, &fn_l, ext, strlen(ext), NULL, &sink)))
1051 goto free_ret;
1052 if (unlikely(!array_add_mayfail(char, &fn, &fn_l, 0, NULL, &sink)))
1053 goto free_ret;
1054 free_ret:
1055 mem_free(pn);
1056 return fn;
/*
 * Try to load a previously saved cache file from the cache directory.
 *
 * The file carries a struct file_descriptor at its *end*; it is read
 * first and validated (CPU features, privilege, profiling mode and the
 * build id must all match this binary).  The image is then brought into
 * memory, preferably by mmap'ing it at the address recorded in
 * file_desc.base (so embedded pointers stay valid); otherwise it is
 * read into the heap and its pointers are relocated with
 * adjust_pointers().  Finally the recorded dependencies are verified;
 * any failure silently discards the cache — a missing/stale cache is
 * never an error.
 */
static void save_load_cache(void)
{
	ajla_error_t sink;
	char *path, *file;
	dir_handle_t dir;
	handle_t h;
	os_stat_t st;
	struct file_descriptor file_desc;

	if (unlikely(save_disable))
		return;

	path = os_get_directory_cache(&sink);
	if (unlikely(!path))
		return;
	dir = os_dir_open(os_cwd, path, 0, &sink);
	mem_free(path);
	if (unlikely(!dir_handle_is_valid(dir)))
		return;

	file = save_get_file();
	if (unlikely(!file)) {
		os_dir_close(dir);
		return;
	}
	h = os_open(dir, file, O_RDONLY, 0, &sink);
	mem_free(file);
	os_dir_close(dir);
	if (unlikely(!handle_is_valid(h)))
		return;

	if (unlikely(!os_fstat(h, &st, &sink))) {
		os_close(h);
		return;
	}
	if (unlikely(!S_ISREG(st.st_mode))) {
		os_close(h);
		return;
	}
	loaded_data_len = (size_t)st.st_size;
	/* reject files whose size doesn't fit in size_t */
	if (unlikely((uintmax_t)st.st_size != loaded_data_len)) {
		os_close(h);
		return;
	}
	if (unlikely(loaded_data_len < sizeof(struct file_descriptor))) {
		warning("too short cache file");
		os_close(h);
		return;
	}
	/* the descriptor lives at the tail of the file */
	if (unlikely(!os_pread_all(h, cast_ptr(char *, &file_desc), sizeof(struct file_descriptor), st.st_size - sizeof(struct file_descriptor), &sink))) {
		os_close(h);
		return;
	}
	/* the cache is only usable by the exact same binary configuration */
	if (unlikely(file_desc.cpu_feature_flags != cpu_feature_flags) ||
	    unlikely(file_desc.privileged != ipret_is_privileged) ||
	    unlikely(file_desc.profiling != profiling) ||
	    unlikely(memcmp(file_desc.ajla_id, id, sizeof(id)))) {
		os_close(h);
		return;
	}
#ifdef USE_MMAP
	{
		int prot_flags = PROT_READ
#ifdef HAVE_CODEGEN
			| PROT_EXEC
#endif
			;
		void *ptr;
#ifndef POINTER_COMPRESSION
		/* map at the recorded base so embedded pointers are valid as-is */
		ptr = os_mmap(file_desc.base, loaded_data_len, prot_flags, MAP_PRIVATE, h, 0, &sink);
		/*debug("mapped: %p, %lx -> %p", file_desc.base, loaded_data_len, ptr);*/
		if (unlikely(ptr == MAP_FAILED))
			goto skip_mmap;
		if (unlikely(ptr != file_desc.base)) {
			/*debug("address mismatch");*/
			os_munmap(ptr, loaded_data_len, true);
			goto skip_mmap;
		}
		loaded_data = ptr;
		loaded_data_mapped = true;
#else
		/* with pointer compression the range must first be reserved
		 * inside the compressed heap, then mapped with MAP_FIXED */
		if (unlikely(!amalloc_ptrcomp_try_reserve_range(file_desc.base, loaded_data_len))) {
			/*debug("amalloc_ptrcomp_try_reserve_range failed");*/
			goto skip_mmap;
		}
		ptr = os_mmap(file_desc.base, loaded_data_len, prot_flags, MAP_PRIVATE | MAP_FIXED, h, 0, &sink);
		if (unlikely(ptr == MAP_FAILED)) {
			amalloc_run_free(file_desc.base, loaded_data_len);
			goto skip_mmap;
		}
		if (unlikely(ptr != file_desc.base))
			internal(file_line, "save_load_cache: os_mmap(MAP_FIXED) returned different pointer: %p != %p", ptr, file_desc.base);
		loaded_data = ptr;
		loaded_data_amalloc = true;
#endif
		os_close(h);
		goto verify_ret;
	}
skip_mmap:
#endif
	/* fallback: read the whole image into the heap and relocate it */
	loaded_data = mem_alloc_mayfail(char *, st.st_size, &sink);
	if (unlikely(!loaded_data)) {
		os_close(h);
		return;
	}
	if (unlikely(!os_pread_all(h, loaded_data, st.st_size, 0, &sink))) {
		os_close(h);
		mem_free(loaded_data);
		loaded_data = NULL;
		return;
	}
	os_close(h);
#ifdef HAVE_CODEGEN
#if defined(CODEGEN_USE_HEAP) || !defined(OS_HAS_MMAP)
	/*debug("adjusting pointers: %p, %p", loaded_data, loaded_data + loaded_data_len);*/
	adjust_pointers(loaded_data, loaded_data_len, ptr_to_num(loaded_data) - ptr_to_num(loaded_file_descriptor->base));
	os_code_invalidate_cache(cast_ptr(uint8_t *, loaded_data), loaded_data_len, true);
#else
	/* generated code must live in executable memory: copy the image
	 * into an amalloc run before relocating it */
	{
		void *new_ptr;
		new_ptr = amalloc_run_alloc(CODE_ALIGNMENT, loaded_data_len, false, false);
		if (unlikely(!new_ptr)) {
			unmap_loaded_data();
			return;
		}
		memcpy(new_ptr, loaded_data, loaded_data_len);
		mem_free(loaded_data);
		loaded_data = new_ptr;
		/*debug("adjusting pointers: %p, %p", loaded_data, loaded_data + loaded_data_len);*/
		adjust_pointers(loaded_data, loaded_data_len, ptr_to_num(loaded_data) - ptr_to_num(loaded_file_descriptor->base));
		os_code_invalidate_cache(cast_ptr(uint8_t *, loaded_data), loaded_data_len, true);
		loaded_data_amalloc = true;
	}
#endif
#endif
	/*adjust_pointers(loaded_data, loaded_data_len, 0);*/
#ifdef USE_MMAP
verify_ret:
#endif
	/* discard the cache if any recorded dependence changed */
	if (unlikely(!dep_verify())) {
		unmap_loaded_data();
		return;
	}
#ifdef DEBUG
	/* sanity check: function descriptors and their cache entries must
	 * be sorted, otherwise the binary-search lookups would misbehave */
	{
		size_t i;
		for (i = 0; i < loaded_file_descriptor->fn_descs_len; i++) {
			struct function_descriptor *fn_desc = &loaded_file_descriptor->fn_descs[i];
			struct data *dsc = fn_desc->data_saved_cache;
			size_t j, k;
			/*const struct module_designator *md = fn_desc->md;
			debug("content: %u:%.*s:%lu:%lu", md->path_idx, (int)md->path_len, md->path, fn_desc->fd->n_entries, (long)fn_desc->fd->entries[0]);*/
			if (i > 0) {
				int c = function_compare(loaded_file_descriptor->fn_descs[i - 1].md, loaded_file_descriptor->fn_descs[i - 1].fd, &loaded_file_descriptor->fn_descs[i]);
				if (unlikely(c >= 0))
					internal(file_line, "save_load_cache: misordered function descriptors: %d (%"PRIuMAX" / %"PRIuMAX")", c, (uintmax_t)i, (uintmax_t)loaded_file_descriptor->fn_descs_len);
			}
			k = (size_t)da(dsc,saved_cache)->n_arguments + (size_t)da(dsc,saved_cache)->n_return_values;
			if (da(dsc,saved_cache)->n_entries) {
				for (j = 0; j < da(dsc,saved_cache)->n_entries - 1; j++) {
					pointer_t *p1 = &da(dsc,saved_cache)->pointers[j * k];
					pointer_t *p2 = &da(dsc,saved_cache)->pointers[(j + 1) * k];
					int c = compare_arguments(da(dsc,saved_cache)->n_arguments, p1, p2);
					if (unlikely(c >= 0) && c != DATA_COMPARE_OOM)
						internal(file_line, "save_load_cache: misordered cache entries: %d", c);
				}
			}
		}
	}
#endif
}
1231 void name(save_init)(void)
1233 loaded_data = NULL;
1234 #ifdef USE_MMAP
1235 loaded_data_mapped = false;
1236 loaded_data_amalloc = false;
1237 #endif
1238 tree_init(&dependencies);
1239 dependencies_failed = false;
1240 mutex_init(&dependencies_mutex);
1241 save_load_cache();
/*
 * Write the accumulated save image (save_data/save_len) atomically
 * into the cache directory.  When mmap support is available, the image
 * is first copied to a freshly allocated run and its embedded pointers
 * are rebased to that run's address, so that a later load can map it
 * back at the recorded base.  All failures are silent — saving the
 * cache is best-effort.
 */
static void save_stream(void)
{
	ajla_error_t sink;
	char *file, *path;
#ifdef USE_MMAP
	char *save_data_mapped;
#endif
	path = os_get_directory_cache(&sink);
	if (unlikely(!path))
		return;
	file = save_get_file();
	if (!file) {
		mem_free(path);
		return;
	}
	/*debug("writing file: '%s'", file);*/
#ifdef USE_MMAP
	save_data_mapped = amalloc_run_alloc(1, save_len, false, true);
	/*debug("save_stream: %p, %llx", save_data_mapped, save_len);*/
	if (save_data_mapped) {
		memcpy(save_data_mapped, save_data, save_len);
		/*debug("adjusting pointers when saving");*/
		adjust_pointers(save_data_mapped, save_len, ptr_to_num(save_data_mapped));
		os_write_atomic(path, file, save_data_mapped, save_len, &sink);
		amalloc_run_free(save_data_mapped, save_len);
	} else
#endif
	/* no mmap support (or run allocation failed): write the raw image */
	{
		os_write_atomic(path, file, save_data, save_len, &sink);
	}
	mem_free(path);
	mem_free(file);
}
1278 void name(save_done)(void)
1280 /*debug("1: save_data: %p, save_ok %d", save_data, save_ok);*/
1281 if (save_ok) {
1282 save_finish_file();
1284 free_position_tree(&position_tree);
1285 /*debug("2: save_data: %p, save_ok %d", save_data, save_ok);*/
1286 if (save_data) {
1287 if (save_ok) {
1288 save_stream();
1290 mem_free(save_data);
1292 if (fn_descs) {
1293 mem_free(fn_descs);
1295 unmap_loaded_data();
1296 while (!tree_is_empty(&dependencies)) {
1297 struct dependence *dep = get_struct(tree_any(&dependencies), struct dependence, entry);
1298 tree_delete(&dep->entry);
1299 mem_free(dep->fingerprint);
1300 mem_free(dep);
1302 mutex_done(&dependencies_mutex);
1305 #endif