/*
 * Copyright (C) 2024 Mikulas Patocka
 *
 * This file is part of Ajla.
 *
 * Ajla is free software: you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * Ajla. If not, see <https://www.gnu.org/licenses/>.
 */

#include "ajla.h"

#include "mem_al.h"
#include "tree.h"
#include "rwlock.h"
#include "layout.h"

#include "type.h"
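
/*
 * Table of all built-in types, indexed by type tag: a signed/unsigned pair
 * for each fixed-width type, the native integer widths, the real types, the
 * flat option type and the "unknown" placeholder. Widths excluded from the
 * build get a TYPE_TAG_N dummy entry. The real type with width index 3 is
 * flagged extra_compare (judging by the 10-byte compare in type_memcmp(),
 * it is the 80-bit long double, whose padding bytes must be ignored).
 */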
static const struct type builtin_types[TYPE_TAG_unknown + 1] = {
#define f(n, s, u, sz, bits) \
	{ TYPE_TAG_fixed + 2 * n, 0, 0, sizeof(s), align_of(s) },\
	{ TYPE_TAG_fixed + 2 * n + 1, 0, 0, sizeof(u), align_of(u) },
	for_all_fixed(f)
#undef f
#define f(n, s, u, sz, bits) \
	{ TYPE_TAG_integer + n, 0, 0, sizeof(s), align_of(s), },
#define fn(n, s) \
	{ TYPE_TAG_N, 0, 0, 0, 0, },
	for_all_int(f, fn)
#undef f
#undef fn
#define f(n, t, nt, pack, unpack)\
	{ TYPE_TAG_real + n, 0, n == 3, sizeof(t), align_of(t) },
#define fn(n, t) \
	{ TYPE_TAG_N, 0, 0, 0, 0, },
	for_all_real(f, fn)
#undef f
#undef fn
	{ TYPE_TAG_flat_option, 0, 0, sizeof(ajla_flat_option_t), align_of(ajla_flat_option_t) },
	{ TYPE_TAG_unknown, 0, 0, 0, 1 },
};
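
/*
 * Return the built-in fixed-width type for width index idx, signed or
 * unsigned; return NULL if idx is out of range.
 */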
const struct type *type_get_fixed(unsigned idx, bool uns)
{
	if (unlikely(idx >= TYPE_FIXED_N + uzero))
		return NULL;
	return &builtin_types[TYPE_TAG_fixed + 2 * idx + (uns ? TYPE_TAG_fixed_unsigned : TYPE_TAG_fixed_signed)];
}
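
/*
 * Return the built-in integer type for width index idx. If that width is
 * not available in INT_MASK, fall back to the nearest wider width, then to
 * the nearest narrower one; abort if no integer width is available at all.
 */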
const struct type *type_get_int(unsigned idx)
{
	if (unlikely(!(INT_MASK & (1U << idx)))) {
		int i;
		for (i = (int)idx + 1; i < TYPE_INT_N; i++) {
			if (INT_MASK & (1U << i)) {
				idx = (unsigned)i;
				goto ret_type;
			}
		}
		for (i = (int)idx - 1; i >= 0; i--) {
			if (INT_MASK & (1U << i)) {
				idx = (unsigned)i;
				goto ret_type;
			}
		}
		internal(file_line, "type_get_int: can't select integer type %u", idx);
	}
ret_type:
	return &builtin_types[TYPE_TAG_integer + idx];
}
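
/*
 * Return the built-in real type for width index idx, falling back to the
 * nearest wider width in REAL_MASK; return NULL if none is available.
 */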
const struct type *type_get_real(unsigned idx)
{
	if (unlikely(!(REAL_MASK & (1U << idx)))) {
		int i;
		for (i = (int)idx + 1; i < TYPE_REAL_N; i++) {
			if (REAL_MASK & (1U << i)) {
				idx = (unsigned)i;
				goto ret_type;
			}
		}
		return NULL;
	}
ret_type:
	return &builtin_types[TYPE_TAG_real + idx];
}

const struct type *type_get_flat_option(void)
{
	return &builtin_types[TYPE_TAG_flat_option];
}

const struct type *type_get_unknown(void)
{
	return &builtin_types[TYPE_TAG_unknown];
}
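
/* Look up a built-in type directly by its tag. */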
const struct type *type_get_from_tag(type_tag_t tag)
{
	ajla_assert_lo(tag < n_array_elements(builtin_types), (file_line, "type_get_from_tag: invalid tag %u", tag));
	return &builtin_types[tag];
}


shared_var struct list record_list;
shared_var mutex_t record_list_mutex;
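
/*
 * Allocate a record definition with `size` slots. The new definition's type
 * starts as a copy of the unknown type retagged as TYPE_TAG_record, and it
 * is linked onto record_list so that type_done() can free it.
 */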
struct record_definition *type_alloc_record_definition(frame_t size, ajla_error_t *mayfail)
{
	struct record_definition *def;
	def = struct_alloc_array_mayfail(mem_calloc_mayfail, struct record_definition, types, size, mayfail);
	if (unlikely(!def))
		return NULL;
	def->type = *type_get_unknown();
	def->type.tag = TYPE_TAG_record;
	mutex_lock(&record_list_mutex);
	list_add(&record_list, &def->entry);
	mutex_unlock(&record_list_mutex);
	return def;
}


shared_var struct tree type_tree;
rwlock_decl(type_tree_mutex);

struct type_entry {
	struct tree_entry entry;
	union {
		struct flat_type_head head;
		struct flat_record_definition flat_record_definition;
		struct flat_array_definition flat_array_definition;
	} u;
};
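
/*
 * Comparison callback for type_tree, the tree of interned flat types.
 * Entries are ordered by tag first; flat records then compare by base
 * record pointer and per-slot subtype pointers, flat arrays by base type
 * pointer and element count.
 */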
static int type_entry_compare(const struct tree_entry *e1, uintptr_t e2)
{
	const struct type_entry *t1 = get_struct(e1, struct type_entry, entry);
	const struct type_entry *t2 = cast_ptr(const struct type_entry *, num_to_ptr(e2));

	ajla_assert((t1->u.head.type.tag == TYPE_TAG_flat_record || t1->u.head.type.tag == TYPE_TAG_flat_array) &&
		    (t2->u.head.type.tag == TYPE_TAG_flat_record || t2->u.head.type.tag == TYPE_TAG_flat_array),
		    (file_line, "type_entry_compare: invalid type tags: %d, %d",
		    t1->u.head.type.tag == TYPE_TAG_flat_record,
		    t2->u.head.type.tag == TYPE_TAG_flat_record));

	if (unlikely(t1->u.head.type.tag != t2->u.head.type.tag))
		return (int)t1->u.head.type.tag - (int)t2->u.head.type.tag;

	if (t1->u.head.type.tag == TYPE_TAG_flat_record) {
		frame_t n_slots, i;
		ajla_assert(t1->u.flat_record_definition.base->tag == TYPE_TAG_record &&
			    t2->u.flat_record_definition.base->tag == TYPE_TAG_record,
			    (file_line, "type_entry_compare: invalid record bases: %d, %d",
			    t1->u.flat_record_definition.base->tag,
			    t2->u.flat_record_definition.base->tag));
		if (ptr_to_num(t1->u.flat_record_definition.base) < ptr_to_num(t2->u.flat_record_definition.base))
			return -1;
		if (ptr_to_num(t1->u.flat_record_definition.base) > ptr_to_num(t2->u.flat_record_definition.base))
			return 1;
		n_slots = flat_record_n_slots(&t1->u.flat_record_definition);
		for (i = 0; i < n_slots; i++) {
			if (ptr_to_num(t1->u.flat_record_definition.entries[i].subtype) < ptr_to_num(t2->u.flat_record_definition.entries[i].subtype))
				return -1;
			if (ptr_to_num(t1->u.flat_record_definition.entries[i].subtype) > ptr_to_num(t2->u.flat_record_definition.entries[i].subtype))
				return 1;
		}
		return 0;
	} else {
		if (ptr_to_num(t1->u.flat_array_definition.base) < ptr_to_num(t2->u.flat_array_definition.base))
			return -1;
		if (ptr_to_num(t1->u.flat_array_definition.base) > ptr_to_num(t2->u.flat_array_definition.base))
			return 1;
		return (int)t1->u.flat_array_definition.n_elements - (int)t2->u.flat_array_definition.n_elements;
	}
}
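
/*
 * First phase of building a flat record over `base`: allocate a type_entry
 * with one (initially empty) entry per slot of the base record. The caller
 * fills in the subtypes with type_set_flat_record_entry() and then interns
 * the result with type_get_flat_record().
 */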
struct type_entry *type_prepare_flat_record(const struct type *base, ajla_error_t *mayfail)
{
	frame_t n_slots;
	struct type_entry *def;

	if (unlikely(base->depth >= TYPE_MAX_DEPTH))
		goto err_overflow;

	n_slots = type_def(base,record)->n_slots;
	def = struct_alloc_array_mayfail(mem_alloc_mayfail, struct type_entry, u.flat_record_definition.entries, n_slots, mayfail);
	if (unlikely(!def))
		return NULL;

	def->u.flat_record_definition.type.tag = TYPE_TAG_flat_record;
	def->u.flat_record_definition.type.extra_compare = 0;
	def->u.flat_record_definition.base = base;
	(void)memset(def->u.flat_record_definition.entries, 0, n_slots * sizeof(struct flat_record_definition_entry));

	return def;

err_overflow:
	fatal_mayfail(error_ajla(EC_ASYNC, AJLA_ERROR_SIZE_OVERFLOW), mayfail, "flat record too deep");
	return NULL;
}
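
/* Map a record entry index to its frame slot in the base record. */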
static frame_t flat_record_slot(struct type_entry *def, arg_t idx)
{
	return record_definition_slot(type_def(def->u.flat_record_definition.base,record), idx);
}
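
/*
 * Set the subtype of record entry idx; a subtype that needs extra
 * comparison makes the whole flat record need it, too.
 */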
void type_set_flat_record_entry(struct type_entry *def, arg_t idx, const struct type *subtype)
{
	frame_t slot = flat_record_slot(def, idx);
	ajla_assert_lo(!def->u.flat_record_definition.entries[slot].subtype, (file_line, "type_set_flat_record_entry: entry for index %"PRIuMAX" slot %"PRIuMAX" defined twice", (uintmax_t)idx, (uintmax_t)slot));
	def->u.flat_record_definition.entries[slot].subtype = subtype;
	if (subtype->extra_compare)
		def->u.flat_record_definition.type.extra_compare = 1;
}
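
/*
 * Compute the flat record's layout: assign a flat offset to every slot and
 * derive the total size and alignment. A byte-granularity use map detects
 * padding holes; if any byte is not covered by a member, the record is
 * flagged extra_compare so that a plain memcmp is never used on it.
 */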
static bool type_flat_record_allocate(struct type_entry *def, ajla_error_t *mayfail)
{
	arg_t i;
	struct layout *l;
	frame_t size, alignment;
	unsigned char *usemap;
	size_t u;

	l = layout_start(0, -1, 1, 0, mayfail);
	if (unlikely(!l))
		goto err;

	for (i = 0; i < flat_record_n_entries(&def->u.flat_record_definition); i++) {
		frame_t slot = flat_record_slot(def, i);
		const struct type *subtype = def->u.flat_record_definition.entries[slot].subtype;
		ajla_assert_lo(subtype != NULL, (file_line, "type_flat_record_allocate: subtype for entry %"PRIuMAX" not set", (uintmax_t)i));
		if (unlikely(!layout_add(l, subtype->size, subtype->align, mayfail)))
			goto err_free_layout;
	}

	if (unlikely(!layout_compute(l, true, mayfail)))
		goto err_free_layout;

	size = layout_size(l);
	alignment = layout_alignment(l);
	if (unlikely(size != (flat_size_t)size) ||
	    unlikely(alignment != (flat_size_t)alignment))
		goto err_overflow;

	def->u.flat_record_definition.type.depth = def->u.flat_record_definition.base->depth + 1;
	def->u.flat_record_definition.type.align = (flat_size_t)alignment;
	size = round_up(size, alignment);
	if (unlikely(!(flat_size_t)size))
		goto err_overflow;
	def->u.flat_record_definition.type.size = (flat_size_t)size;

	usemap = mem_calloc_mayfail(unsigned char *, size * sizeof(unsigned char), mayfail);
	if (unlikely(!usemap))
		goto err_free_layout;

	for (i = 0; i < flat_record_n_entries(&def->u.flat_record_definition); i++) {
		frame_t slot = flat_record_slot(def, i);
		const struct type *subtype = def->u.flat_record_definition.entries[slot].subtype;
		flat_size_t offset = (flat_size_t)layout_get(l, i);
		def->u.flat_record_definition.entries[slot].flat_offset = offset;
		memset(usemap + offset, 1, subtype->size);
	}

	for (u = 0; u < size; u++) {
		if (!usemap[u]) {
			def->u.flat_record_definition.type.extra_compare = 1;
			break;
		}
	}

	mem_free(usemap);
	layout_free(l);
	return true;

err_overflow:
	fatal_mayfail(error_ajla(EC_ASYNC, AJLA_ERROR_SIZE_OVERFLOW), mayfail, "flat record too large");
err_free_layout:
	layout_free(l);
err:
	return false;
}
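
/*
 * Intern a prepared flat record. The type tree is probed under a read lock
 * first; on a miss, the layout is computed and the entry is inserted under
 * the write lock, rechecking for a concurrent insert. `def` is consumed
 * either way: it is inserted into the tree or freed.
 */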
const struct type *type_get_flat_record(struct type_entry *def, ajla_error_t *mayfail)
{
	struct tree_entry *e;
	struct tree_insert_position ins;

	rwlock_lock_read(&type_tree_mutex);
	e = tree_find(&type_tree, type_entry_compare, ptr_to_num(def));
	if (likely(e != NULL)) {
		rwlock_unlock_read(&type_tree_mutex);
		type_free_flat_record(def);
		return &get_struct(e, struct type_entry, entry)->u.flat_record_definition.type;
	}
	rwlock_unlock_read(&type_tree_mutex);

	if (unlikely(!type_flat_record_allocate(def, mayfail))) {
		type_free_flat_record(def);
		return NULL;
	}

	rwlock_lock_write(&type_tree_mutex);
	e = tree_find_for_insert(&type_tree, type_entry_compare, ptr_to_num(def), &ins);
	if (unlikely(e != NULL)) {
		rwlock_unlock_write(&type_tree_mutex);
		type_free_flat_record(def);
		return &get_struct(e, struct type_entry, entry)->u.flat_record_definition.type;
	}
	tree_insert_after_find(&def->entry, &ins);
	rwlock_unlock_write(&type_tree_mutex);

	return &def->u.flat_record_definition.type;
}

void type_free_flat_record(struct type_entry *def)
{
	mem_free(def);
}
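
/*
 * Return the interned flat array type for `base` and `n_elements`, creating
 * it if needed. A stack-allocated key (`ae`) is used for the tree lookups,
 * so nothing is allocated on the fast path.
 */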
const struct type *type_get_flat_array(const struct type *base, pcode_t n_elements, ajla_error_t *mayfail)
{
	struct type_entry ae;
	struct type_entry *ap;
	struct tree_entry *e;
	struct tree_insert_position ins;
	flat_size_t size;

	if (unlikely(n_elements != (pcode_t)(flat_size_t)n_elements))
		goto err_overflow;

	memset(&ae, 0, sizeof ae);	/* avoid warning */
	ae.u.flat_array_definition.type.tag = TYPE_TAG_flat_array;
	ae.u.flat_array_definition.base = base;
	ae.u.flat_array_definition.n_elements = (flat_size_t)n_elements;

	rwlock_lock_read(&type_tree_mutex);
	e = tree_find(&type_tree, type_entry_compare, ptr_to_num(&ae));
	if (likely(e != NULL)) {
		rwlock_unlock_read(&type_tree_mutex);
		return &get_struct(e, struct type_entry, entry)->u.flat_array_definition.type;
	}
	rwlock_unlock_read(&type_tree_mutex);

	if (unlikely(base->depth >= TYPE_MAX_DEPTH))
		goto err_overflow;

	size = (flat_size_t)((uintmax_t)base->size * (uintmax_t)n_elements);
	if (n_elements && unlikely(size / n_elements != base->size))
		goto err_overflow;

	ap = mem_alloc_mayfail(struct type_entry *, partial_sizeof(struct type_entry, u.flat_array_definition), mayfail);
	if (unlikely(!ap))	/* mem_alloc_mayfail can return NULL when mayfail is set */
		return NULL;
	ap->u.flat_array_definition.type.tag = TYPE_TAG_flat_array;
	ap->u.flat_array_definition.type.depth = base->depth + 1;
	ap->u.flat_array_definition.type.extra_compare = base->extra_compare;
	ap->u.flat_array_definition.type.size = size;
	ap->u.flat_array_definition.type.align = base->align;	/* !!! TODO: use higher align for SIMD */
	ap->u.flat_array_definition.base = base;
	ap->u.flat_array_definition.n_elements = (flat_size_t)n_elements;

	rwlock_lock_write(&type_tree_mutex);
	e = tree_find_for_insert(&type_tree, type_entry_compare, ptr_to_num(&ae), &ins);
	if (unlikely(e != NULL)) {
		rwlock_unlock_write(&type_tree_mutex);
		mem_free(ap);
		return &get_struct(e, struct type_entry, entry)->u.flat_array_definition.type;
	}
	tree_insert_after_find(&ap->entry, &ins);
	rwlock_unlock_write(&type_tree_mutex);

	return &ap->u.flat_array_definition.type;

err_overflow:
	fatal_mayfail(error_ajla(EC_ASYNC, AJLA_ERROR_SIZE_OVERFLOW), mayfail, "flat array too large");
	return NULL;
}
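
/*
 * Compare n consecutive flat values of the given type. Types without the
 * extra_compare flag are compared with a single memcmp; otherwise compare
 * element-wise: reals of width index 3 by their 10 significant bytes, flat
 * records field by field and flat arrays via their element type, so that
 * padding bytes never influence the result.
 */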
int type_memcmp(const unsigned char *flat1, const unsigned char *flat2, const struct type *type, size_t n)
{
	size_t i;
	if (likely(!type->extra_compare))
		return memcmp(flat1, flat2, type->size * n);
	for (i = 0; i < n; i++, flat1 += type->size, flat2 += type->size) {
		int c;
		if (TYPE_TAG_IS_REAL(type->tag) && TYPE_TAG_IDX_REAL(type->tag) == 3) {
			c = memcmp(flat1, flat2, 10);
			if (c)
				return c;
		} else if (type->tag == TYPE_TAG_flat_record) {
			struct flat_record_definition *fd = type_def(type,flat_record);
			struct record_definition *rec = type_def(fd->base,record);
			arg_t i;
			for (i = 0; i < rec->n_entries; i++) {
				frame_t f = rec->idx_to_frame[i];
				struct flat_record_definition_entry *fde;
				if (unlikely(f == NO_FRAME_T))
					continue;
				fde = &fd->entries[f];
				c = type_memcmp(flat1 + fde->flat_offset, flat2 + fde->flat_offset, fde->subtype, 1);
				if (c)
					return c;
			}
		} else if (type->tag == TYPE_TAG_flat_array) {
			struct flat_array_definition *fd = type_def(type,flat_array);
			c = type_memcmp(flat1, flat2, fd->base, fd->n_elements);
			if (c)
				return c;
		} else {
			internal(file_line, "type_memcmp: invalid type tag %u", type->tag);
		}
	}
	return 0;
}
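
/* Initialize the record list and the tree of interned flat types. */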
void type_init(void)
{
	list_init(&record_list);
	mutex_init(&record_list_mutex);
	tree_init(&type_tree);
	rwlock_init(&type_tree_mutex);
}
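
/* Free all interned flat types and record definitions at shutdown. */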
void type_done(void)
{
	while (!tree_is_empty(&type_tree)) {
		struct type_entry *t = get_struct(tree_any(&type_tree), struct type_entry, entry);
		tree_delete(&t->entry);
		mem_free(t);
	}
	rwlock_done(&type_tree_mutex);
	while (!list_is_empty(&record_list)) {
		struct record_definition *def = get_struct(record_list.prev, struct record_definition, entry);
		list_del(&def->entry);
		if (def->idx_to_frame)
			mem_free(def->idx_to_frame);
		mem_free(def);
	}
	mutex_done(&record_list_mutex);
}