2 * Copyright (C) 2024 Mikulas Patocka
4 * This file is part of Ajla.
6 * Ajla is free software: you can redistribute it and/or modify it under the
7 * terms of the GNU General Public License as published by the Free Software
8 * Foundation, either version 3 of the License, or (at your option) any later
11 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
13 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License along with
16 * Ajla. If not, see <https://www.gnu.org/licenses/>.
/* frame_offs(x): signed byte offset of member x within struct frame_struct,
 * adjusted by frame_offset.  Cast to ssize_t so the result may legitimately
 * be negative. */
19 #define frame_offs(x) ((ssize_t)offsetof(struct frame_struct, x) - (ssize_t)frame_offset)
/* lo_word/hi_word: byte offset of the low/high half of a value that occupies
 * two (1 << size)-byte words, chosen by target endianness: on little-endian
 * the low word is at offset 0, on big-endian the high word is.
 * NOTE(review): the #else/#endif of this conditional is not visible in this
 * extract — presumably an #error for unknown endianness; verify in full file. */
21 #if defined(C_LITTLE_ENDIAN)
22 #define lo_word(size) (0)
23 #define hi_word(size) ((size_t)1 << (size))
24 #elif defined(C_BIG_ENDIAN)
25 #define lo_word(size) ((size_t)1 << (size))
26 #define hi_word(size) (0)
/* Forward declarations: raw (no register-allocation-aware) frame slot
 * load/store used by spill/unspill before their definitions below. */
32 static bool attr_w gen_frame_load_raw(struct codegen_context *ctx, unsigned size, enum extend ex, frame_t slot, int64_t offset, bool dual, unsigned reg);
33 static bool attr_w gen_frame_store_raw(struct codegen_context *ctx, unsigned size, frame_t slot, int64_t offset, unsigned reg);
/* Return the type descriptor of local variable slot `pos` of the function
 * being compiled (ctx->fn), validating its type tag on the way out. */
36 static const struct type *get_type_of_local(struct codegen_context *ctx, frame_t pos)
39 const struct data *function = ctx->fn;
40 t = da(function,function)->local_variables[pos].type;
/* Sanity-check the tag before handing the type to callers. */
42 TYPE_TAG_VALIDATE(t->tag);
/* Map a floating-point ("real") type index to the operand-size constant used
 * by the instruction emitter: 0..4 -> 16/32/64/80/128-bit.  Any other index
 * is a fatal internal error. */
46 static unsigned real_type_to_op_size(unsigned real_type)
49 case 0: return OP_SIZE_2;
50 case 1: return OP_SIZE_4;
51 case 2: return OP_SIZE_8;
52 case 3: return OP_SIZE_10;
53 case 4: return OP_SIZE_16;
55 internal(file_line, "real_type_to_op_size: invalid type %u", real_type);
/* Operand size to use when spilling/unspilling a value of type t:
 * reals use their FP operand size, other builtins use log2 of their byte
 * size.  The non-builtin branch body is not visible in this extract —
 * presumably OP_SIZE_SLOT or similar; confirm against the full source. */
60 static unsigned spill_size(const struct type *t)
62 if (!TYPE_TAG_IS_BUILTIN(t->tag)) {
64 } else if (TYPE_TAG_IS_REAL(t->tag)) {
65 return real_type_to_op_size(TYPE_TAG_IDX_REAL(t->tag));
67 return log_2(t->size);
/* Spill variable v from its allocated machine register (ctx->registers[v])
 * into its home frame slot, using the type-appropriate operand size. */
71 static bool attr_w spill(struct codegen_context *ctx, frame_t v)
73 const struct type *t = get_type_of_local(ctx, v);
74 g(gen_frame_store_raw(ctx, spill_size(t), v, 0, ctx->registers[v]));
/* Reload variable v from its frame slot back into its machine register.
 * The extension mode starts as `garbage` (upper bits don't matter) and is
 * tightened for flat options / non-flat types; the assignments on the two
 * branch arms are not visible in this extract — confirm in the full file. */
78 static bool attr_w unspill(struct codegen_context *ctx, frame_t v)
80 const struct type *t = get_type_of_local(ctx, v);
81 enum extend ex = garbage;
82 if (t->tag == TYPE_TAG_flat_option)
84 else if (!TYPE_IS_FLAT(t))
86 g(gen_frame_load_raw(ctx, spill_size(t), ex, v, 0, false, ctx->registers[v]));
/* Materialize the address of (frame slot + offset) into `reg` by adding the
 * slot's byte displacement to the frame pointer R_FRAME. */
91 static bool attr_w gen_frame_address(struct codegen_context *ctx, frame_t slot, int64_t offset, unsigned reg)
93 offset += (size_t)slot * slot_size;
94 g(gen_3address_alu_imm(ctx, i_size(OP_SIZE_ADDRESS), ALU_ADD, reg, R_FRAME, offset, 0));
/* Compute the immediate value to use when `slot` encodes a constant rather
 * than a real frame slot (see frame_t_is_const/frame_t_get_const).
 * For single-word accesses the constant is masked to `size` bytes (the
 * sign-extension path for sign_x is not visible in this extract — verify);
 * for dual-word accesses (`dual`), offset selects the low word (the value)
 * or the high word (its sign extension: -1 or 0). */
98 static int64_t get_constant(unsigned size, enum extend ex, frame_t slot, int64_t offset, bool dual)
/* garbage/native callers get the architecture's preferred extension. */
103 ex = ARCH_PREFERS_SX(size) ? sign_x : zero_x;
106 if (unlikely(offset != 0))
108 c = frame_t_get_const(slot);
110 if (size == OP_SIZE_1) c &= 0xFFUL;
111 if (size == OP_SIZE_2) c &= 0xFFFFUL;
112 if (size == OP_SIZE_4) c &= 0xFFFFFFFFUL;
/* Dual-word case: lo_word offset -> the constant, hi_word -> sign bits. */
116 if (offset == (int64_t)lo_word(size))
117 return frame_t_get_const(slot);
118 if (offset == (int64_t)hi_word(size))
119 return frame_t_get_const(slot) < 0 ? -1 : 0;
121 internal(file_line, "get_constant: invalid offset %"PRIdMAX"", (intmax_t)offset);
/* Load `size` bytes from frame slot (+offset) into `reg`, always going
 * through memory (no register-allocation shortcut — see gen_frame_load for
 * that).  Handles constant pseudo-slots, extension semantics, and a series
 * of architecture quirks; falls through to a plain MOV/MOVSX at the end. */
124 static bool attr_w gen_frame_load_raw(struct codegen_context *ctx, unsigned size, enum extend ex, frame_t slot, int64_t offset, bool dual, unsigned reg)
/* Constant pseudo-slot: just load the immediate. */
127 if (frame_t_is_const(slot)) {
128 g(gen_load_constant(ctx, reg, get_constant(size, ex, slot, offset, dual)));
/* Callers that don't care (garbage/native) get the arch-preferred mode. */
131 if (ex == garbage || ex == native) {
133 ex = ARCH_PREFERS_SX(size) ? sign_x : zero_x;
137 x_offset = offset + (size_t)slot * slot_size;
/* No byte/word memory ops (e.g. old Alpha): load a 32-bit word with
 * sign-extension, then fix up the extension explicitly. */
138 if (!ARCH_HAS_BWX && size < OP_SIZE_4) {
139 g(gen_address(ctx, R_FRAME, x_offset, reg_is_fp(reg) ? IMM_PURPOSE_VLDR_VSTR_OFFSET : IMM_PURPOSE_LDR_SX_OFFSET, OP_SIZE_4));
140 gen_insn(INSN_MOVSX, OP_SIZE_4, 0, 0);
142 gen_address_offset();
144 g(gen_extend(ctx, size, ex, reg, reg));
148 #if defined(ARCH_ALPHA)
/* Alpha with BWX: sub-word loads still need an explicit extend afterwards. */
149 if (size < OP_SIZE_4) {
150 g(gen_address(ctx, R_FRAME, x_offset, reg_is_fp(reg) ? IMM_PURPOSE_VLDR_VSTR_OFFSET : IMM_PURPOSE_LDR_OFFSET, size));
151 gen_insn(INSN_MOV, size, 0, 0);
153 gen_address_offset();
156 g(gen_extend(ctx, size, ex, reg, reg));
/* 32-bit zero-extended integer load: do a sign-extending load (the native
 * Alpha form), then zero-extend — presumably LDL has no zero-extend form. */
160 if (size == OP_SIZE_4 && !reg_is_fp(reg) && ex == zero_x) {
161 g(gen_frame_load_raw(ctx, size, sign_x, slot, offset, false, reg));
162 g(gen_extend(ctx, size, ex, reg, reg));
167 #if defined(ARCH_MIPS)
/* MIPS without 64-bit FP load/store: split an 8-byte FP load into two
 * 4-byte halves into the even/odd register pair, ordered by endianness. */
168 if (reg_is_fp(reg) && size == OP_SIZE_8 && !MIPS_HAS_LS_DOUBLE) {
169 #if defined(C_LITTLE_ENDIAN)
170 g(gen_frame_load_raw(ctx, OP_SIZE_4, zero_x, slot, offset, true, reg));
171 g(gen_frame_load_raw(ctx, OP_SIZE_4, zero_x, slot, offset + 4, true, reg + 1));
173 g(gen_frame_load_raw(ctx, OP_SIZE_4, zero_x, slot, offset, true, reg + 1));
174 g(gen_frame_load_raw(ctx, OP_SIZE_4, zero_x, slot, offset + 4, true, reg));
179 #if defined(ARCH_IA64) || defined(ARCH_PARISC)
/* IA64/PA-RISC: plain load then explicit extend (condition guarding this
 * path is not visible in this extract). */
181 g(gen_address(ctx, R_FRAME, x_offset, IMM_PURPOSE_LDR_OFFSET, size));
182 gen_insn(INSN_MOV, size, 0, 0);
184 gen_address_offset();
186 g(gen_extend(ctx, size, ex, reg, reg));
191 #if defined(ARCH_POWER)
/* POWER: no sign-extending byte load (lbz zero-extends), so load then
 * sign-extend explicitly. */
192 if (size == OP_SIZE_1 && ex == sign_x) {
193 g(gen_address(ctx, R_FRAME, x_offset, IMM_PURPOSE_LDR_OFFSET, size));
194 gen_insn(INSN_MOV, size, 0, 0);
196 gen_address_offset();
198 g(gen_extend(ctx, size, ex, reg, reg));
203 #if defined(ARCH_S390)
/* S390 without the long-displacement facility: use the insert-character
 * (mask) form for byte loads, then extend as requested. */
204 if (size == OP_SIZE_1 && !cpu_test_feature(CPU_FEATURE_long_displacement)) {
205 g(gen_address(ctx, R_FRAME, x_offset, IMM_PURPOSE_LDR_OFFSET, size));
206 gen_insn(INSN_MOV_MASK, OP_SIZE_NATIVE, MOV_MASK_0_8, 0);
209 gen_address_offset();
211 g(gen_extend(ctx, size, ex, reg, reg));
/* S390 128-bit FP value: two 8-byte loads into the FP register pair
 * (reg, reg + 2).  NOTE(review): slot 0 with x_offset passed as the offset
 * — relies on slot already being folded into x_offset above. */
215 if (size == OP_SIZE_16 && reg_is_fp(reg)) {
216 g(gen_frame_load_raw(ctx, OP_SIZE_8, zero_x, 0, x_offset, true, reg));
217 g(gen_frame_load_raw(ctx, OP_SIZE_8, zero_x, 0, x_offset + 8, true, reg + 2));
/* Generic fallback: one MOV/MOVSX from the frame. */
222 g(gen_address(ctx, R_FRAME, x_offset, reg_is_fp(reg) ? IMM_PURPOSE_VLDR_VSTR_OFFSET : ex ? IMM_PURPOSE_LDR_SX_OFFSET : IMM_PURPOSE_LDR_OFFSET, size));
223 gen_insn(unlikely(ex == sign_x) ? INSN_MOVSX : INSN_MOV, size, 0, 0);
225 gen_address_offset();
/* Load a frame slot into `reg`, preferring a register-to-register move when
 * the slot currently lives in a machine register; otherwise defer to
 * gen_frame_load_raw.  The trailing block deliberately poisons the upper
 * bits with random garbage when ex == garbage — presumably a debug mode to
 * catch code that wrongly relies on the upper bits; confirm the enclosing
 * #ifdef in the full file. */
230 static bool attr_w gen_frame_load(struct codegen_context *ctx, unsigned size, enum extend ex, frame_t slot, int64_t offset, bool dual, unsigned reg)
232 ajla_assert_lo((slot >= MIN_USEABLE_SLOT && slot < function_n_variables(ctx->fn)) || frame_t_is_const(slot), (file_line, "gen_frame_load: invalid slot: %lu >= %lu", (unsigned long)slot, (unsigned long)function_n_variables(ctx->fn)));
233 if (slot_is_register(ctx, slot)) {
/* Register-resident slots have no memory image; offset must be 0. */
234 if (unlikely(offset != 0))
235 internal(file_line, "gen_frame_load: offset is non-zero: %"PRIdMAX"", (intmax_t)offset);
236 if (ex != garbage && size < OP_SIZE_NATIVE && !reg_is_fp(reg)) {
237 g(gen_extend(ctx, size, ex, reg, ctx->registers[slot]));
240 g(gen_mov(ctx, !reg_is_fp(reg) ? OP_SIZE_NATIVE : size, reg, ctx->registers[slot]));
244 g(gen_frame_load_raw(ctx, size, ex, slot, offset, dual, reg));
/* Debug poisoning: zero-extend the value, then OR random bits above it. */
247 if (size < OP_SIZE_NATIVE && ex == garbage) {
249 g(gen_extend(ctx, size, zero_x, reg, reg));
250 mask = (rand()) | ((uint64_t)rand() << 31) | ((uint64_t)rand() << 62);
251 mask <<= 8ULL << size;
252 g(gen_imm(ctx, mask, IMM_PURPOSE_OR, OP_SIZE_NATIVE));
253 gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_OR, ALU_WRITES_FLAGS(ALU_OR, false));
/* Obtain the value of `slot` in some register, avoiding a copy when the slot
 * already lives in a suitable register: *dest receives the register actually
 * holding the value (the slot's own register or the supplied scratch `reg`).
 * The *dest assignments are on lines not visible in this extract. */
262 static bool attr_w gen_frame_get(struct codegen_context *ctx, unsigned size, enum extend ex, frame_t slot, unsigned reg, unsigned *dest)
264 const struct type *t;
265 if (frame_t_is_const(slot)) {
266 g(gen_frame_load(ctx, size, ex, slot, 0, false, reg));
270 t = get_type_of_local(ctx, slot);
271 ajla_assert_lo(slot >= MIN_USEABLE_SLOT && slot < function_n_variables(ctx->fn), (file_line, "gen_frame_get: invalid slot: %lu >= %lu", (unsigned long)slot, (unsigned long)function_n_variables(ctx->fn)));
272 if (slot_is_register(ctx, slot)) {
/* Shadows the parameter on purpose: operate on the slot's own register. */
273 unsigned reg = ctx->registers[slot];
274 if (ex != garbage && size < OP_SIZE_NATIVE && !reg_is_fp(reg)) {
/* flat_option values small enough for the ABI bool size are already
 * canonical — no extend needed. */
275 if (t->tag == TYPE_TAG_flat_option && size <= ARCH_BOOL_SIZE)
277 g(gen_extend(ctx, size, ex, reg, reg));
284 g(gen_frame_load(ctx, size, ex, slot, 0, false, reg));
/* Debug poisoning of upper bits, as in gen_frame_load, but never for
 * flat_option values (their upper bits are meaningful to comparisons). */
287 if (size < OP_SIZE_NATIVE && ex == garbage && t->tag != TYPE_TAG_flat_option) {
289 g(gen_extend(ctx, size, zero_x, *dest, *dest));
290 mask = (rand()) | ((uint64_t)rand() << 31) | ((uint64_t)rand() << 62);
291 mask <<= 8ULL << size;
292 g(gen_imm(ctx, mask, IMM_PURPOSE_OR, OP_SIZE_NATIVE));
293 gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_OR, ALU_WRITES_FLAGS(ALU_OR, false));
#if defined(ARCH_X86)
/* x87 FP load: emit `insn` (e.g. FLD or an FP ALU-with-memory op) reading
 * the slot's frame memory.  If the slot is register-resident it must be
 * spilled first so the memory image is current — the spill call is on a
 * line not visible in this extract. */
303 static bool attr_w gen_frame_load_x87(struct codegen_context *ctx, unsigned insn, unsigned size, unsigned alu, frame_t slot)
305 if (slot_is_register(ctx, slot))
307 g(gen_address(ctx, R_FRAME, (size_t)slot * slot_size, IMM_PURPOSE_LDR_OFFSET, size));
308 gen_insn(insn, size, alu, 0);
309 gen_address_offset();
/* x87 FP store: emit `insn` (e.g. FSTP) writing the slot's frame memory,
 * then unspill so a register-resident slot picks up the stored value. */
313 static bool attr_w gen_frame_store_x87(struct codegen_context *ctx, unsigned insn, unsigned size, frame_t slot)
315 g(gen_address(ctx, R_FRAME, (size_t)slot * slot_size, IMM_PURPOSE_STR_OFFSET, size));
316 gen_insn(insn, size, 0, 0);
317 gen_address_offset();
/* Refresh the register copy from the just-written memory image. */
318 if (slot_is_register(ctx, slot))
319 g(unspill(ctx,slot));
/* reg = reg ALU slot-value.  Uses the slot's register directly when
 * resident, an immediate when the slot is a constant, a memory operand on
 * x86/s390 (which support ALU-with-memory), and otherwise loads into
 * R_SCRATCH_NA_1 first. */
324 static bool attr_w gen_frame_load_op(struct codegen_context *ctx, unsigned size, enum extend ex, unsigned alu, unsigned writes_flags, frame_t slot, int64_t offset, bool dual, unsigned reg)
326 ajla_assert_lo((slot >= MIN_USEABLE_SLOT && slot < function_n_variables(ctx->fn)) || frame_t_is_const(slot), (file_line, "gen_frame_load_op: invalid slot: %lu >= %lu", (unsigned long)slot, (unsigned long)function_n_variables(ctx->fn)));
327 if (slot_is_register(ctx, slot)) {
/* Sub-native sizes need a well-defined extension before the ALU op;
 * the body of this condition is not visible in this extract. */
328 if (size != i_size(size) + (unsigned)zero && ex != garbage)
330 g(gen_3address_alu(ctx, i_size(size), alu, reg, reg, ctx->registers[slot], writes_flags));
/* Constant pseudo-slot: fold into an immediate ALU operation. */
333 if (frame_t_is_const(slot)) {
334 g(gen_imm(ctx, get_constant(size, ex, slot, offset, dual), alu_purpose(alu), i_size(size)));
335 gen_insn(INSN_ALU + ARCH_PARTIAL_ALU(size), i_size(size), alu, ALU_WRITES_FLAGS(alu, is_imm()) | writes_flags);
341 #if defined(ARCH_X86) || defined(ARCH_S390)
342 #if defined(ARCH_S390)
/* s390 memory-operand ALU only exists for >= 32-bit sizes here. */
343 if (size >= OP_SIZE_4)
346 offset += (size_t)slot * slot_size;
347 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_LDR_OFFSET, size));
348 gen_insn(INSN_ALU + ARCH_PARTIAL_ALU(size), size, alu, (alu == ALU_MUL ? ALU_WRITES_FLAGS(alu, false) : 1) | writes_flags);
351 gen_address_offset();
356 #if defined(R_SCRATCH_NA_1)
/* Generic path: load into a scratch register, then do the ALU op. */
357 g(gen_frame_load(ctx, size, ex, slot, offset, dual, R_SCRATCH_NA_1));
358 g(gen_3address_alu(ctx, i_size(size), alu, reg, reg, R_SCRATCH_NA_1, writes_flags));
/* reg = ALU1(slot-value) — single-operand ALU (e.g. NEG/NOT family).
 * Register-resident slots operate directly; x86 can use a memory operand;
 * other architectures load into `reg` first then apply the op in place. */
363 static bool attr_w attr_unused gen_frame_load_op1(struct codegen_context *ctx, unsigned size, unsigned alu, unsigned writes_flags, frame_t slot, unsigned reg)
365 ajla_assert_lo(slot >= MIN_USEABLE_SLOT && slot < function_n_variables(ctx->fn), (file_line, "gen_frame_load_op1: invalid slot: %lu >= %lu", (unsigned long)slot, (unsigned long)function_n_variables(ctx->fn)));
366 if (slot_is_register(ctx, slot)) {
367 g(gen_2address_alu1(ctx, size, alu, reg, ctx->registers[slot], writes_flags));
370 #if defined(ARCH_X86)
372 int64_t offset = (size_t)slot * slot_size;
373 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_LDR_OFFSET, size));
374 gen_insn(INSN_ALU1 + ARCH_PARTIAL_ALU(size), size, alu, ALU1_WRITES_FLAGS(alu) | writes_flags);
376 gen_address_offset();
380 #if !defined(ARCH_X86)
/* garbage extend is fine: ALU1 result width matches `size`. */
381 g(gen_frame_load(ctx, size, garbage, slot, 0, false, reg));
382 g(gen_2address_alu1(ctx, size, alu, reg, reg, writes_flags));
/* Emit a compare between `reg` and the value of `slot` (+offset).
 * `swap` presumably selects operand order — the code path that consumes it
 * falls on lines not visible in this extract; `logical` selects an
 * unsigned/logical compare encoding (flags operand 1 + logical). */
388 static bool attr_w gen_frame_load_cmp(struct codegen_context *ctx, unsigned size, bool logical, enum extend attr_unused ex, bool swap, frame_t slot, int64_t offset, bool dual, unsigned reg)
390 if (slot_is_register(ctx, slot)) {
/* Sub-native compare needs defined upper bits first; body of this
 * condition is not visible in this extract. */
391 if (size != i_size_cmp(size) + (unsigned)zero && ex != garbage)
393 gen_insn(INSN_CMP, i_size_cmp(size), 0, 1 + logical);
396 gen_one(ctx->registers[slot]);
398 gen_one(ctx->registers[slot]);
/* Constant pseudo-slot: compare against an immediate, or load it into
 * R_CONST_IMM when it does not fit the immediate form. */
403 if (frame_t_is_const(slot)) {
405 g(gen_imm(ctx, get_constant(size, ex, slot, offset, dual), !logical ? IMM_PURPOSE_CMP : IMM_PURPOSE_CMP_LOGICAL, size));
406 gen_insn(INSN_CMP, i_size_cmp(size), 0, 1 + logical);
410 g(gen_load_constant(ctx, R_CONST_IMM, get_constant(size, ex, slot, offset, dual)));
411 gen_insn(INSN_CMP, i_size_cmp(size), 0, 1 + logical);
412 gen_one(R_CONST_IMM);
417 #if defined(ARCH_S390) || defined(ARCH_X86)
418 #if defined(ARCH_S390)
/* s390 compare-with-memory only for >= 32-bit operands here. */
419 if (size < OP_SIZE_4)
422 offset += (size_t)slot * slot_size;
423 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_LDR_OFFSET, size));
424 gen_insn(INSN_CMP, size, 0, 1 + logical);
427 gen_address_offset();
429 gen_address_offset();
435 #if defined(R_SCRATCH_NA_1)
/* Generic path: load the slot into scratch and compare registers. */
436 g(gen_frame_load(ctx, size, ex, slot, offset, false, R_SCRATCH_NA_1));
437 gen_insn(INSN_CMP, i_size_cmp(size), 0, 1 + logical);
440 gen_one(R_SCRATCH_NA_1);
442 gen_one(R_SCRATCH_NA_1);
/* Emit a compare of slot's value against immediate `value`.  Register-
 * resident slots compare directly (x86 can use any size; other targets may
 * need an extend first), x86 can compare memory with an immediate, s390 has
 * a byte compare-logical-immediate (CLI), otherwise load-to-scratch. */
449 static bool attr_w gen_frame_load_cmp_imm(struct codegen_context *ctx, unsigned size, bool logical, enum extend attr_unused ex, frame_t slot, int64_t offset, int64_t value)
451 if (slot_is_register(ctx, slot)) {
452 #if defined(ARCH_X86)
453 g(gen_imm(ctx, value, logical ? IMM_PURPOSE_CMP_LOGICAL : IMM_PURPOSE_CMP, size));
454 gen_insn(INSN_CMP, size, 0, 1 + logical);
455 gen_one(ctx->registers[slot]);
/* Non-x86: normalize sub-32-bit operands before comparing; the body of
 * this condition is not visible in this extract. */
458 if (size != i_size(size) + (unsigned)zero && size < OP_SIZE_4 && ex != garbage)
460 g(gen_imm(ctx, value, logical ? IMM_PURPOSE_CMP_LOGICAL : IMM_PURPOSE_CMP, size));
461 gen_insn(INSN_CMP, i_size_cmp(size), 0, 1 + logical);
462 gen_one(ctx->registers[slot]);
467 #if defined(ARCH_X86)
/* x86: memory-vs-immediate compare straight off the frame. */
468 offset += (size_t)slot * slot_size;
469 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_MVI_CLI_OFFSET, size));
470 g(gen_imm(ctx, value, logical ? IMM_PURPOSE_CMP_LOGICAL : IMM_PURPOSE_CMP, size));
471 gen_insn(INSN_CMP, size, 0, 1 + logical);
472 gen_address_offset();
476 #if defined(ARCH_S390)
/* s390 CLI handles only byte-size logical compares. */
477 if (size != OP_SIZE_1 || !logical)
479 offset += (size_t)slot * slot_size;
480 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_MVI_CLI_OFFSET, size));
481 gen_insn(INSN_CMP, size, 0, 1 + logical);
482 gen_address_offset();
/* CLI takes an 8-bit immediate. */
484 gen_eight((int8_t)value);
487 #if defined(R_SCRATCH_NA_1)
/* Generic path: load, then compare scratch register with immediate. */
490 g(gen_frame_load(ctx, size, ex, slot, offset, false, R_SCRATCH_NA_1));
491 g(gen_imm(ctx, value, logical ? IMM_PURPOSE_CMP_LOGICAL : IMM_PURPOSE_CMP, size));
492 gen_insn(INSN_CMP, i_size(size), 0, 1 + logical);
493 gen_one(R_SCRATCH_NA_1);
/* Load a double-word value (two registers reg1=low-ish, reg2 — endianness
 * handled via lo_word/hi_word) from a frame slot.  Uses paired-load
 * instructions (LDP and equivalents) where the architecture and register
 * pairing rules allow; falls back to two ordinary loads. */
500 static bool attr_w gen_frame_load_2(struct codegen_context *ctx, unsigned size, frame_t slot, int64_t offset, unsigned reg1, unsigned reg2)
/* Constant pseudo-slots take the two-single-loads path (branch body not
 * visible in this extract — presumably a goto to the fallback). */
502 if (frame_t_is_const(slot))
504 #if defined(ARCH_ARM64)
505 offset += (size_t)slot * slot_size;
506 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_LDP_STP_OFFSET, size));
507 gen_insn(INSN_LDP, size, 0, 0);
510 gen_address_offset();
/* Pairing constraints: ARM32 LDRD needs an even/odd pair and ARMv6;
 * SPARC32 LDD needs an even second register; s390 LM needs consecutive
 * registers in the opposite order. */
513 #if defined(ARCH_ARM32)
514 if (likely(!(reg1 & 1)) && likely(reg2 == reg1 + 1) && likely(cpu_test_feature(CPU_FEATURE_armv6)))
515 #elif defined(ARCH_SPARC32)
516 if (likely(!(reg2 & 1)) && likely(reg1 == reg2 + 1))
517 #elif defined(ARCH_S390)
518 if (likely(reg1 == reg2 + 1))
523 offset += (size_t)slot * slot_size;
/* Paired loads must be naturally aligned on targets that trap. */
524 if (UNALIGNED_TRAP) {
525 if (unlikely((offset & ((2U << size) - 1)) != 0)) {
526 offset -= (size_t)slot * slot_size;
530 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_LDP_STP_OFFSET, size));
531 gen_insn(INSN_LDP, size, 0, 0);
534 gen_address_offset();
/* Fallback: two single loads at the endian-appropriate half offsets. */
539 g(gen_frame_load(ctx, size, garbage, slot, offset + lo_word(size), true, reg1));
540 g(gen_frame_load(ctx, size, garbage, slot, offset + hi_word(size), true, reg2));
/* Store `reg` into frame slot (+offset), always going through memory.
 * Mirrors gen_frame_load_raw's arch special cases (MIPS split FP double,
 * s390 128-bit FP pair); the condition guarding the size clamp on the
 * first line is not visible in this extract. */
544 static bool attr_w gen_frame_store_raw(struct codegen_context *ctx, unsigned size, frame_t slot, int64_t offset, unsigned reg)
546 offset += (size_t)slot * slot_size;
/* Presumably for targets without sub-word stores: widen to >= 32 bits. */
548 size = maximum(OP_SIZE_4, size);
549 #if defined(ARCH_MIPS)
/* MIPS without 64-bit FP store: two 4-byte halves, endian-ordered.
 * slot 0 is passed because `offset` already includes the slot base. */
550 if (reg_is_fp(reg) && size == OP_SIZE_8 && !MIPS_HAS_LS_DOUBLE) {
551 #if defined(C_LITTLE_ENDIAN)
552 g(gen_frame_store_raw(ctx, OP_SIZE_4, 0, offset, reg));
553 g(gen_frame_store_raw(ctx, OP_SIZE_4, 0, offset + 4, reg + 1));
555 g(gen_frame_store_raw(ctx, OP_SIZE_4, 0, offset, reg + 1));
556 g(gen_frame_store_raw(ctx, OP_SIZE_4, 0, offset + 4, reg));
561 #if defined(ARCH_S390)
/* s390 128-bit FP value lives in the pair (reg, reg + 2). */
562 if (size == OP_SIZE_16 && reg_is_fp(reg)) {
563 g(gen_frame_store_raw(ctx, OP_SIZE_8, 0, offset, reg));
564 g(gen_frame_store_raw(ctx, OP_SIZE_8, 0, offset + 8, reg + 2));
/* Generic fallback: a single MOV to the frame. */
568 g(gen_address(ctx, R_FRAME, offset, reg_is_fp(reg) ? IMM_PURPOSE_VLDR_VSTR_OFFSET : IMM_PURPOSE_STR_OFFSET, size));
569 gen_insn(INSN_MOV, size, 0, 0);
570 gen_address_offset();
/* Store `reg` into a frame slot, preferring a register-to-register move
 * when the slot is register-resident (in which case offset must be 0). */
575 static bool attr_w gen_frame_store(struct codegen_context *ctx, unsigned size, frame_t slot, int64_t offset, unsigned reg)
577 ajla_assert_lo(slot >= MIN_USEABLE_SLOT && slot < function_n_variables(ctx->fn), (file_line, "gen_frame_store: invalid slot: %lu >= %lu", (unsigned long)slot, (unsigned long)function_n_variables(ctx->fn)));
578 if (slot_is_register(ctx, slot)) {
579 if (unlikely(offset != 0))
580 internal(file_line, "gen_frame_store: offset is non-zero: %"PRIdMAX"", (intmax_t)offset);
581 g(gen_mov(ctx, !reg_is_fp(reg) ? OP_SIZE_NATIVE : size, ctx->registers[slot], reg));
584 return gen_frame_store_raw(ctx, size, slot, offset, reg);
/* Pick the register to build a result for slot_r in: the slot's own register
 * when it is register-resident and does not alias the registers of the
 * still-needed operand slots slot_na_1/slot_na_2 (alias branch bodies are
 * not visible in this extract — presumably they return `reg` instead). */
587 static unsigned gen_frame_target(struct codegen_context *ctx, frame_t slot_r, frame_t slot_na_1, frame_t slot_na_2, unsigned reg)
589 if (slot_is_register(ctx, slot_r)) {
590 short d = ctx->registers[slot_r];
591 if (slot_na_1 != NO_FRAME_T && slot_is_register(ctx, slot_na_1) && ctx->registers[slot_na_1] == d)
593 if (slot_na_2 != NO_FRAME_T && slot_is_register(ctx, slot_na_2) && ctx->registers[slot_na_2] == d)
/* Store a double-word value (reg1/reg2) into a frame slot — the store-side
 * counterpart of gen_frame_load_2, with the same STP/STRD/STD/STM pairing
 * rules and alignment fallback. */
600 static bool attr_w gen_frame_store_2(struct codegen_context *ctx, unsigned size, frame_t slot, int64_t offset, unsigned reg1, unsigned reg2)
602 #if defined(ARCH_ARM64)
603 offset += (size_t)slot * slot_size;
604 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_LDP_STP_OFFSET, size));
605 gen_insn(INSN_STP, size, 0, 0);
606 gen_address_offset();
/* Register pairing constraints, as in gen_frame_load_2. */
611 #if defined(ARCH_ARM32)
612 if (likely(!(reg1 & 1)) && likely(reg2 == reg1 + 1) && likely(cpu_test_feature(CPU_FEATURE_armv6)))
613 #elif defined(ARCH_SPARC32)
614 if (likely(!(reg2 & 1)) && likely(reg1 == reg2 + 1))
615 #elif defined(ARCH_S390)
616 if (likely(reg1 == reg2 + 1))
621 offset += (size_t)slot * slot_size;
/* Paired stores must be naturally aligned on trapping targets. */
622 if (UNALIGNED_TRAP) {
623 if (unlikely((offset & ((2U << size) - 1)) != 0)) {
624 offset -= (size_t)slot * slot_size;
628 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_LDP_STP_OFFSET, size));
629 gen_insn(INSN_STP, size, 0, 0);
630 gen_address_offset();
/* Fallback: two single stores at the endian-appropriate half offsets. */
637 g(gen_frame_store(ctx, size, slot, offset + lo_word(size), reg1));
638 g(gen_frame_store(ctx, size, slot, offset + hi_word(size), reg2));
/* Store immediate `imm` directly into the slot's frame memory.
 * The condition guarding the size clamp is not visible in this extract —
 * presumably the same no-sub-word-store case as gen_frame_store_raw. */
642 static bool attr_w gen_frame_store_imm_raw(struct codegen_context *ctx, unsigned size, frame_t slot, int64_t offset, int64_t imm)
644 offset += (size_t)slot * slot_size;
646 size = maximum(OP_SIZE_4, size);
647 g(gen_address(ctx, R_FRAME, offset, size == OP_SIZE_1 ? IMM_PURPOSE_MVI_CLI_OFFSET : IMM_PURPOSE_STR_OFFSET, size));
648 g(gen_imm(ctx, imm, IMM_PURPOSE_STORE_VALUE, size));
649 gen_insn(INSN_MOV, size, 0, 0);
650 gen_address_offset();
/* Store immediate `imm` into a frame slot.  When the slot is register-
 * resident, normalize the immediate to the register's canonical extension
 * (sign- or zero-extended per ARCH_PREFERS_SX) and load it as a constant;
 * otherwise write it to memory via gen_frame_store_imm_raw. */
655 static bool attr_w gen_frame_store_imm(struct codegen_context *ctx, unsigned size, frame_t slot, int64_t offset, int64_t imm)
657 ajla_assert_lo(slot >= MIN_USEABLE_SLOT && slot < function_n_variables(ctx->fn), (file_line, "gen_frame_store_imm: invalid slot: %lu >= %lu", (unsigned long)slot, (unsigned long)function_n_variables(ctx->fn)));
658 if (slot_is_register(ctx, slot)) {
659 if (unlikely(offset != 0))
660 internal(file_line, "gen_frame_store_imm: offset is non-zero: %"PRIdMAX"", (intmax_t)offset);
661 if (size == OP_SIZE_1)
662 imm = ARCH_PREFERS_SX(size) ? (int64_t)(int8_t)imm : (int64_t)(uint8_t)imm;
663 if (size == OP_SIZE_2)
664 imm = ARCH_PREFERS_SX(size) ? (int64_t)(int16_t)imm : (int64_t)(uint16_t)imm;
665 if (size == OP_SIZE_4)
666 imm = ARCH_PREFERS_SX(size) ? (int64_t)(int32_t)imm : (int64_t)(uint32_t)imm;
667 g(gen_load_constant(ctx, ctx->registers[slot], imm));
670 return gen_frame_store_imm_raw(ctx, size, slot, offset, imm);
/* Zero the slot's frame memory (does not touch a register-resident copy). */
673 static bool attr_w gen_frame_clear_raw(struct codegen_context *ctx, unsigned size, frame_t slot)
675 g(gen_frame_store_imm_raw(ctx, size, slot, 0, 0));
/* Zero a frame slot, including its register copy if resident; the body of
 * the register branch is not visible in this extract. */
679 static bool attr_w gen_frame_clear(struct codegen_context *ctx, unsigned size, frame_t slot)
681 g(gen_frame_store_imm(ctx, size, slot, 0, 0));
682 if (slot_is_register(ctx, slot))
#if defined(ARCH_X86)
/* x86: materialize condition `cond` (from the current flags) as a boolean
 * into slot `slot` via SETcc.  On 32-bit x86 SETcc only targets byte
 * registers, so it goes through R_SCRATCH_1; when ajla_flat_option_t is
 * wider than one byte the value is zero-extended first (presumably — the
 * gen_mov here copies at OP_SIZE_1; confirm gen_mov's extension contract). */
688 static bool attr_w gen_frame_set_cond(struct codegen_context *ctx, unsigned attr_unused size, bool attr_unused logical, unsigned cond, frame_t slot)
691 if (slot_is_register(ctx, slot)) {
692 unsigned reg = ctx->registers[slot];
693 #if defined(ARCH_X86_32)
/* SETcc into the scratch byte register, then move to the target. */
695 gen_insn(INSN_SET_COND_PARTIAL, OP_SIZE_1, cond, 0);
696 gen_one(R_SCRATCH_1);
697 gen_one(R_SCRATCH_1);
699 g(gen_mov(ctx, OP_SIZE_1, reg, R_SCRATCH_1));
/* x86-64: SETcc can target the slot's register directly. */
703 gen_insn(INSN_SET_COND_PARTIAL, OP_SIZE_1, cond, 0);
707 if (sizeof(ajla_flat_option_t) > 1) {
708 g(gen_mov(ctx, OP_SIZE_1, reg, reg));
/* Memory-resident slot. */
713 offset = (size_t)slot * slot_size;
714 if (sizeof(ajla_flat_option_t) > 1) {
715 gen_insn(INSN_SET_COND_PARTIAL, OP_SIZE_1, cond, 0);
716 gen_one(R_SCRATCH_1);
717 gen_one(R_SCRATCH_1);
719 g(gen_mov(ctx, OP_SIZE_1, R_SCRATCH_1, R_SCRATCH_1));
721 g(gen_frame_store(ctx, log_2(sizeof(ajla_flat_option_t)), slot, 0, R_SCRATCH_1));
/* One-byte option type: SETcc straight to memory. */
723 g(gen_address(ctx, R_FRAME, offset, IMM_PURPOSE_STR_OFFSET, OP_SIZE_1));
724 gen_insn(INSN_SET_COND, OP_SIZE_1, cond, 0);
725 gen_address_offset();
#elif defined(ARCH_ARM64)
/* ARM64: CSET directly into the slot's register, or into R_SCRATCH_1 and
 * then store for memory-resident slots. */
730 static bool attr_w gen_frame_set_cond(struct codegen_context *ctx, unsigned attr_unused size, bool attr_unused logical, unsigned cond, frame_t slot)
732 if (slot_is_register(ctx, slot)) {
733 gen_insn(INSN_SET_COND, OP_SIZE_4, cond, 0);
734 gen_one(ctx->registers[slot]);
736 gen_insn(INSN_SET_COND, OP_SIZE_4, cond, 0);
737 gen_one(R_SCRATCH_1);
738 g(gen_frame_store(ctx, log_2(sizeof(ajla_flat_option_t)), slot, 0, R_SCRATCH_1));
/* Generic targets: materialize `cond` as a boolean in `target`.
 * Old CPUs without conditional-move support use a branch (load 1, jump over
 * a load of 0); newer ones load 1 then conditionally move 0 over it when
 * the inverted condition (cond ^ 1) holds. */
743 static bool attr_w gen_frame_set_cond(struct codegen_context *ctx, unsigned size, bool logical, unsigned cond, frame_t slot)
745 unsigned target = gen_frame_target(ctx, slot, NO_FRAME_T, NO_FRAME_T, R_SCRATCH_1);
/* Feature gates for the conditional-move (cmov/select) fast path. */
746 #if defined(ARCH_POWER)
747 if (!cpu_test_feature(CPU_FEATURE_v203))
748 #elif defined(ARCH_S390)
749 if (!cpu_test_feature(CPU_FEATURE_misc_45))
750 #elif defined(ARCH_SPARC32)
/* Branch-based fallback: target = 1; skip if cond; target = 0. */
757 g(gen_load_constant(ctx, target, 1));
758 label = alloc_label(ctx);
759 if (unlikely(!label))
761 gen_insn(!logical ? INSN_JMP_COND : INSN_JMP_COND_LOGICAL, i_size_cmp(size), cond, 0);
763 g(gen_load_constant(ctx, target, 0));
/* Conditional-move path: target = 1; if (!cond) target = 0. */
767 g(gen_load_constant(ctx, target, 1));
768 g(gen_imm(ctx, 0, IMM_PURPOSE_CMOV, OP_SIZE_NATIVE));
769 if (cond & COND_FP) {
770 gen_insn(INSN_CMOV, OP_SIZE_NATIVE, cond ^ 1, 0);
772 #if defined(ARCH_S390)
773 gen_insn(logical ? INSN_CMOV_XCC : INSN_CMOV, OP_SIZE_NATIVE, cond ^ 1, 0);
775 gen_insn(size == OP_SIZE_8 ? INSN_CMOV_XCC : INSN_CMOV, OP_SIZE_NATIVE, cond ^ 1, 0);
782 g(gen_frame_store(ctx, log_2(sizeof(ajla_flat_option_t)), slot, 0, target));
/* slot_r = (reg cond imm), computed with a compare-into-register sequence
 * (gen_cmp_dest_reg) rather than flags, then stored to the result slot. */
788 static bool attr_w gen_frame_cmp_imm_set_cond_reg(struct codegen_context *ctx, unsigned size, unsigned reg, int64_t imm, unsigned cond, frame_t slot_r)
791 dest_reg = gen_frame_target(ctx, slot_r, NO_FRAME_T, NO_FRAME_T, R_CMP_RESULT);
792 g(gen_cmp_dest_reg(ctx, size, reg, (unsigned)-1, dest_reg, imm, cond));
793 g(gen_frame_store(ctx, log_2(sizeof(ajla_flat_option_t)), slot_r, 0, dest_reg));
/* slot_r = (reg cond slot-value).  Two strategies, presumably selected by a
 * preprocessor conditional not visible in this extract: compare via flags +
 * set-cond, or compare directly into a destination register. */
799 static bool attr_w gen_frame_load_cmp_set_cond(struct codegen_context *ctx, unsigned size, enum extend ex, frame_t slot, unsigned reg, unsigned cond, frame_t slot_r)
802 bool logical = COND_IS_LOGICAL(cond);
803 g(gen_frame_load_cmp(ctx, size, logical, ex, false, slot, 0, false, reg));
804 g(gen_frame_set_cond(ctx, size, logical, cond, slot_r));
/* Register-result variant (no flags). */
806 unsigned src_reg, dest_reg;
807 g(gen_frame_get(ctx, size, ex, slot, R_SCRATCH_NA_1, &src_reg));
808 dest_reg = gen_frame_target(ctx, slot_r, NO_FRAME_T, NO_FRAME_T, R_SCRATCH_NA_1);
809 g(gen_cmp_dest_reg(ctx, size, reg, src_reg, dest_reg, 0, cond));
810 g(gen_frame_store(ctx, log_2(sizeof(ajla_flat_option_t)), slot_r, 0, dest_reg));
/* slot_r = (slot-value cond value), immediate comparand.  Flags-based path
 * (compare-immediate + set-cond, with an s390 special case whose body is
 * not visible here) or register-result path via
 * gen_frame_cmp_imm_set_cond_reg. */
815 static bool attr_w gen_frame_load_cmp_imm_set_cond(struct codegen_context *ctx, unsigned size, enum extend ex, frame_t slot, int64_t value, unsigned cond, frame_t slot_r)
818 bool logical = COND_IS_LOGICAL(cond);
819 #if defined(ARCH_S390)
823 g(gen_frame_load_cmp_imm(ctx, size, logical, ex, slot, 0, value));
824 g(gen_frame_set_cond(ctx, size, false, cond, slot_r));
/* Register-result variant. */
827 g(gen_frame_get(ctx, size, ex, slot, R_SCRATCH_NA_1, &src_reg));
828 g(gen_frame_cmp_imm_set_cond_reg(ctx, size, src_reg, value, cond, slot_r));
/* Prologue before calling out of JITed code into the C runtime ("upcall").
 * Records the expected argument count for the matching gen_upcall_end, and
 * saves register-allocated variables: on x86-64 SysV, integer registers are
 * pushed (with spilling only for FP-held variables — the FP test guarding
 * line 844 falls on a line not visible here; verify) and stack alignment is
 * presumably maintained by the extra PUSH at line 856; elsewhere everything
 * is spilled to the frame. */
833 static bool attr_w gen_upcall_start(struct codegen_context *ctx, unsigned args)
836 size_t attr_unused n_pushes;
837 ajla_assert_lo(ctx->upcall_args == -1, (file_line, "gen_upcall_start: gen_upcall_end not called"));
838 ctx->upcall_args = (int)args;
840 #if (defined(ARCH_X86_64) || defined(ARCH_X86_X32)) && !defined(ARCH_X86_WIN_ABI)
841 for (i = 0; i < ctx->need_spill_l; i++) {
842 unsigned reg = ctx->registers[ctx->need_spill[i]];
844 g(spill(ctx, ctx->need_spill[i]));
/* Push the integer registers that hold live variables. */
847 for (i = 0; i < ctx->need_spill_l; i++) {
848 unsigned reg = ctx->registers[ctx->need_spill[i]];
849 if (!reg_is_fp(reg)) {
850 gen_insn(INSN_PUSH, OP_SIZE_8, 0, 0);
/* Extra push — presumably to keep the stack 16-byte aligned when the
 * number of pushes is odd; confirm the guard on the missing line. */
856 gen_insn(INSN_PUSH, OP_SIZE_8, 0, 0);
/* Non-x86-64 path: spill everything to frame memory. */
860 for (i = 0; i < ctx->need_spill_l; i++)
861 g(spill(ctx, ctx->need_spill[i]));
/* Epilogue after an upcall returns: verifies the argument count matches the
 * gen_upcall_start, then restores registers — popping in reverse push order
 * on x86-64 SysV (FP-held variables are unspilled from the frame instead),
 * or unspilling everything on other targets. */
866 static bool attr_w gen_upcall_end(struct codegen_context *ctx, unsigned args)
869 size_t attr_unused n_pushes;
870 ajla_assert_lo(ctx->upcall_args == (int)args, (file_line, "gen_upcall_end: gen_upcall_start mismatch: %d", ctx->upcall_args));
871 ctx->upcall_args = -1;
873 #if (defined(ARCH_X86_64) || defined(ARCH_X86_X32)) && !defined(ARCH_X86_WIN_ABI)
/* Presumably counting pushes / dropping the alignment pad — interior
 * lines of this loop are not visible in this extract. */
875 for (i = 0; i < ctx->need_spill_l; i++) {
876 unsigned reg = ctx->registers[ctx->need_spill[i]];
881 gen_insn(INSN_POP, OP_SIZE_8, 0, 0);
/* Pop integer registers in reverse order of the pushes. */
884 for (i = ctx->need_spill_l; i;) {
887 reg = ctx->registers[ctx->need_spill[i]];
888 if (!reg_is_fp(reg)) {
889 gen_insn(INSN_POP, OP_SIZE_8, 0, 0);
/* FP-held variables were spilled, not pushed: unspill them. */
893 for (i = 0; i < ctx->need_spill_l; i++) {
894 unsigned reg = ctx->registers[ctx->need_spill[i]];
896 g(unspill(ctx, ctx->need_spill[i]));
/* Non-x86-64 path: unspill everything from frame memory. */
899 for (i = 0; i < ctx->need_spill_l; i++)
900 g(unspill(ctx, ctx->need_spill[i]));
/* Emit a copy of `size` bytes from (src_base + src_offset) to
 * (dest_base + dest_offset).  Strategy, in order: s390 MVC for small
 * in-range copies; an inline load/store loop for sizes up to
 * INLINE_COPY_SIZE (with ARM LDP/STP pairing); x86-64 REP MOVSB when ERMS
 * is available; otherwise an upcall to the runtime's mem_copy. */
905 static bool attr_w gen_memcpy_raw(struct codegen_context *ctx, unsigned dest_base, int64_t dest_offset, unsigned src_base, int64_t src_offset, size_t size, size_t attr_unused align)
/* Branch body not visible in this extract — presumably clamps `align`
 * for under-aligned or odd-sized copies; verify in full source. */
910 if (align < 4 || (size & 3))
913 #if defined(ARCH_S390)
915 if (!(size & 3) || (cpu_test_feature(CPU_FEATURE_long_displacement) && cpu_test_feature(CPU_FEATURE_extended_imm)))
916 goto do_explicit_copy;
/* s390 MVC: length <= 256 and both 12-bit unsigned displacements. */
918 if (size <= 0x100 && dest_offset >= 0 && dest_offset < 0x1000 && src_offset >= 0 && src_offset < 0x1000) {
919 gen_insn(INSN_MEMCPY, 0, 0, 0);
920 gen_one(ARG_ADDRESS_1);
922 gen_eight(dest_offset);
923 gen_one(ARG_ADDRESS_1);
925 gen_eight(src_offset);
/* Inline copy loop for small sizes. */
933 if (size <= INLINE_COPY_SIZE) {
936 unsigned this_op_size;
937 #if defined(ARCH_ARM)
/* ARM: copy two native words per iteration with LDP/STP while enough
 * bytes (and, on ARM32, alignment) remain. */
938 if (size >= 2U << OP_SIZE_NATIVE
939 #if defined(ARCH_ARM32)
940 && align >= 1U << OP_SIZE_NATIVE
943 g(gen_address(ctx, src_base, src_offset, IMM_PURPOSE_LDP_STP_OFFSET, OP_SIZE_NATIVE));
944 gen_insn(INSN_LDP, OP_SIZE_NATIVE, 0, 0);
945 gen_one(R_SCRATCH_NA_1);
946 gen_one(R_SCRATCH_NA_2);
947 gen_address_offset();
949 g(gen_address(ctx, dest_base, dest_offset, IMM_PURPOSE_LDP_STP_OFFSET, OP_SIZE_NATIVE));
950 gen_insn(INSN_STP, OP_SIZE_NATIVE, 0, 0);
951 gen_address_offset();
952 gen_one(R_SCRATCH_NA_1);
953 gen_one(R_SCRATCH_NA_2);
955 size -= 2U << OP_SIZE_NATIVE;
956 src_offset += 2U << OP_SIZE_NATIVE;
957 dest_offset += 2U << OP_SIZE_NATIVE;
/* Step size selection: largest power of two allowed by remaining size
 * and alignment (interior lines not visible in this extract). */
962 if (size >= 8 && OP_SIZE_NATIVE >= OP_SIZE_8)
971 this_step = minimum(this_step, align);
972 this_op_size = log_2(this_step);
974 g(gen_address(ctx, src_base, src_offset, ARCH_PREFERS_SX(this_op_size) ? IMM_PURPOSE_LDR_SX_OFFSET : IMM_PURPOSE_LDR_OFFSET, this_op_size));
975 gen_insn(ARCH_PREFERS_SX(this_op_size) ? INSN_MOVSX : INSN_MOV, this_op_size, 0, 0);
976 gen_one(R_SCRATCH_1);
977 gen_address_offset();
979 g(gen_address(ctx, dest_base, dest_offset, IMM_PURPOSE_STR_OFFSET, this_op_size));
980 gen_insn(INSN_MOV, this_op_size, 0, 0);
981 gen_address_offset();
982 gen_one(R_SCRATCH_1);
985 src_offset += this_step;
986 dest_offset += this_step;
/* Large copy: set up the mem_copy upcall, ordering the address
 * computations so R_ARG0/R_ARG1 are never clobbered before use. */
992 g(gen_upcall_start(ctx, 3));
993 if (unlikely(R_ARG0 == src_base)) {
994 if (unlikely(R_ARG1 == dest_base))
995 internal(file_line, "gen_memcpy_raw: swapped registers: %u, %u", src_base, dest_base);
996 g(gen_3address_alu_imm(ctx, i_size(OP_SIZE_ADDRESS), ALU_ADD, R_ARG1, src_base, src_offset, 0));
997 g(gen_upcall_argument(ctx, 1));
1000 g(gen_3address_alu_imm(ctx, i_size(OP_SIZE_ADDRESS), ALU_ADD, R_ARG0, dest_base, dest_offset, 0));
1001 g(gen_upcall_argument(ctx, 0));
1003 if (R_ARG0 != src_base) {
1004 g(gen_3address_alu_imm(ctx, i_size(OP_SIZE_ADDRESS), ALU_ADD, R_ARG1, src_base, src_offset, 0));
1005 g(gen_upcall_argument(ctx, 1));
1008 #if (defined(ARCH_X86_64) || defined(ARCH_X86_X32)) && !defined(ARCH_X86_WIN_ABI)
/* ERMS: inline REP MOVSB instead of calling out. */
1009 if (cpu_test_feature(CPU_FEATURE_erms)) {
1010 g(gen_load_constant(ctx, R_CX, size));
1012 gen_insn(INSN_MEMCPY, 0, 0, 0);
1013 gen_one(ARG_ADDRESS_1_POST_I);
1016 gen_one(ARG_ADDRESS_1_POST_I);
1020 g(gen_upcall_end(ctx, 3));
/* Fallback: pass size as the third argument and upcall mem_copy. */
1025 g(gen_load_constant(ctx, R_ARG2, size));
1026 g(gen_upcall_argument(ctx, 2));
1028 g(gen_upcall(ctx, offsetof(struct cg_upcall_vector_s, mem_copy), 3));
/* Copy a value from arbitrary memory (src_base + src_offset) into frame
 * slot dest_slot.  If the slot is register-resident, emit a single load
 * into the register (with the s390 byte-load and sign-extension special
 * cases); otherwise emit a memory-to-memory copy of the full type. */
1033 static bool attr_w gen_memcpy_to_slot(struct codegen_context *ctx, frame_t dest_slot, unsigned src_base, int64_t src_offset)
1035 const struct type *t = get_type_of_local(ctx, dest_slot);
1036 unsigned size = spill_size(t);
/* registers[] uses negative values for "not register-resident". */
1037 short dest_reg = ctx->registers[dest_slot];
1038 if (dest_reg >= 0) {
1039 if (ARCH_PREFERS_SX(size) && !reg_is_fp(dest_reg)) {
1040 #if defined(ARCH_S390)
/* No long displacement: byte insert + explicit sign extend. */
1041 if (size == OP_SIZE_1 && !cpu_test_feature(CPU_FEATURE_long_displacement)) {
1042 g(gen_address(ctx, src_base, src_offset, IMM_PURPOSE_LDR_OFFSET, size));
1043 gen_insn(INSN_MOV_MASK, OP_SIZE_NATIVE, MOV_MASK_0_8, 0);
1046 gen_address_offset();
1047 g(gen_extend(ctx, size, sign_x, dest_reg, dest_reg));
1051 g(gen_address(ctx, src_base, src_offset, IMM_PURPOSE_LDR_SX_OFFSET, size));
1052 gen_insn(INSN_MOVSX, size, 0, 0);
1054 g(gen_address(ctx, src_base, src_offset, reg_is_fp(dest_reg) ? IMM_PURPOSE_VLDR_VSTR_OFFSET : IMM_PURPOSE_LDR_OFFSET, size));
1055 gen_insn(INSN_MOV, size, 0, 0);
1058 gen_address_offset();
/* Memory-resident slot: bulk copy of the whole typed value. */
1061 g(gen_memcpy_raw(ctx, R_FRAME, (size_t)dest_slot * slot_size, src_base, src_offset, t->size, t->align));
/* Copy frame slot src_slot out to arbitrary memory (dest_base +
 * dest_offset): a single store when the slot is register-resident
 * (the dest_reg >= 0 guard falls on a line not visible in this extract),
 * otherwise a memory-to-memory copy of the full type. */
1065 static bool attr_w gen_memcpy_from_slot(struct codegen_context *ctx, unsigned dest_base, int64_t dest_offset, frame_t src_slot)
1067 const struct type *t = get_type_of_local(ctx, src_slot);
1068 short src_reg = ctx->registers[src_slot];
1070 unsigned size = spill_size(t);
1071 g(gen_address(ctx, dest_base, dest_offset, reg_is_fp(src_reg) ? IMM_PURPOSE_VLDR_VSTR_OFFSET : IMM_PURPOSE_STR_OFFSET, size));
1072 gen_insn(INSN_MOV, size, 0, 0);
1073 gen_address_offset();
1077 g(gen_memcpy_raw(ctx, dest_base, dest_offset, R_FRAME, (size_t)src_slot * slot_size, t->size, t->align));
/* Copy one frame slot to another, picking the cheapest form for the four
 * residency combinations: reg->reg move, mem->reg load, reg->mem store, or
 * mem->mem bulk copy (alignment boosted to slot_size since both operands
 * are slot-aligned). */
1081 static bool attr_w gen_memcpy_slots(struct codegen_context *ctx, frame_t dest_slot, frame_t src_slot)
1083 const struct type *t = get_type_of_local(ctx, src_slot);
1084 short dest_reg = ctx->registers[dest_slot];
1085 short src_reg = ctx->registers[src_slot];
1086 if (dest_reg >= 0 && src_reg >= 0) {
1087 unsigned size = spill_size(t);
/* Integer registers are copied at native width; FP at the type size. */
1088 g(gen_mov(ctx, reg_is_fp(src_reg) ? size : OP_SIZE_NATIVE, dest_reg, src_reg));
1091 if (dest_reg >= 0) {
1092 unsigned size = spill_size(t);
1093 g(gen_frame_load(ctx, size, garbage, src_slot, 0, false, dest_reg));
1097 unsigned size = spill_size(t);
1098 g(gen_frame_store(ctx, size, dest_slot, 0, src_reg));
1101 g(gen_memcpy_raw(ctx, R_FRAME, (size_t)dest_slot * slot_size, R_FRAME, (size_t)src_slot * slot_size, t->size, maximum(slot_size, t->align)));
/* Emit code that zeroes a bitmap of bitmap_slots slots located at
 * [dest_base + dest_offset].
 * Small bitmaps (<= INLINE_BITMAP_SLOTS) are cleared inline with a sequence
 * of naturally-aligned power-of-two-sized stores, using the widest store
 * each target architecture offers (STP pairs on ARM32/S390/ARM64, 16-byte
 * SSE stores on x86).  Larger bitmaps are cleared with "rep stosb" on
 * x86-64/x32 CPUs with ERMS, or otherwise via the mem_clear upcall.
 * additional_offset carries the alignment context of dest_offset so the
 * inline store sequence can keep every store naturally aligned.
 * Returns false on code-generation failure (via g()).
 * NOTE(review): this listing elides lines (embedded numbering jumps), so
 * matching #else/#endif directives, closing braces and the
 * "len = frame_align;"-style clamp body after line 1120 are not visible —
 * confirm details against the full file. */
1105 static bool attr_w gen_clear_bitmap(struct codegen_context *ctx, unsigned additional_offset, unsigned dest_base, int64_t dest_offset, frame_t bitmap_slots)
/* --- inline path for small bitmaps --- */
1107 if (bitmap_slots <= INLINE_BITMAP_SLOTS) {
/* lazily-zeroed second zero source (R_SCRATCH_2 or XMM0, per arch) */
1108 bool attr_unused scratch_2_zeroed = false;
1109 size_t bitmap_length = (size_t)bitmap_slots * slot_size;
1110 size_t clear_offset = 0;
/* fold the destination offset into the alignment tracking */
1111 additional_offset += (unsigned)dest_offset;
/* materialize a zero in R_SCRATCH_1: x86 idiomatic xor, others load 0 */
1112 #if defined(ARCH_X86)
1113 g(gen_3address_alu(ctx, OP_SIZE_4, ALU_XOR, R_SCRATCH_1, R_SCRATCH_1, R_SCRATCH_1, 0));
1115 #if defined(ARCH_ARM32) || defined(ARCH_S390)
1116 g(gen_load_constant(ctx, R_SCRATCH_1, 0));
1118 while (clear_offset < bitmap_length) {
1119 size_t len = bitmap_length - clear_offset;
/* clamp the store width: never beyond frame alignment...
 * (clamp body elided in this listing; presumably "len = frame_align;") */
1120 if (len > frame_align)
/* ...and never wider than the current address's natural alignment;
 * (x & -x) isolates the lowest set bit, i.e. the largest aligned width */
1122 if (additional_offset)
1123 len = minimum(len, additional_offset & -additional_offset);
/* ARM32/S390: a store-pair of two zeroed registers clears two native words */
1124 #if defined(ARCH_ARM32) || defined(ARCH_S390)
1125 len = minimum(len, 2U << OP_SIZE_NATIVE);
1126 if (len == 2U << OP_SIZE_NATIVE) {
1127 if (!scratch_2_zeroed) {
1128 g(gen_load_constant(ctx, R_SCRATCH_2, 0));
1129 scratch_2_zeroed = true;
1131 g(gen_address(ctx, dest_base, dest_offset + clear_offset, IMM_PURPOSE_LDP_STP_OFFSET, OP_SIZE_NATIVE));
1132 gen_insn(INSN_STP, OP_SIZE_NATIVE, 0, 0);
1133 gen_address_offset();
1134 gen_one(R_SCRATCH_1);
1135 gen_one(R_SCRATCH_2);
/* ARM64: STP with immediate-zero operands clears 16 bytes in one insn */
1138 #elif defined(ARCH_ARM64)
1139 len = minimum(len, 1U << OP_SIZE_16);
1140 if (len == 1U << OP_SIZE_16) {
1141 g(gen_address(ctx, dest_base, dest_offset + clear_offset, IMM_PURPOSE_LDP_STP_OFFSET, OP_SIZE_8));
1142 g(gen_imm(ctx, 0, IMM_PURPOSE_STORE_VALUE, OP_SIZE_8));
1143 gen_insn(INSN_STP, OP_SIZE_NATIVE, 0, 0);
1144 gen_address_offset();
/* x86 with SSE: one 16-byte store from a zeroed XMM0 */
1149 #elif defined(ARCH_X86)
1150 len = minimum(len, 1U << OP_SIZE_16);
1151 if (len == 1U << OP_SIZE_16 && cpu_test_feature(CPU_FEATURE_sse)) {
1152 if (!scratch_2_zeroed) {
/* zero XMM0 once via xor-with-self */
1153 g(gen_3address_alu(ctx, OP_SIZE_16, ALU_XOR, R_XMM0, R_XMM0, R_XMM0, 0));
1154 scratch_2_zeroed = true;
1156 g(gen_address(ctx, dest_base, dest_offset + clear_offset, IMM_PURPOSE_VLDR_VSTR_OFFSET, OP_SIZE_16));
1157 gen_insn(INSN_MOV, OP_SIZE_16, 0, 0);
1158 gen_address_offset();
/* generic fallback: one store of the largest power-of-two <= len, at most
 * the native word size */
1163 len = minimum(len, 1U << OP_SIZE_NATIVE);
1164 len = (size_t)1 << high_bit(len);
/* archs that cannot store an immediate zero store the zeroed scratch reg */
1165 #if defined(ARCH_X86) || defined(ARCH_ARM32) || defined(ARCH_S390)
1166 g(gen_address(ctx, dest_base, dest_offset + clear_offset, IMM_PURPOSE_STR_OFFSET, log_2(len)));
1167 gen_insn(INSN_MOV, log_2(len), 0, 0);
1168 gen_address_offset();
1169 gen_one(R_SCRATCH_1);
/* other archs store the immediate 0 directly */
1171 g(gen_address(ctx, dest_base, dest_offset + clear_offset, IMM_PURPOSE_STR_OFFSET, log_2(len)));
1172 g(gen_imm(ctx, 0, IMM_PURPOSE_STORE_VALUE, log_2(len)));
1173 gen_insn(INSN_MOV, log_2(len), 0, 0);
1174 gen_address_offset();
/* advance both the copy cursor and the alignment tracker */
1179 clear_offset += len;
1180 additional_offset += len;
/* --- large-bitmap path --- */
/* x86-64/x32 (SysV ABI only — Win64 differs): use "rep stosb" when the CPU
 * advertises Enhanced REP MOVSB/STOSB (ERMS) */
1184 #if (defined(ARCH_X86_64) || defined(ARCH_X86_X32)) && !defined(ARCH_X86_WIN_ABI)
1185 if (cpu_test_feature(CPU_FEATURE_erms)) {
/* save the clobbered register (operand line elided in this listing) */
1186 gen_insn(INSN_PUSH, OP_SIZE_8, 0, 0);
/* rep stosb operands: RDI = destination, RCX = byte count, AL = 0 */
1189 g(gen_3address_alu_imm(ctx, i_size(OP_SIZE_ADDRESS), ALU_ADD, R_DI, dest_base, dest_offset, 0));
1191 g(gen_load_constant(ctx, R_CX, (size_t)bitmap_slots * slot_size));
1193 g(gen_3address_alu(ctx, OP_SIZE_4, ALU_XOR, R_AX, R_AX, R_AX, 0));
1195 gen_insn(INSN_MEMSET, 0, 0, 0);
1196 gen_one(ARG_ADDRESS_1_POST_I);
1202 gen_insn(INSN_POP, OP_SIZE_8, 0, 0);
/* all other targets: call the runtime's mem_clear(ptr, len) upcall */
1208 g(gen_upcall_start(ctx, 2));
1210 g(gen_3address_alu_imm(ctx, i_size(OP_SIZE_ADDRESS), ALU_ADD, R_ARG0, dest_base, dest_offset, 0));
1211 g(gen_upcall_argument(ctx, 0));
1213 g(gen_load_constant(ctx, R_ARG1, (size_t)bitmap_slots * slot_size));
1214 g(gen_upcall_argument(ctx, 1));
1216 g(gen_upcall(ctx, offsetof(struct cg_upcall_vector_s, mem_clear), 2));
1221 static bool attr_w load_function_offset(struct codegen_context *ctx, unsigned dest, size_t fn_offset)
1223 g(gen_frame_load_raw(ctx, OP_SIZE_ADDRESS, zero_x, 0, frame_offs(function), false, dest));
1225 g(gen_address(ctx, dest, fn_offset, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_ADDRESS));
1226 gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
1228 gen_address_offset();