2 * Copyright (C) 2024 Mikulas Patocka
4 * This file is part of Ajla.
6 * Ajla is free software: you can redistribute it and/or modify it under the
7 * terms of the GNU General Public License as published by the Free Software
8 * Foundation, either version 3 of the License, or (at your option) any later
11 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
13 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License along with
16 * Ajla. If not, see <https://www.gnu.org/licenses/>.
/*
 * Architecture description for the RISC-V RV64 backend.
 * The native integer width is 64 bits.  RISC-V has no condition-code
 * register, so all *_WRITES_FLAGS queries report 0 and ARCH_HAS_FLAGS is 0;
 * comparison results are materialized into general-purpose registers.
 */
19 #define OP_SIZE_NATIVE OP_SIZE_8
20 #define OP_SIZE_ADDRESS OP_SIZE_NATIVE
22 #define JMP_LIMIT JMP_EXTRA_LONG
/* unaligned loads/stores trap unless the CPU advertises the "unaligned" feature */
24 #define UNALIGNED_TRAP (!cpu_test_feature(CPU_FEATURE_unaligned))
26 #define ALU_WRITES_FLAGS(alu, im) 0
27 #define ALU1_WRITES_FLAGS(alu) 0
28 #define ROT_WRITES_FLAGS(alu, size, im) 0
29 #define COND_IS_LOGICAL(cond) 0
31 #define ARCH_PARTIAL_ALU(size) 0
/* RISC-V ALU instructions are three-address (dst, src1, src2/imm) */
32 #define ARCH_IS_3ADDRESS(alu, f) 1
33 #define ARCH_IS_3ADDRESS_IMM(alu, f) 1
34 #define ARCH_IS_3ADDRESS_ROT(alu, size) 1
35 #define ARCH_IS_3ADDRESS_ROT_IMM(alu) 1
36 #define ARCH_IS_2ADDRESS(alu) 1
37 #define ARCH_IS_3ADDRESS_FP 1
/* conditional branches compare two registers directly (beq/blt/...) */
38 #define ARCH_HAS_JMP_2REGS(cond) 1
39 #define ARCH_HAS_FLAGS 0
40 #define ARCH_PREFERS_SX(size) 0
41 #define ARCH_HAS_BWX 1
42 #define ARCH_HAS_MUL 1
43 #define ARCH_HAS_DIV 1
/* andn is provided by the Zbb bit-manipulation extension */
44 #define ARCH_HAS_ANDN cpu_test_feature(CPU_FEATURE_zbb)
/* sh1add/sh2add/sh3add from Zba fuse a shift of 1..3 with an add */
45 #define ARCH_HAS_SHIFTED_ADD(bits) ((bits) <= 3 && cpu_test_feature(CPU_FEATURE_zba))
/* single-bit set/clear/invert/test instructions come from Zbs */
46 #define ARCH_HAS_BTX(btx, size, cnst) (((size) == OP_SIZE_8 || (cnst)) && cpu_test_feature(CPU_FEATURE_zbs))
47 #define ARCH_SHIFT_SIZE OP_SIZE_4
48 #define ARCH_BOOL_SIZE OP_SIZE_NATIVE
/* no direct GP<->FP register move; values go through memory */
49 #define ARCH_HAS_FP_GP_MOV 0
50 #define ARCH_NEEDS_BARRIER 0
/* integer ALU operations are always performed at the full native width */
52 #define i_size(size) OP_SIZE_NATIVE
53 #define i_size_rot(size) maximum(size, OP_SIZE_4)
54 #define i_size_cmp(size) OP_SIZE_NATIVE
/*
 * Fixed register roles for generated code.
 * R_UPCALL and R_TIMESTAMP live in s-registers, which are callee-saved
 * per the RISC-V psABI, so they survive calls made by the generated code.
 */
123 #define R_UPCALL R_S1
124 #define R_TIMESTAMP R_S2
/* general scratch registers, taken from the caller-saved a-registers */
126 #define R_SCRATCH_1 R_A0
127 #define R_SCRATCH_2 R_A1
128 #define R_SCRATCH_3 R_A2
129 #define R_SCRATCH_4 R_A3
130 #define R_SCRATCH_NA_1 R_A4
131 #define R_SCRATCH_NA_2 R_A5
132 #ifdef HAVE_BITWISE_FRAME
133 #define R_SCRATCH_NA_3 R_A6
136 #define R_SAVED_1 R_S3
137 #define R_SAVED_2 R_S4
/* t-registers reserved for materializing offsets and immediates,
 * and for holding a comparison result (no flags register on RISC-V) */
146 #define R_OFFSET_IMM R_T0
147 #define R_CONST_IMM R_T1
148 #define R_CONST_HELPER R_T2
149 #define R_CMP_RESULT R_T3
151 #define FR_SCRATCH_1 R_FT0
152 #define FR_SCRATCH_2 R_FT1
/* bitmask of supported FP operand sizes — presumably single and double
 * precision; TODO(review) confirm against the generic codegen's encoding */
154 #define SUPPORTED_FP 0x6
/* 0x70 = 112 bytes; matches the 13 save slots at offsets 0x08..0x68
 * used by gen_entry()/gen_escape() below */
156 #define FRAME_SIZE 0x70
/* True when the internal register number falls in the FP bank (0x20..0x3f). */
158 static bool reg_is_fp(unsigned reg)
160 	return reg >= 0x20 && reg < 0x40;
/* callee-saved integer registers available to the register allocator */
163 static const uint8_t regs_saved[] = { R_S5, R_S6, R_S7, R_S8, R_S9, R_S10, R_S11 };
/* caller-saved (volatile) integer registers; the list varies with
 * HAVE_BITWISE_FRAME because a6 is then reserved as R_SCRATCH_NA_3 */
164 static const uint8_t regs_volatile[] = { R_RA,
165 #ifndef HAVE_BITWISE_FRAME
168 	R_A7, R_T4, R_T5, R_T6 };
/* no callee-saved FP registers are used by generated code */
169 static const uint8_t fp_saved[] = { 0 };
170 #define n_fp_saved 0U
171 static const uint8_t fp_volatile[] = { R_FT2, R_FT3, R_FT4, R_FT5, R_FT6, R_FT7, R_FA0, R_FA1, R_FA2, R_FA3, R_FA4, R_FA5, R_FA6, R_FA7, R_FT8, R_FT9, R_FT10, R_FT11 };
/* true for any s-register or fs-register (callee-saved per the psABI) */
172 #define reg_is_saved(r) (((r) >= R_S0 && (r) <= R_S1) || ((r) >= R_S2 && (r) <= R_S11) || ((r) >= R_FS0 && (r) <= R_FS1) || ((r) >= R_FS2 && (r) <= R_FS11))
/* Table driving emission of compressed (RVC) instruction forms;
 * entries are generated into riscv-c.inc — presumably mapping full
 * 32-bit encodings to 16-bit compressed equivalents (verify in the .inc). */
174 static const struct {
177 } riscv_compress[] = {
178 #include "riscv-c.inc"
/*
 * Materialize the 64-bit constant c into register reg.
 * The constant is split into 12-bit low parts (addi range) and 20-bit
 * middle parts (lui range): c2 = bits 12..31, c3 = bits 32..43, with
 * carry adjustments so the sign-extending addi pieces compose correctly.
 * Larger constants are built in R_CONST_HELPER via shifted MOV/ADD/SHL
 * sequences and combined into reg.
 * NOTE(review): several lines of this function are not visible in this
 * view; the comments describe only the visible structure.
 */
181 static bool attr_w gen_load_constant(struct codegen_context *ctx, unsigned reg, uint64_t c)
184 	int32_t c1, c2, c3, c4;
192 	c2 = (c >> 12) & 0xfffffUL;
198 	c3 = (c >> 32) & 0xfffUL;
	/* carry adjustment — presumably compensates for the sign extension
	 * of the lower piece; TODO(review) confirm against the full source */
202 		c += 0x100000000000UL;
209 		gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
212 		gen_eight((uint64_t)c4 << 12);
216 			gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_ADD, 0);
224 		gen_insn(INSN_ROT, OP_SIZE_NATIVE, ROT_SHL, 0);
232 		gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
233 		gen_one(R_CONST_HELPER);
235 		gen_eight((uint64_t)c2 << 12);
237 		gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_ADD, 0);
240 		gen_one(R_CONST_HELPER);
242 		gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
245 		gen_eight((uint64_t)c2 << 12);
	/* even a zero low part must be emitted if nothing else wrote reg */
249 	if (c1 || r == R_ZERO) {
250 		gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_ADD, 0);
/*
 * Prepare a base+offset address in ctx for a subsequent memory insn.
 * Offsets that fit the signed 12-bit immediate field of RISC-V
 * loads/stores ([-0x800, 0x800)) are used directly; anything larger is
 * materialized into R_OFFSET_IMM, added to the base, and the effective
 * address register becomes the new base with offset 0.
 */
259 static bool attr_w gen_address(struct codegen_context *ctx, unsigned base, int64_t imm, unsigned purpose, unsigned size)
261 	ctx->base_reg = base;
262 	ctx->offset_imm = imm;
263 	ctx->offset_reg = false;
265 		case IMM_PURPOSE_LDR_OFFSET:
266 		case IMM_PURPOSE_LDR_SX_OFFSET:
267 		case IMM_PURPOSE_STR_OFFSET:
268 		case IMM_PURPOSE_VLDR_VSTR_OFFSET:
269 		case IMM_PURPOSE_MVI_CLI_OFFSET:
			/* signed 12-bit immediate range of ld/st instructions */
270 			if (likely(imm >= -0x800) && likely(imm < 0x800))
274 			internal(file_line, "gen_address: invalid purpose %u (imm %"PRIxMAX", size %u)", purpose, (uintmax_t)imm, size);
	/* offset out of range: compute base + imm into R_OFFSET_IMM */
276 	g(gen_load_constant(ctx, R_OFFSET_IMM, imm));
277 	gen_insn(INSN_ALU, OP_SIZE_ADDRESS, ALU_ADD, 0);
278 	gen_one(R_OFFSET_IMM);
279 	gen_one(R_OFFSET_IMM);
281 	ctx->base_reg = R_OFFSET_IMM;
/*
 * Report whether imm can be encoded directly in the immediate field of
 * the instruction implied by purpose.  Most I-type ALU forms accept a
 * signed 12-bit immediate [-0x800, 0x800); SUB is shifted by one because
 * it is presumably encoded as an add of the negated immediate — the
 * negation must itself fit (TODO(review): confirm against the emitter).
 */
286 static bool is_direct_const(int64_t imm, unsigned purpose, unsigned size)
289 		case IMM_PURPOSE_STORE_VALUE:
293 		case IMM_PURPOSE_ADD:
294 		case IMM_PURPOSE_AND:
296 		case IMM_PURPOSE_XOR:
297 		case IMM_PURPOSE_TEST:
298 		case IMM_PURPOSE_CMP:
299 		case IMM_PURPOSE_CMP_LOGICAL:
300 			if (likely(imm >= -0x800) && likely(imm < 0x800))
303 		case IMM_PURPOSE_SUB:
304 			if (likely(imm > -0x800) && likely(imm <= 0x800))
307 		case IMM_PURPOSE_ANDN:
309 		case IMM_PURPOSE_JMP_2REGS:
311 		case IMM_PURPOSE_MUL:
313 		case IMM_PURPOSE_BITWISE:
316 			internal(file_line, "is_direct_const: invalid purpose %u (imm %"PRIxMAX", size %u)", purpose, (uintmax_t)imm, size);
/*
 * Emit the entry trampoline: allocate the FRAME_SIZE (0x70) stack frame,
 * spill the link register and the callee-saved registers clobbered by
 * generated code into the 13 slots at sp+0x08..sp+0x68, set up the
 * dedicated R_UPCALL/R_TIMESTAMP registers from the incoming arguments,
 * and jump indirectly into the generated code.  gen_escape() below is
 * the matching epilogue and must use the same slot layout.
 * NOTE(review): the operand lines (gen_one calls naming the stored
 * registers) are not visible in this view.
 */
321 static bool attr_w gen_entry(struct codegen_context *ctx)
	/* sp -= FRAME_SIZE */
323 	g(gen_imm(ctx, -FRAME_SIZE, IMM_PURPOSE_ADD, OP_SIZE_NATIVE));
324 	gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_ADD, 0);
	/* spill one 8-byte register per slot, from sp+0x68 down to sp+0x08 */
329 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x08, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
330 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
331 	gen_address_offset();
334 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x10, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
335 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
336 	gen_address_offset();
339 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x18, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
340 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
341 	gen_address_offset();
344 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x20, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
345 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
346 	gen_address_offset();
349 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x28, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
350 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
351 	gen_address_offset();
354 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x30, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
355 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
356 	gen_address_offset();
359 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x38, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
360 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
361 	gen_address_offset();
364 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x40, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
365 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
366 	gen_address_offset();
369 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x48, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
370 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
371 	gen_address_offset();
374 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x50, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
375 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
376 	gen_address_offset();
379 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x58, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
380 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
381 	gen_address_offset();
384 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x60, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
385 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
386 	gen_address_offset();
389 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x68, IMM_PURPOSE_STR_OFFSET, OP_SIZE_NATIVE));
390 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
391 	gen_address_offset();
	/* move incoming arguments into their dedicated registers
	 * (R_TIMESTAMP is visibly one of the destinations) */
394 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
398 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
402 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
403 	gen_one(R_TIMESTAMP);
	/* transfer control to the generated code */
406 	gen_insn(INSN_JMP_INDIRECT, 0, 0, 0);
/*
 * Load the escaping instruction pointer (sign-extended from 32 bits)
 * into the second return register and jump to the common escape code.
 */
412 static bool attr_w gen_escape_arg(struct codegen_context *ctx, ip_t ip, uint32_t escape_label)
414 	g(gen_load_constant(ctx, R_RET1, (int32_t)ip));
416 	gen_insn(INSN_JMP, 0, 0, 0);
417 	gen_four(escape_label);
/*
 * Emit the epilogue reached on escape from generated code: reload the
 * link register and callee-saved registers from the 13 frame slots at
 * sp+0x08..sp+0x68 (mirroring gen_entry() exactly), release the
 * FRAME_SIZE stack frame, and return to the caller of gen_entry().
 * NOTE(review): the gen_one() operand lines naming the restored
 * registers are not visible in this view.
 */
422 static bool attr_w gen_escape(struct codegen_context *ctx)
424 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
	/* reload one 8-byte register per slot, from sp+0x68 down to sp+0x08 */
428 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x08, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
429 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
431 	gen_address_offset();
433 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x10, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
434 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
436 	gen_address_offset();
438 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x18, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
439 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
441 	gen_address_offset();
443 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x20, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
444 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
446 	gen_address_offset();
448 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x28, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
449 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
451 	gen_address_offset();
453 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x30, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
454 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
456 	gen_address_offset();
458 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x38, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
459 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
461 	gen_address_offset();
463 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x40, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
464 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
466 	gen_address_offset();
468 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x48, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
469 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
471 	gen_address_offset();
473 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x50, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
474 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
476 	gen_address_offset();
478 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x58, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
479 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
481 	gen_address_offset();
483 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x60, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
484 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
486 	gen_address_offset();
488 	g(gen_address(ctx, R_SP, FRAME_SIZE - 0x68, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
489 	gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
491 	gen_address_offset();
	/* sp += FRAME_SIZE, then return */
493 	g(gen_imm(ctx, FRAME_SIZE, IMM_PURPOSE_ADD, OP_SIZE_NATIVE));
494 	gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_ADD, 0);
499 	gen_insn(INSN_RET, 0, 0, 0);
/* Per-argument hook invoked before an upcall; both parameters are marked
 * attr_unused, indicating this is a no-op on this architecture. */
504 static bool attr_w gen_upcall_argument(struct codegen_context attr_unused *ctx, unsigned attr_unused arg)
/* Load the upcall function pointer stored at the given byte offset in
 * the upcall vector (addressed via R_UPCALL) into reg. */
509 static bool attr_w gen_get_upcall_pointer(struct codegen_context *ctx, unsigned offset, unsigned reg)
511 	g(gen_address(ctx, R_UPCALL, offset, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_ADDRESS));
512 	gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
514 	gen_address_offset();
/* Emit an upcall: fetch the target pointer from the upcall vector into a
 * non-allocatable scratch register, call it indirectly, then let
 * gen_upcall_end() emit any post-call bookkeeping for n_args. */
519 static bool attr_w gen_upcall(struct codegen_context *ctx, unsigned offset, unsigned n_args)
521 	g(gen_get_upcall_pointer(ctx, offset, R_SCRATCH_NA_1));
523 	gen_insn(INSN_CALL_INDIRECT, OP_SIZE_ADDRESS, 0, 0);
524 	gen_one(R_SCRATCH_NA_1);
526 	g(gen_upcall_end(ctx, n_args));
/* Forward declaration; the definition appears later in the file. */
531 static bool attr_w gen_cmp_test_jmp(struct codegen_context *ctx, unsigned insn, unsigned op_size, unsigned reg1, unsigned reg2, unsigned cond, uint32_t label);
533 static bool attr_w gen_timestamp_test(struct codegen_context *ctx, uint32_t escape_label)
535 g(gen_address(ctx, R_UPCALL, offsetof(struct cg_upcall_vector_s, ts), IMM_PURPOSE_LDR_SX_OFFSET, OP_SIZE_4));
536 gen_insn(INSN_MOVSX, OP_SIZE_4, 0, 0);
537 gen_one(R_SCRATCH_1);
538 gen_address_offset();
540 g(gen_cmp_test_jmp(ctx, INSN_CMP, OP_SIZE_NATIVE, R_SCRATCH_1, R_TIMESTAMP, COND_NE, escape_label));