2 * Copyright (C) 2024 Mikulas Patocka
4 * This file is part of Ajla.
6 * Ajla is free software: you can redistribute it and/or modify it under the
7 * terms of the GNU General Public License as published by the Free Software
8 * Foundation, either version 3 of the License, or (at your option) any later
11 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
13 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License along with
16 * Ajla. If not, see <https://www.gnu.org/licenses/>.
/*
 * PA-RISC back-end configuration for the code generator.
 * NOTE(review): this is a sampled excerpt; #if/#else/#endif lines between
 * the visible definitions are elided, so paired alternatives (e.g. the two
 * OP_SIZE_ADDRESS definitions) belong to conditional branches not shown here.
 */
/* true at run time when the CPU implements the PA-RISC 2.0 instruction set */
19 #define PA_20 cpu_test_feature(CPU_FEATURE_pa20)
21 #if defined(ARCH_PARISC64)
22 #define ARCH_PARISC_USE_STUBS
24 #define ARCH_PARISC_USE_STUBS
/* native integer operation width: 64-bit only when PA 2.0 is available */
27 #define OP_SIZE_NATIVE (PA_20 ? OP_SIZE_8 : OP_SIZE_4)
/* pointer width — 4 or 8 bytes depending on the 32/64-bit ABI branch */
30 #define OP_SIZE_ADDRESS OP_SIZE_4
32 #define OP_SIZE_ADDRESS OP_SIZE_8
35 #define JMP_LIMIT JMP_EXTRA_LONG
/* unaligned accesses trap on this target */
37 #define UNALIGNED_TRAP 1
/* only register-form ADC/SUB/SBB touch the carry state (value 3);
   everything else, and all immediate forms, write no flags */
39 #define ALU_WRITES_FLAGS(alu, im) (!(im) && ((alu) == ALU_ADC || (alu) == ALU_SUB || (alu) == ALU_SBB) ? 3 : 0)
40 #define ALU1_WRITES_FLAGS(alu) ((alu) == ALU1_NEG || (alu) == ALU1_INC || (alu) == ALU1_DEC ? 3 : 0)
41 #define ROT_WRITES_FLAGS(alu, size, im) 0
42 #define COND_IS_LOGICAL(cond) 0
44 #define ARCH_PARTIAL_ALU(size) 0
/* all ALU/rotate operations are 3-address (dest, src1, src2) */
45 #define ARCH_IS_3ADDRESS(alu, f) 1
46 #define ARCH_IS_3ADDRESS_IMM(alu, f) 1
47 #define ARCH_IS_3ADDRESS_ROT(alu, size) 1
48 #define ARCH_IS_3ADDRESS_ROT_IMM(alu) 1
49 #define ARCH_IS_2ADDRESS(alu) 1
50 #define ARCH_IS_3ADDRESS_FP 1
/* compare-and-branch on two registers is available */
51 #define ARCH_HAS_JMP_2REGS(cond) 1
/* no condition-flags register on this architecture */
52 #define ARCH_HAS_FLAGS 0
53 #define ARCH_SUPPORTS_TRAPS 1
54 #define ARCH_TRAP_BEFORE 0
55 #define ARCH_PREFERS_SX(size) 0
56 #define ARCH_HAS_BWX 1
/* no integer multiply/divide instructions — done via millicode calls */
57 #define ARCH_HAS_MUL 0
58 #define ARCH_HAS_DIV 0
59 #define ARCH_HAS_ANDN 1
/* shift-and-add supports shift amounts up to 3 bits */
60 #define ARCH_HAS_SHIFTED_ADD(bits) ((bits) <= 3)
/* bit set/reset/extract supported only for 32-bit-or-wider operands */
61 #define ARCH_HAS_BTX(btx, size, cnst) (((btx) == BTX_BTS || (btx) == BTX_BTR || (btx) == BTX_BTEXT) && (((size) >= OP_SIZE_4)))
62 #define ARCH_SHIFT_SIZE OP_SIZE_4
63 #define ARCH_BOOL_SIZE OP_SIZE_NATIVE
/* no direct moves between FP and general registers */
64 #define ARCH_HAS_FP_GP_MOV 0
65 #define ARCH_NEEDS_BARRIER 0
/* integer ops are widened to at least 32 bits (native width for ALU) */
67 #define i_size(size) OP_SIZE_NATIVE
68 #define i_size_rot(size) maximum(size, OP_SIZE_4)
69 #define i_size_cmp(size) maximum(size, OP_SIZE_4)
/*
 * Register role assignments for the code generator.
 * NOTE(review): sampled excerpt — the enum/defines for R_1..R_31 and the
 * FP register names fall in the elided lines above.
 */
/* first FP-register number; values >= 0x20 denote floating-point registers */
104 #define R_FSTATUS 0x20
/* callee-saved register holding the cached timestamp (see gen_timestamp_test) */
136 #define R_TIMESTAMP R_5
/* scratch registers — presumably the PA-RISC argument registers r26..r24;
   TODO confirm against the full register list */
138 #define R_SCRATCH_1 R_26
139 #define R_SCRATCH_2 R_25
140 #define R_SCRATCH_3 R_24
141 #define R_SCRATCH_4 R_SAVED_2
143 #define R_SCRATCH_NA_1 R_22
144 #define R_SCRATCH_NA_2 R_21
145 #ifdef HAVE_BITWISE_FRAME
146 #define R_SCRATCH_NA_3 R_20
148 #define R_CMP_RESULT R_19
150 #define R_CG_SCRATCH R_31
152 #define R_SAVED_1 R_6
153 #define R_SAVED_2 R_7
/* r1 is used to build immediates that don't fit in an instruction */
160 #define R_CONST_IMM R_1
/* the return-pointer register doubles as the address-offset scratch;
   gen_escape reloads rp from the stack, so this reuse is safe — see RP_OFFS */
161 #define R_OFFSET_IMM R_RP
163 #define FR_SCRATCH_1 R_F4
164 #define FR_SCRATCH_2 R_F5
/* bitmask of supported FP operation sizes (OP_SIZE_4 | OP_SIZE_8) — TODO confirm */
166 #define SUPPORTED_FP 0x6
/*
 * Stack frame layout. Two variants exist in elided #if branches:
 * 0x80 bytes (32-bit ABI) and 0xd0 bytes (64-bit ABI); the offset ranges
 * in the comment fragments below describe each layout.
 */
169 #define FRAME_SIZE 0x80
171 * 0-64 - register save area
172 * 64-96 - outgoing parameters
173 * 96-128 - frame marker
176 #define FRAME_SIZE 0xd0
178 * 0-128 - register save area
180 * 128-192 - outgoing parameters
181 * 192-208 - frame marker
/* offset of the saved return pointer within the frame marker;
   -0x14 / -0x10 match the 32- and 64-bit PA-RISC calling conventions */
186 #define RP_OFFS -0x14
188 #define RP_OFFS -0x10
/* Return true when the register number denotes a floating-point register:
 * FP registers occupy the range [R_FSTATUS (0x20), R_F31). */
191 static bool reg_is_fp(unsigned reg)
193 return reg >= R_FSTATUS && reg < R_F31;
/*
 * Register allocation tables consumed by the generic code generator.
 * NOTE(review): sampled excerpt — initializer lines inside the #if branches
 * are elided, so each array has more entries than are visible here.
 */
/* callee-saved general registers available for allocation;
   R_3..R_8(?) precede the visible tail — elided by the excerpt */
196 static const uint8_t regs_saved[] = {
197 #if !(defined(ARCH_PARISC32) && defined(__HP_cc))
200 R_9, R_10, R_11, R_12, R_13, R_14, R_15, R_16, R_17, R_18 };
/* caller-saved (volatile) general registers */
201 static const uint8_t regs_volatile[] = { R_23, R_RET1,
202 #if defined(ARCH_PARISC64)
205 #ifndef HAVE_BITWISE_FRAME
/* no FP registers are preserved across calls by this back-end */
209 static const uint8_t fp_saved[] = { 0 };
210 #define n_fp_saved 0U
211 static const uint8_t fp_volatile[] = { R_F6, R_F7, R_F8, R_F9, R_F10, R_F11, R_F22, R_F23, R_F24, R_F25, R_F26, R_F27, R_F28, R_F29, R_F30, R_F31 };
/* matches the r3..r18 range saved/restored by gen_entry/gen_escape */
212 #define reg_is_saved(r) ((r) >= R_3 && (r) <= R_18)
/*
 * Load the 64-bit constant c into register reg.
 * The constant is decomposed into 14-bit-aligned chunks (note the
 * `(c & 0xffffc000ULL) >> 14` extractions) that fit PA-RISC's
 * ldil/addil-style immediate fields; for 64-bit native size an extra
 * MOV/ADD/SHL sequence builds the high part before the low part is added.
 * NOTE(review): sampled excerpt — the chunk-splitting arithmetic for
 * c1/c3 and several emitted operands are in elided lines.
 * Returns false on code-buffer failure (via the gen_* macros).
 */
214 static bool attr_w gen_load_constant(struct codegen_context *ctx, unsigned reg, uint64_t c)
217 int32_t c1, c2, c3, c4;
/* extract the 18 bits above the low 14 (shifted-left-by-14 payload) */
223 c2 = (c & 0xffffc000ULL) >> 14;
234 c4 = (c & 0xffffc000ULL) >> 14;
237 if (OP_SIZE_NATIVE == OP_SIZE_8) {
/* 64-bit path: materialize the upper chunk ... */
239 gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
242 gen_eight((uint64_t)c4 << 14);
246 gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_ADD, 0);
/* ... and shift it into position */
254 gen_insn(INSN_ROT, OP_SIZE_NATIVE, ROT_SHL, 0);
/* low 32 bits: load the shifted middle chunk into R_CONST_IMM */
263 gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
266 gen_eight((uint64_t)c2 << 14);
269 gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_ADD, 0);
270 gen_one(R_CONST_IMM);
273 gen_eight((uint64_t)c2 << 14);
/* final add of the low chunk, and/or a move into the requested register */
277 if (c1 || r != reg) {
278 gen_insn(INSN_ALU, OP_SIZE_NATIVE, ALU_ADD, 0);
/* forward declaration — used by gen_address below, defined later */
287 static bool is_direct_const(int64_t imm, unsigned purpose, unsigned size);
/*
 * Prepare the addressing mode for a memory access at base + imm.
 * On success ctx->base_reg / ctx->offset_imm / ctx->offset_reg describe
 * an encodable address. If the displacement does not fit the instruction's
 * immediate field, the offset (or the full effective address) is computed
 * into R_OFFSET_IMM and ctx->base_reg is redirected there.
 * NOTE(review): sampled excerpt — the switch statement head, return
 * statements and some emitted operands are in elided lines.
 */
289 static bool attr_w gen_address(struct codegen_context *ctx, unsigned base, int64_t imm, unsigned purpose, unsigned size)
291 ctx->base_reg = base;
292 ctx->offset_imm = imm;
293 ctx->offset_reg = false;
295 case IMM_PURPOSE_LDR_OFFSET:
296 case IMM_PURPOSE_LDR_SX_OFFSET:
297 case IMM_PURPOSE_STR_OFFSET:
298 case IMM_PURPOSE_MVI_CLI_OFFSET:
/* 8-byte accesses have a narrower displacement field — handled in elided branch */
299 if (size == OP_SIZE_8) {
/* integer load/store: signed 14-bit displacement */
303 if (likely(imm >= -0x2000) && likely(imm < 0x2000))
306 case IMM_PURPOSE_VLDR_VSTR_OFFSET:
/* FP load/store: short 5-bit-range displacement */
307 if (likely(imm >= -0x10) && likely(imm < 0x10))
/* larger FP displacements must be size-aligned */
311 if (unlikely((imm & ((1 << size) - 1)) != 0))
313 if (likely(imm >= -0x2000) && likely(imm < 0x2000))
317 internal(file_line, "gen_address: invalid purpose %u (imm %"PRIxMAX", size %u)", purpose, (uintmax_t)imm, size);
/* displacement doesn't fit: if it's a valid ADD immediate,
   fold base+imm into R_OFFSET_IMM with one ALU op */
320 if (is_direct_const(imm, IMM_PURPOSE_ADD, OP_SIZE_ADDRESS)) {
321 gen_insn(INSN_ALU, OP_SIZE_ADDRESS, ALU_ADD, 0);
322 gen_one(R_OFFSET_IMM);
327 ctx->base_reg = R_OFFSET_IMM;
/* otherwise materialize the full constant ... */
333 g(gen_load_constant(ctx, R_OFFSET_IMM, imm));
/* loads can use a register-indexed form directly */
335 if (purpose == IMM_PURPOSE_LDR_OFFSET || purpose == IMM_PURPOSE_LDR_SX_OFFSET) {
336 ctx->offset_reg = true;
/* ... or add it to the base for purposes without an indexed form */
340 gen_insn(INSN_ALU, OP_SIZE_ADDRESS, ALU_ADD, 0);
341 gen_one(R_OFFSET_IMM);
342 gen_one(R_OFFSET_IMM);
345 ctx->base_reg = R_OFFSET_IMM;
/*
 * Return true when imm can be encoded directly in the instruction
 * selected by `purpose` (no constant-register load needed).
 * NOTE(review): sampled excerpt — the switch head, returns and several
 * case bodies are in elided lines.
 */
351 static bool is_direct_const(int64_t imm, unsigned purpose, unsigned size)
353 int64_t imm_copy = imm;
355 case IMM_PURPOSE_STORE_VALUE:
356 case IMM_PURPOSE_SUB:
360 case IMM_PURPOSE_SUB_TRAP:
/* subtraction is encoded as addition of the negated immediate;
   negate via unsigned arithmetic to avoid signed-overflow UB on INT64_MIN */
361 imm_copy = -(uint64_t)imm_copy;
363 case IMM_PURPOSE_ADD:
364 case IMM_PURPOSE_CMP:
365 case IMM_PURPOSE_CMP_LOGICAL:
366 case IMM_PURPOSE_MOVR:
367 case IMM_PURPOSE_ADD_TRAP:
/* signed 11-bit immediate field */
368 if (likely(imm_copy >= -1024) && likely(imm_copy < 1024))
371 case IMM_PURPOSE_JMP_2REGS:
/* compare-and-branch takes only a signed 5-bit immediate */
373 if (likely(imm >= -16) && likely(imm < 16))
377 case IMM_PURPOSE_AND:
379 case IMM_PURPOSE_XOR:
380 case IMM_PURPOSE_ANDN:
381 case IMM_PURPOSE_TEST:
385 case IMM_PURPOSE_BITWISE:
388 internal(file_line, "is_direct_const: invalid purpose %u (imm %"PRIxMAX", size %u)", purpose, (uintmax_t)imm, size);
/*
 * Prepare immediate operand imm for an instruction with the given purpose:
 * record it directly in ctx->const_imm when encodable, otherwise load it
 * into R_CONST_IMM and mark ctx->const_reg so the emitter uses the register.
 */
393 static bool attr_w gen_imm(struct codegen_context *ctx, int64_t imm, unsigned purpose, unsigned size)
395 if (is_direct_const(imm, purpose, size)) {
396 ctx->const_imm = imm;
397 ctx->const_reg = false;
399 g(gen_load_constant(ctx, R_CONST_IMM, imm));
400 ctx->const_reg = true;
/*
 * Emit the prologue of generated code: save the return pointer,
 * allocate FRAME_SIZE bytes (post-increment store — PA-RISC stacks grow
 * upward), spill callee-saved registers r4..r18 into the register save
 * area, load the initial frame/timestamp registers, then jump indirectly
 * to the code's entry point.
 * NOTE(review): sampled excerpt — several emitted operand lines are elided.
 */
405 static bool attr_w gen_entry(struct codegen_context *ctx)
/* save rp into the caller's frame marker */
409 gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
410 gen_one(ARG_ADDRESS_1);
/* allocate the frame: store with post-increment by FRAME_SIZE */
415 gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
416 gen_one(ARG_ADDRESS_1_POST_I);
418 gen_eight(FRAME_SIZE);
/* spill r4..r18 at -FRAME_SIZE + (i - R_3) * address_size
   (slot 0, i.e. i == R_3, is used for rp/frame bookkeeping — see gen_escape) */
421 for (i = R_4; i <= R_18; i++) {
422 int offs = -FRAME_SIZE + ((i - R_3) << OP_SIZE_ADDRESS);
423 gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
424 gen_one(ARG_ADDRESS_1);
/* initialize the fixed registers (frame, upcall, timestamp) from arguments */
430 gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
434 gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
438 gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
439 gen_one(R_TIMESTAMP);
/* tail-jump into the generated function body */
442 gen_insn(INSN_JMP_INDIRECT, 0, 0, 0);
/*
 * Escape from generated code at instruction pointer ip: put ip into
 * R_SCRATCH_1 (where the escape handler expects it) and jump to the
 * shared escape label.
 */
448 static bool attr_w gen_escape_arg(struct codegen_context *ctx, ip_t ip, uint32_t escape_label)
450 g(gen_load_constant(ctx, R_SCRATCH_1, ip));
452 gen_insn(INSN_JMP, 0, 0, 0);
453 gen_four(escape_label);
/*
 * Emit the epilogue of generated code (the escape path): move the result
 * registers, reload the saved return pointer from -FRAME_SIZE + RP_OFFS,
 * restore callee-saved registers r4..r18, deallocate the frame with a
 * pre-decrement load, and return to the caller.
 * NOTE(review): sampled excerpt — several emitted operand lines are elided.
 */
458 static bool attr_w gen_escape(struct codegen_context *ctx)
/* set up the return values expected by the C caller */
462 gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
466 gen_insn(INSN_MOV, OP_SIZE_NATIVE, 0, 0);
468 gen_one(R_SCRATCH_1);
/* reload rp from the frame marker (R_RP was clobbered as R_OFFSET_IMM) */
470 gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
472 gen_one(ARG_ADDRESS_1);
474 gen_eight(-FRAME_SIZE + RP_OFFS);
/* restore r4..r18 from the same slots gen_entry stored them to */
476 for (i = R_4; i <= R_18; i++) {
477 int offs = -FRAME_SIZE + ((i - R_3) << OP_SIZE_ADDRESS);
478 gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
480 gen_one(ARG_ADDRESS_1);
/* deallocate the frame: load with pre-decrement by FRAME_SIZE */
485 gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
487 gen_one(ARG_ADDRESS_1_PRE_I);
489 gen_eight(-FRAME_SIZE);
491 gen_insn(INSN_RET, 0, 0, 0);
/*
 * Prepare upcall argument `arg`. Both parameters are attr_unused, so this
 * is presumably a no-op on PA-RISC (arguments already sit in the right
 * registers) — body not visible in this excerpt; TODO confirm.
 */
496 static bool attr_w gen_upcall_argument(struct codegen_context attr_unused *ctx, unsigned attr_unused arg)
/*
 * Load the upcall function pointer stored at R_UPCALL + offset
 * (a slot of the cg_upcall_vector_s table) into `reg`.
 */
501 static bool attr_w gen_get_upcall_pointer(struct codegen_context *ctx, unsigned offset, unsigned reg)
503 g(gen_address(ctx, R_UPCALL, offset, IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
504 gen_insn(INSN_MOV, OP_SIZE_ADDRESS, 0, 0);
506 gen_address_offset();
/*
 * Emit a call into the C runtime (upcall) at the given vector offset
 * with n_args prepared arguments. Two strategies exist in elided #if
 * branches — presumably selected by ARCH_PARISC_USE_STUBS (TODO confirm):
 * a call through a per-target stub label, or loading the function
 * descriptor into R_DP and calling indirectly (64-bit convention).
 */
511 static bool attr_w gen_upcall(struct codegen_context *ctx, unsigned offset, unsigned n_args)
514 uint32_t label = alloc_call_label(ctx);
515 if (unlikely(!label))
/* stub path: pointer in a scratch register, call via the allocated label */
518 g(gen_get_upcall_pointer(ctx, offset, R_SCRATCH_NA_1));
520 gen_insn(INSN_CALL, 0, 0, 0);
/* indirect path: load the pointer into R_DP and call through it */
523 g(gen_get_upcall_pointer(ctx, offset, R_DP));
525 gen_insn(INSN_CALL_INDIRECT, OP_SIZE_8, 0, 0);
528 g(gen_upcall_end(ctx, n_args));
/*
 * Emit a call to a PA-RISC millicode routine — used for operations the
 * hardware lacks (ARCH_HAS_MUL/ARCH_HAS_DIV are 0 above, so multiply and
 * divide go through here). Target operands are in elided lines.
 */
533 static bool attr_w gen_call_millicode(struct codegen_context *ctx)
535 gen_insn(INSN_CALL_MILLICODE, 0, 0, 0);
/* forward declaration — used by gen_timestamp_test below */
539 static bool attr_w gen_cmp_test_jmp(struct codegen_context *ctx, unsigned insn, unsigned op_size, unsigned reg1, unsigned reg2, unsigned cond, uint32_t label);
/*
 * Emit the preemption check: load the 32-bit `ts` field from the upcall
 * vector and branch to escape_label when it differs from the cached value
 * in R_TIMESTAMP (set up by gen_entry).
 * NOTE(review): the function's tail (return) lies past this excerpt.
 */
541 static bool attr_w gen_timestamp_test(struct codegen_context *ctx, uint32_t escape_label)
543 g(gen_address(ctx, R_UPCALL, offsetof(struct cg_upcall_vector_s, ts), IMM_PURPOSE_LDR_OFFSET, OP_SIZE_NATIVE));
544 gen_insn(INSN_MOV, OP_SIZE_4, 0, 0);
545 gen_one(R_SCRATCH_1);
546 gen_address_offset();
548 g(gen_cmp_test_jmp(ctx, INSN_CMP, OP_SIZE_4, R_SCRATCH_1, R_TIMESTAMP, COND_NE, escape_label));