/* Subroutines used for code generation for RISC-V.
   Copyright (C) 2023-2025 Free Software Foundation, Inc.
   Contributed by Christoph Müllner (christoph.muellner@vrull.eu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
21 #define IN_TARGET_CODE 1
25 #include "coretypes.h"
30 #include "insn-attr.h"
38 #include "riscv-protos.h"
40 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
41 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
44 split_plus (rtx x
, rtx
*base_ptr
, HOST_WIDE_INT
*offset_ptr
)
46 if (GET_CODE (x
) == PLUS
&& CONST_INT_P (XEXP (x
, 1)))
48 *base_ptr
= XEXP (x
, 0);
49 *offset_ptr
= INTVAL (XEXP (x
, 1));
58 /* Output a mempair instruction with the provided OPERANDS.
59 LOAD_P is true if a we have a pair of loads (stores otherwise).
60 MODE is the access mode (DI or SI).
61 CODE is the extension code (UNKNOWN, SIGN_EXTEND or ZERO_EXTEND).
62 This instruction does not handle invalid inputs gracefully,
63 but is full of assertions to ensure that only valid instructions
67 th_mempair_output_move (rtx operands
[4], bool load_p
,
68 machine_mode mode
, RTX_CODE code
)
70 rtx reg1
, reg2
, mem1
, mem2
, base1
, base2
;
71 HOST_WIDE_INT offset1
, offset2
;
72 rtx output_operands
[5];
75 gcc_assert (mode
== SImode
|| mode
== DImode
);
77 /* Paired 64-bit access instructions have a fixed shift amount of 4.
78 Paired 32-bit access instructions have a fixed shift amount of 3. */
79 unsigned shamt
= (mode
== DImode
) ? 4 : 3;
83 reg1
= copy_rtx (operands
[0]);
84 reg2
= copy_rtx (operands
[2]);
85 mem1
= copy_rtx (operands
[1]);
86 mem2
= copy_rtx (operands
[3]);
89 if (code
== ZERO_EXTEND
)
90 format
= "th.lwud\t%0, %1, (%2), %3, %4";
91 else //SIGN_EXTEND or UNKNOWN
92 format
= "th.lwd\t%0, %1, (%2), %3, %4";
94 format
= "th.ldd\t%0, %1, (%2), %3, %4";
98 reg1
= copy_rtx (operands
[1]);
99 reg2
= copy_rtx (operands
[3]);
100 mem1
= copy_rtx (operands
[0]);
101 mem2
= copy_rtx (operands
[2]);
104 format
= "th.swd\t%z0, %z1, (%2), %3, %4";
106 format
= "th.sdd\t%z0, %z1, (%2), %3, %4";
109 split_plus (XEXP (mem1
, 0), &base1
, &offset1
);
110 split_plus (XEXP (mem2
, 0), &base2
, &offset2
);
111 gcc_assert (rtx_equal_p (base1
, base2
));
112 auto size1
= MEM_SIZE (mem1
);
113 auto size2
= MEM_SIZE (mem2
);
114 gcc_assert (known_eq (size1
, size2
));
115 gcc_assert (known_eq (offset1
+ size1
, offset2
));
117 HOST_WIDE_INT imm2
= offset1
>> shamt
;
119 /* Make sure all mempair instruction constraints are met. */
120 gcc_assert (imm2
>= 0 && imm2
< 4);
121 gcc_assert ((imm2
<< shamt
) == offset1
);
122 gcc_assert (REG_P (reg1
));
123 gcc_assert (REG_P (reg2
));
124 gcc_assert (REG_P (base1
));
127 gcc_assert (REGNO (reg1
) != REGNO (reg2
));
128 gcc_assert (REGNO (reg1
) != REGNO (base1
));
129 gcc_assert (REGNO (reg2
) != REGNO (base1
));
132 /* Output the mempair instruction. */
133 output_operands
[0] = copy_rtx (reg1
);
134 output_operands
[1] = copy_rtx (reg2
);
135 output_operands
[2] = copy_rtx (base1
);
136 output_operands
[3] = gen_rtx_CONST_INT (mode
, imm2
);
137 output_operands
[4] = gen_rtx_CONST_INT (mode
, shamt
);
138 output_asm_insn (format
, output_operands
);
143 /* Analyze if a pair of loads/stores MEM1 and MEM2 with given MODE
144 are consecutive so they can be merged into a mempair instruction.
145 RESERVED will be set to true, if a reversal of the accesses is
146 required (false otherwise). Returns true if the accesses can be
147 merged (even if reversing is necessary) and false if not. */
150 th_mempair_check_consecutive_mems (machine_mode mode
, rtx
*mem1
, rtx
*mem2
,
153 rtx base1
, base2
, offset1
, offset2
;
154 extract_base_offset_in_addr (*mem1
, &base1
, &offset1
);
155 extract_base_offset_in_addr (*mem2
, &base2
, &offset2
);
157 /* Make sure both mems are in base+offset form. */
158 if (!base1
|| !base2
)
161 /* If both mems use the same base register, just check the offsets. */
162 if (rtx_equal_p (base1
, base2
))
164 auto size
= GET_MODE_SIZE (mode
);
166 if (known_eq (UINTVAL (offset1
) + size
, UINTVAL (offset2
)))
172 if (known_eq (UINTVAL (offset2
) + size
, UINTVAL (offset1
)))
184 /* Check if the given MEM can be used to define the address of a mempair
188 th_mempair_operand_p (rtx mem
, machine_mode mode
)
190 if (!MEM_SIZE_KNOWN_P (mem
))
193 /* Only DI or SI mempair instructions exist. */
194 gcc_assert (mode
== SImode
|| mode
== DImode
);
195 auto mem_sz
= MEM_SIZE (mem
);
196 auto mode_sz
= GET_MODE_SIZE (mode
);
197 if (!known_eq (mem_sz
, mode_sz
))
200 /* Paired 64-bit access instructions have a fixed shift amount of 4.
201 Paired 32-bit access instructions have a fixed shift amount of 3. */
202 machine_mode mem_mode
= GET_MODE (mem
);
203 unsigned shamt
= (mem_mode
== DImode
) ? 4 : 3;
206 HOST_WIDE_INT offset
;
207 split_plus (XEXP (mem
, 0), &base
, &offset
);
208 HOST_WIDE_INT imm2
= offset
>> shamt
;
210 if (imm2
< 0 || imm2
>= 4)
213 if ((imm2
<< shamt
) != offset
)
220 th_mempair_load_overlap_p (rtx reg1
, rtx reg2
, rtx mem
)
222 if (REGNO (reg1
) == REGNO (reg2
))
225 if (reg_overlap_mentioned_p (reg1
, mem
))
229 HOST_WIDE_INT offset
;
230 split_plus (XEXP (mem
, 0), &base
, &offset
);
237 if (REGNO (base
) == REGNO (reg1
)
238 || REGNO (base
) == REGNO (reg2
))
245 /* Given OPERANDS of consecutive load/store, check if we can merge
246 them into load-pair or store-pair instructions.
247 LOAD is true if they are load instructions.
248 MODE is the mode of memory operation. */
251 th_mempair_operands_p (rtx operands
[4], bool load_p
,
254 rtx mem_1
, mem_2
, reg_1
, reg_2
;
262 if (!REG_P (reg_1
) || !REG_P (reg_2
))
264 if (th_mempair_load_overlap_p (reg_1
, reg_2
, mem_1
))
266 if (th_mempair_load_overlap_p (reg_1
, reg_2
, mem_2
))
277 /* Check if the registers are GP registers. */
278 if (!REG_P (reg_1
) || !GP_REG_P (REGNO (reg_1
))
279 || !REG_P (reg_2
) || !GP_REG_P (REGNO (reg_2
)))
282 /* The mems cannot be volatile. */
283 if (!MEM_P (mem_1
) || !MEM_P (mem_2
))
285 if (MEM_VOLATILE_P (mem_1
) || MEM_VOLATILE_P (mem_2
))
289 /* Check if the addresses are in the form of [base+offset]. */
290 bool reversed
= false;
291 if (!th_mempair_check_consecutive_mems (mode
, &mem_1
, &mem_2
, &reversed
))
294 /* If necessary, reverse the local copy of the operands to simplify
295 testing of alignments and mempair operand. */
298 std::swap (mem_1
, mem_2
);
299 std::swap (reg_1
, reg_2
);
302 /* If we have slow unaligned access, we only accept aligned memory. */
303 if (riscv_slow_unaligned_access_p
304 && known_lt (MEM_ALIGN (mem_1
), GET_MODE_SIZE (mode
) * BITS_PER_UNIT
))
307 /* The first memory accesses must be a mempair operand. */
308 if (!th_mempair_operand_p (mem_1
, mode
))
311 /* The operands must be of the same size. */
312 gcc_assert (known_eq (GET_MODE_SIZE (GET_MODE (mem_1
)),
313 GET_MODE_SIZE (GET_MODE (mem_2
))));
318 /* Given OPERANDS of consecutive load/store that can be merged,
319 swap them if they are not in ascending order. */
322 th_mempair_order_operands (rtx operands
[4], bool load_p
, machine_mode mode
)
324 int mem_op
= load_p
? 1 : 0;
325 bool reversed
= false;
326 if (!th_mempair_check_consecutive_mems (mode
,
328 operands
+ mem_op
+ 2,
334 /* Irrespective of whether this is a load or a store,
335 we do the same swap. */
336 std::swap (operands
[0], operands
[2]);
337 std::swap (operands
[1], operands
[3]);
341 /* Similar like riscv_save_reg, but saves two registers to memory
342 and marks the resulting instruction as frame-related. */
345 th_mempair_save_regs (rtx operands
[4])
347 rtx set1
= gen_rtx_SET (operands
[0], operands
[1]);
348 rtx set2
= gen_rtx_SET (operands
[2], operands
[3]);
349 rtx dwarf
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (2));
350 rtx insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, set1
, set2
)));
351 RTX_FRAME_RELATED_P (insn
) = 1;
353 XVECEXP (dwarf
, 0, 0) = copy_rtx (set1
);
354 XVECEXP (dwarf
, 0, 1) = copy_rtx (set2
);
355 RTX_FRAME_RELATED_P (XVECEXP (dwarf
, 0, 0)) = 1;
356 RTX_FRAME_RELATED_P (XVECEXP (dwarf
, 0, 1)) = 1;
357 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, dwarf
);
360 /* Similar like riscv_restore_reg, but restores two registers from memory
361 and marks the instruction frame-related. */
364 th_mempair_restore_regs (rtx operands
[4])
366 rtx set1
= gen_rtx_SET (operands
[0], operands
[1]);
367 rtx set2
= gen_rtx_SET (operands
[2], operands
[3]);
368 rtx insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, set1
, set2
)));
369 RTX_FRAME_RELATED_P (insn
) = 1;
370 add_reg_note (insn
, REG_CFA_RESTORE
, operands
[0]);
371 add_reg_note (insn
, REG_CFA_RESTORE
, operands
[2]);
374 /* Prepare the OPERANDS array to emit a mempair instruction using the
375 provided information. No checks are performed, the resulting array
376 should be validated using th_mempair_operands_p(). */
379 th_mempair_prepare_save_restore_operands (rtx operands
[4],
380 bool load_p
, machine_mode mode
,
381 int regno
, HOST_WIDE_INT offset
,
382 int regno2
, HOST_WIDE_INT offset2
)
384 int reg_op
= load_p
? 0 : 1;
385 int mem_op
= load_p
? 1 : 0;
387 rtx mem1
= plus_constant (mode
, stack_pointer_rtx
, offset
);
388 mem1
= gen_frame_mem (mode
, mem1
);
389 rtx mem2
= plus_constant (mode
, stack_pointer_rtx
, offset2
);
390 mem2
= gen_frame_mem (mode
, mem2
);
392 operands
[reg_op
] = gen_rtx_REG (mode
, regno
);
393 operands
[mem_op
] = mem1
;
394 operands
[2 + reg_op
] = gen_rtx_REG (mode
, regno2
);
395 operands
[2 + mem_op
] = mem2
;
398 /* Emit a mempair instruction to save/restore two registers to/from stack. */
401 th_mempair_save_restore_regs (rtx operands
[4], bool load_p
,
404 gcc_assert (th_mempair_operands_p (operands
, load_p
, mode
));
406 th_mempair_order_operands (operands
, load_p
, mode
);
409 th_mempair_restore_regs (operands
);
411 th_mempair_save_regs (operands
);
414 /* Return true if X can be represented as signed immediate of NBITS bits.
415 The immediate is assumed to be shifted by LSHAMT bits left. */
418 valid_signed_immediate (rtx x
, unsigned nbits
, unsigned lshamt
)
420 if (GET_CODE (x
) != CONST_INT
)
423 HOST_WIDE_INT v
= INTVAL (x
);
425 HOST_WIDE_INT vunshifted
= v
>> lshamt
;
427 /* Make sure we did not shift out any bits. */
428 if (vunshifted
<< lshamt
!= v
)
431 unsigned HOST_WIDE_INT imm_reach
= 1LL << nbits
;
432 return ((unsigned HOST_WIDE_INT
) vunshifted
+ imm_reach
/2 < imm_reach
);
435 /* Return the address RTX of a move to/from memory
439 th_get_move_mem_addr (rtx dest
, rtx src
, bool load
)
448 gcc_assert (GET_CODE (mem
) == MEM
);
449 return XEXP (mem
, 0);
452 /* Return true if X is a valid address for T-Head's memory addressing modes
453 with pre/post modification for machine mode MODE.
454 If it is, fill in INFO appropriately (if non-NULL).
455 If STRICT_P is true then REG_OK_STRICT is in effect. */
458 th_memidx_classify_address_modify (struct riscv_address_info
*info
, rtx x
,
459 machine_mode mode
, bool strict_p
)
461 if (!TARGET_XTHEADMEMIDX
)
464 if (GET_MODE_CLASS (mode
) != MODE_INT
465 || GET_MODE_SIZE (mode
).to_constant () > UNITS_PER_WORD
)
468 if (GET_CODE (x
) != POST_MODIFY
469 && GET_CODE (x
) != PRE_MODIFY
)
472 rtx reg
= XEXP (x
, 0);
473 rtx exp
= XEXP (x
, 1);
474 rtx expreg
= XEXP (exp
, 0);
475 rtx expoff
= XEXP (exp
, 1);
477 if (GET_CODE (exp
) != PLUS
478 || !rtx_equal_p (expreg
, reg
)
479 || !CONST_INT_P (expoff
)
480 || !riscv_valid_base_register_p (reg
, mode
, strict_p
))
483 /* The offset is calculated as (sign_extend(imm5) << imm2) */
484 const int shamt_bits
= 2;
485 for (int shamt
= 0; shamt
< (1 << shamt_bits
); shamt
++)
488 if (valid_signed_immediate (expoff
, nbits
, shamt
))
492 info
->type
= ADDRESS_REG_WB
;
494 info
->offset
= expoff
;
504 /* Return TRUE if X is a MEM with a legitimate modify address. */
507 th_memidx_legitimate_modify_p (rtx x
)
512 /* Get the mode from the MEM and unpack it. */
513 machine_mode mode
= GET_MODE (x
);
516 return th_memidx_classify_address_modify (NULL
, x
, mode
, reload_completed
);
519 /* Return TRUE if X is a MEM with a legitimate modify address
520 and the address is POST_MODIFY (if POST is true) or a PRE_MODIFY
524 th_memidx_legitimate_modify_p (rtx x
, bool post
)
526 if (!th_memidx_legitimate_modify_p (x
))
529 /* Unpack the MEM and check the code. */
532 return GET_CODE (x
) == POST_MODIFY
;
534 return GET_CODE (x
) == PRE_MODIFY
;
537 /* Provide a buffer for a th.lXia/th.lXib/th.sXia/th.sXib instruction
538 for the given MODE. If LOAD is true, a load instruction will be
539 provided (otherwise, a store instruction). If X is not suitable
543 th_memidx_output_modify (rtx dest
, rtx src
, machine_mode mode
, bool load
)
546 rtx output_operands
[2];
547 rtx x
= th_get_move_mem_addr (dest
, src
, load
);
550 if (!th_memidx_classify_address_modify (NULL
, x
, mode
, reload_completed
))
553 int index
= exact_log2 (GET_MODE_SIZE (mode
).to_constant ());
554 bool post
= GET_CODE (x
) == POST_MODIFY
;
556 const char *const insn
[][4] = {
558 "th.sbi%s\t%%z1,%%0",
559 "th.shi%s\t%%z1,%%0",
560 "th.swi%s\t%%z1,%%0",
564 "th.lbui%s\t%%0,%%1",
565 "th.lhui%s\t%%0,%%1",
571 snprintf (format
, sizeof (format
), insn
[load
][index
], post
? "a" : "b");
572 output_operands
[0] = dest
;
573 output_operands
[1] = src
;
574 output_asm_insn (format
, output_operands
);
579 is_memidx_mode (machine_mode mode
)
581 if (mode
== QImode
|| mode
== HImode
|| mode
== SImode
)
584 if (mode
== DImode
&& TARGET_64BIT
)
591 is_fmemidx_mode (machine_mode mode
)
593 if (mode
== SFmode
&& TARGET_HARD_FLOAT
)
596 if (mode
== DFmode
&& TARGET_DOUBLE_FLOAT
)
602 /* Return true if X is a valid address for T-Head's memory addressing modes
603 with scaled register offsets for machine mode MODE.
604 If it is, fill in INFO appropriately (if non-NULL).
605 If STRICT_P is true then REG_OK_STRICT is in effect. */
608 th_memidx_classify_address_index (struct riscv_address_info
*info
, rtx x
,
609 machine_mode mode
, bool strict_p
)
611 /* Ensure that the mode is supported. */
612 if (!(TARGET_XTHEADMEMIDX
&& is_memidx_mode (mode
))
613 && !(TARGET_XTHEADMEMIDX
614 && TARGET_XTHEADFMEMIDX
&& is_fmemidx_mode (mode
)))
617 if (GET_CODE (x
) != PLUS
)
620 rtx op0
= XEXP (x
, 0);
621 rtx op1
= XEXP (x
, 1);
622 enum riscv_address_type type
;
627 if (!riscv_valid_base_register_p (reg
, mode
, strict_p
))
631 if (!riscv_valid_base_register_p (reg
, mode
, strict_p
))
636 if ((REG_P (offset
) || SUBREG_P (offset
))
637 && GET_MODE (offset
) == Xmode
)
639 type
= ADDRESS_REG_REG
;
643 /* (any_extend:DI (reg:SI)) */
644 else if (TARGET_64BIT
645 && (GET_CODE (offset
) == SIGN_EXTEND
646 || GET_CODE (offset
) == ZERO_EXTEND
)
647 && GET_MODE (offset
) == DImode
648 && GET_MODE (XEXP (offset
, 0)) == SImode
)
650 type
= (GET_CODE (offset
) == SIGN_EXTEND
)
651 ? ADDRESS_REG_REG
: ADDRESS_REG_UREG
;
653 offset
= XEXP (offset
, 0);
655 /* (mult:X (reg:X) (const_int scale)) */
656 else if (GET_CODE (offset
) == MULT
657 && GET_MODE (offset
) == Xmode
658 && REG_P (XEXP (offset
, 0))
659 && GET_MODE (XEXP (offset
, 0)) == Xmode
660 && CONST_INT_P (XEXP (offset
, 1))
661 && pow2p_hwi (INTVAL (XEXP (offset
, 1)))
662 && IN_RANGE (exact_log2 (INTVAL (XEXP (offset
, 1))), 1, 3))
664 type
= ADDRESS_REG_REG
;
665 shift
= exact_log2 (INTVAL (XEXP (offset
, 1)));
666 offset
= XEXP (offset
, 0);
668 /* (mult:DI (any_extend:DI (reg:SI)) (const_int scale)) */
669 else if (TARGET_64BIT
670 && GET_CODE (offset
) == MULT
671 && GET_MODE (offset
) == DImode
672 && (GET_CODE (XEXP (offset
, 0)) == SIGN_EXTEND
673 || GET_CODE (XEXP (offset
, 0)) == ZERO_EXTEND
)
674 && GET_MODE (XEXP (offset
, 0)) == DImode
675 && REG_P (XEXP (XEXP (offset
, 0), 0))
676 && GET_MODE (XEXP (XEXP (offset
, 0), 0)) == SImode
677 && CONST_INT_P (XEXP (offset
, 1)))
679 type
= (GET_CODE (XEXP (offset
, 0)) == SIGN_EXTEND
)
680 ? ADDRESS_REG_REG
: ADDRESS_REG_UREG
;
681 shift
= exact_log2 (INTVAL (XEXP (x
, 1)));
682 offset
= XEXP (XEXP (x
, 0), 0);
684 /* (ashift:X (reg:X) (const_int shift)) */
685 else if (GET_CODE (offset
) == ASHIFT
686 && GET_MODE (offset
) == Xmode
687 && REG_P (XEXP (offset
, 0))
688 && GET_MODE (XEXP (offset
, 0)) == Xmode
689 && CONST_INT_P (XEXP (offset
, 1))
690 && IN_RANGE (INTVAL (XEXP (offset
, 1)), 0, 3))
692 type
= ADDRESS_REG_REG
;
693 shift
= INTVAL (XEXP (offset
, 1));
694 offset
= XEXP (offset
, 0);
696 /* (ashift:DI (any_extend:DI (reg:SI)) (const_int shift)) */
697 else if (TARGET_64BIT
698 && GET_CODE (offset
) == ASHIFT
699 && GET_MODE (offset
) == DImode
700 && (GET_CODE (XEXP (offset
, 0)) == SIGN_EXTEND
701 || GET_CODE (XEXP (offset
, 0)) == ZERO_EXTEND
)
702 && GET_MODE (XEXP (offset
, 0)) == DImode
703 && GET_MODE (XEXP (XEXP (offset
, 0), 0)) == SImode
704 && CONST_INT_P (XEXP (offset
, 1))
705 && IN_RANGE(INTVAL (XEXP (offset
, 1)), 0, 3))
707 type
= (GET_CODE (XEXP (offset
, 0)) == SIGN_EXTEND
)
708 ? ADDRESS_REG_REG
: ADDRESS_REG_UREG
;
709 shift
= INTVAL (XEXP (offset
, 1));
710 offset
= XEXP (XEXP (offset
, 0), 0);
712 /* (and:X (mult:X (reg:X) (const_int scale)) (const_int mask)) */
713 else if (TARGET_64BIT
714 && GET_CODE (offset
) == AND
715 && GET_MODE (offset
) == DImode
716 && GET_CODE (XEXP (offset
, 0)) == MULT
717 && GET_MODE (XEXP (offset
, 0)) == DImode
718 && REG_P (XEXP (XEXP (offset
, 0), 0))
719 && GET_MODE (XEXP (XEXP (offset
, 0), 0)) == DImode
720 && CONST_INT_P (XEXP (XEXP (offset
, 0), 1))
721 && pow2p_hwi (INTVAL (XEXP (XEXP (offset
, 0), 1)))
722 && IN_RANGE (exact_log2 (INTVAL (XEXP (XEXP (offset
, 0), 1))), 1, 3)
723 && CONST_INT_P (XEXP (offset
, 1))
724 && INTVAL (XEXP (offset
, 1))
725 >> exact_log2 (INTVAL (XEXP (XEXP (offset
, 0), 1))) == 0xffffffff)
727 type
= ADDRESS_REG_UREG
;
728 shift
= exact_log2 (INTVAL (XEXP (XEXP (offset
, 0), 1)));
729 offset
= XEXP (XEXP (offset
, 0), 0);
734 if (!strict_p
&& SUBREG_P (offset
)
735 && GET_MODE (SUBREG_REG (offset
)) == SImode
)
736 offset
= SUBREG_REG (offset
);
739 || !riscv_regno_mode_ok_for_base_p (REGNO (offset
), mode
, strict_p
))
746 info
->offset
= offset
;
752 /* Return TRUE if X is a MEM with a legitimate indexed address. */
755 th_memidx_legitimate_index_p (rtx x
)
760 /* Get the mode from the MEM and unpack it. */
761 machine_mode mode
= GET_MODE (x
);
764 return th_memidx_classify_address_index (NULL
, x
, mode
, reload_completed
);
767 /* Return TRUE if X is a MEM with a legitimate indexed address
768 and the offset register is zero-extended (if UINDEX is true)
769 or sign-extended (otherwise). */
772 th_memidx_legitimate_index_p (rtx x
, bool uindex
)
777 /* Get the mode from the MEM and unpack it. */
778 machine_mode mode
= GET_MODE (x
);
781 struct riscv_address_info info
;
782 if (!th_memidx_classify_address_index (&info
, x
, mode
, reload_completed
))
786 return info
.type
== ADDRESS_REG_UREG
;
788 return info
.type
== ADDRESS_REG_REG
;
791 /* Provide a buffer for a th.lrX/th.lurX/th.srX/th.surX instruction
792 for the given MODE. If LOAD is true, a load instruction will be
793 provided (otherwise, a store instruction). If X is not suitable
797 th_memidx_output_index (rtx dest
, rtx src
, machine_mode mode
, bool load
)
799 struct riscv_address_info info
;
801 rtx output_operands
[2];
802 rtx x
= th_get_move_mem_addr (dest
, src
, load
);
805 if (!th_memidx_classify_address_index (&info
, x
, mode
, reload_completed
))
808 int index
= exact_log2 (GET_MODE_SIZE (mode
).to_constant ());
809 bool uindex
= info
.type
== ADDRESS_REG_UREG
;
811 const char *const insn
[][4] = {
813 "th.s%srb\t%%z1,%%0",
814 "th.s%srh\t%%z1,%%0",
815 "th.s%srw\t%%z1,%%0",
819 "th.l%srbu\t%%0,%%1",
820 "th.l%srhu\t%%0,%%1",
826 snprintf (format
, sizeof (format
), insn
[load
][index
], uindex
? "u" : "");
827 output_operands
[0] = dest
;
828 output_operands
[1] = src
;
829 output_asm_insn (format
, output_operands
);
833 /* Provide a buffer for a th.flX/th.fluX/th.fsX/th.fsuX instruction
834 for the given MODE. If LOAD is true, a load instruction will be
835 provided (otherwise, a store instruction). If X is not suitable
839 th_fmemidx_output_index (rtx dest
, rtx src
, machine_mode mode
, bool load
)
841 struct riscv_address_info info
;
843 rtx output_operands
[2];
844 rtx x
= th_get_move_mem_addr (dest
, src
, load
);
847 if (!th_memidx_classify_address_index (&info
, x
, mode
, reload_completed
))
850 int index
= exact_log2 (GET_MODE_SIZE (mode
).to_constant ()) - 2;
851 bool uindex
= info
.type
== ADDRESS_REG_UREG
;
853 const char *const insn
[][2] = {
855 "th.fs%srw\t%%z1,%%0",
856 "th.fs%srd\t%%z1,%%0"
859 "th.fl%srw\t%%0,%%1",
864 snprintf (format
, sizeof (format
), insn
[load
][index
], uindex
? "u" : "");
865 output_operands
[0] = dest
;
866 output_operands
[1] = src
;
867 output_asm_insn (format
, output_operands
);
871 /* Return true if X is a valid address for T-Head's memory addressing modes
872 for machine mode MODE. If it is, fill in INFO appropriately (if non-NULL).
873 If STRICT_P is true then REG_OK_STRICT is in effect. */
876 th_classify_address (struct riscv_address_info
*info
, rtx x
,
877 machine_mode mode
, bool strict_p
)
879 switch (GET_CODE (x
))
882 if (th_memidx_classify_address_index (info
, x
, mode
, strict_p
))
888 if (th_memidx_classify_address_modify (info
, x
, mode
, strict_p
))
899 /* Provide a string containing a XTheadMemIdx instruction for the given
900 MODE from the provided SRC to the provided DEST.
901 A pointer to a NULL-terminated string containing the instruction will
902 be returned if a suitable instruction is available. Otherwise, this
903 function returns NULL. */
906 th_output_move (rtx dest
, rtx src
)
908 enum rtx_code dest_code
, src_code
;
910 const char *insn
= NULL
;
912 dest_code
= GET_CODE (dest
);
913 src_code
= GET_CODE (src
);
914 mode
= GET_MODE (dest
);
916 if (!(mode
== GET_MODE (src
) || src
== CONST0_RTX (mode
)))
919 if (dest_code
== REG
&& src_code
== MEM
)
921 if (GET_MODE_CLASS (mode
) == MODE_INT
922 || (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& GP_REG_P (REGNO (dest
))))
924 if ((insn
= th_memidx_output_index (dest
, src
, mode
, true)))
926 if ((insn
= th_memidx_output_modify (dest
, src
, mode
, true)))
929 else if (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& HARDFP_REG_P (REGNO (dest
)))
931 if ((insn
= th_fmemidx_output_index (dest
, src
, mode
, true)))
935 else if (dest_code
== MEM
&& (src_code
== REG
|| src
== CONST0_RTX (mode
)))
937 if (GET_MODE_CLASS (mode
) == MODE_INT
938 || src
== CONST0_RTX (mode
)
939 || (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& GP_REG_P (REGNO (src
))))
941 if ((insn
= th_memidx_output_index (dest
, src
, mode
, false)))
943 if ((insn
= th_memidx_output_modify (dest
, src
, mode
, false)))
946 else if (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& HARDFP_REG_P (REGNO (src
)))
948 if ((insn
= th_fmemidx_output_index (dest
, src
, mode
, false)))
955 /* Define ASM_OUTPUT_OPCODE to do anything special before
956 emitting an opcode. */
958 th_asm_output_opcode (FILE *asm_out_file
, const char *p
)
960 /* We need to add th. prefix to all the xtheadvector
962 if (current_output_insn
!= NULL
)
964 if (get_attr_type (current_output_insn
) == TYPE_VSETVL
)
966 if (strstr (p
, "zero"))
968 if (strstr (p
, "zero,zero"))
969 return "th.vsetvli\tzero,zero,e%0,%m1";
971 return "th.vsetvli\tzero,%z0,e%1,%m2";
975 return "th.vsetvli\t%z0,%z1,e%2,%m3";
979 if (get_attr_type (current_output_insn
) == TYPE_VLDE
||
980 get_attr_type (current_output_insn
) == TYPE_VSTE
||
981 get_attr_type (current_output_insn
) == TYPE_VLDFF
)
983 if (strstr (p
, "e8") || strstr (p
, "e16") ||
984 strstr (p
, "e32") || strstr (p
, "e64"))
986 get_attr_type (current_output_insn
) == TYPE_VSTE
987 ? fputs ("th.vse", asm_out_file
)
988 : fputs ("th.vle", asm_out_file
);
989 if (strstr (p
, "e8"))
996 if (get_attr_type (current_output_insn
) == TYPE_VLDS
||
997 get_attr_type (current_output_insn
) == TYPE_VSTS
)
999 if (strstr (p
, "vle8") || strstr (p
, "vse8") ||
1000 strstr (p
, "vle16") || strstr (p
, "vse16") ||
1001 strstr (p
, "vle32") || strstr (p
, "vse32") ||
1002 strstr (p
, "vle64") || strstr (p
, "vse64"))
1004 get_attr_type (current_output_insn
) == TYPE_VSTS
1005 ? fputs ("th.vse", asm_out_file
)
1006 : fputs ("th.vle", asm_out_file
);
1007 if (strstr (p
, "e8"))
1012 else if (strstr (p
, "vlse8") || strstr (p
, "vsse8") ||
1013 strstr (p
, "vlse16") || strstr (p
, "vsse16") ||
1014 strstr (p
, "vlse32") || strstr (p
, "vsse32") ||
1015 strstr (p
, "vlse64") || strstr (p
, "vsse64"))
1017 get_attr_type (current_output_insn
) == TYPE_VSTS
1018 ? fputs ("th.vsse", asm_out_file
)
1019 : fputs ("th.vlse", asm_out_file
);
1020 if (strstr (p
, "e8"))
1027 if (get_attr_type (current_output_insn
) == TYPE_VLDUX
||
1028 get_attr_type (current_output_insn
) == TYPE_VLDOX
)
1030 if (strstr (p
, "ei"))
1032 fputs ("th.vlxe", asm_out_file
);
1033 if (strstr (p
, "ei8"))
1040 if (get_attr_type (current_output_insn
) == TYPE_VSTUX
||
1041 get_attr_type (current_output_insn
) == TYPE_VSTOX
)
1043 if (strstr (p
, "ei"))
1045 get_attr_type (current_output_insn
) == TYPE_VSTUX
1046 ? fputs ("th.vsuxe", asm_out_file
)
1047 : fputs ("th.vsxe", asm_out_file
);
1048 if (strstr (p
, "ei8"))
1055 if (get_attr_type (current_output_insn
) == TYPE_VLSEGDE
||
1056 get_attr_type (current_output_insn
) == TYPE_VSSEGTE
||
1057 get_attr_type (current_output_insn
) == TYPE_VLSEGDFF
)
1059 get_attr_type (current_output_insn
) == TYPE_VSSEGTE
1060 ? fputs ("th.vsseg", asm_out_file
)
1061 : fputs ("th.vlseg", asm_out_file
);
1062 asm_fprintf (asm_out_file
, "%c", p
[5]);
1063 fputs ("e", asm_out_file
);
1064 if (strstr (p
, "e8"))
1070 if (get_attr_type (current_output_insn
) == TYPE_VLSEGDS
||
1071 get_attr_type (current_output_insn
) == TYPE_VSSEGTS
)
1073 get_attr_type (current_output_insn
) == TYPE_VSSEGTS
1074 ? fputs ("th.vssseg", asm_out_file
)
1075 : fputs ("th.vlsseg", asm_out_file
);
1076 asm_fprintf (asm_out_file
, "%c", p
[6]);
1077 fputs ("e", asm_out_file
);
1078 if (strstr (p
, "e8"))
1084 if (get_attr_type (current_output_insn
) == TYPE_VLSEGDUX
||
1085 get_attr_type (current_output_insn
) == TYPE_VLSEGDOX
)
1087 fputs ("th.vlxseg", asm_out_file
);
1088 asm_fprintf (asm_out_file
, "%c", p
[7]);
1089 fputs ("e", asm_out_file
);
1090 if (strstr (p
, "ei8"))
1096 if (get_attr_type (current_output_insn
) == TYPE_VSSEGTUX
||
1097 get_attr_type (current_output_insn
) == TYPE_VSSEGTOX
)
1099 fputs ("th.vsxseg", asm_out_file
);
1100 asm_fprintf (asm_out_file
, "%c", p
[7]);
1101 fputs ("e", asm_out_file
);
1102 if (strstr (p
, "ei8"))
1108 if (get_attr_type (current_output_insn
) == TYPE_VNSHIFT
)
1110 if (strstr (p
, "vncvt"))
1112 fputs ("th.vncvt.x.x.v", asm_out_file
);
1116 strstr (p
, "vnsrl") ? fputs ("th.vnsrl.v", asm_out_file
)
1117 : fputs ("th.vnsra.v", asm_out_file
);
1121 if (get_attr_type (current_output_insn
) == TYPE_VNCLIP
)
1123 if (strstr (p
, "vnclipu"))
1125 fputs ("th.vnclipu.v", asm_out_file
);
1130 fputs ("th.vnclip.v", asm_out_file
);
1135 if (get_attr_type (current_output_insn
) == TYPE_VMPOP
)
1137 fputs ("th.vmpopc", asm_out_file
);
1141 if (get_attr_type (current_output_insn
) == TYPE_VMFFS
)
1143 fputs ("th.vmfirst", asm_out_file
);
1147 if (get_attr_type (current_output_insn
) == TYPE_VFNCVTFTOI
||
1148 get_attr_type (current_output_insn
) == TYPE_VFNCVTITOF
)
1150 if (strstr (p
, "xu"))
1152 get_attr_type (current_output_insn
) == TYPE_VFNCVTFTOI
1153 ? fputs ("th.vfncvt.xu.f.v", asm_out_file
)
1154 : fputs ("th.vfncvt.f.xu.v", asm_out_file
);
1159 get_attr_type (current_output_insn
) == TYPE_VFNCVTFTOI
1160 ? fputs ("th.vfncvt.x.f.v", asm_out_file
)
1161 : fputs ("th.vfncvt.f.x.v", asm_out_file
);
1166 if (get_attr_type (current_output_insn
) == TYPE_VFNCVTFTOF
)
1168 fputs ("th.vfncvt.f.f.v", asm_out_file
);
1172 if (get_attr_type (current_output_insn
) == TYPE_VFREDU
1173 && strstr (p
, "sum"))
1175 fputs ("th.vfredsum", asm_out_file
);
1179 if (get_attr_type (current_output_insn
) == TYPE_VFWREDU
1180 && strstr (p
, "sum"))
1182 fputs ("th.vfwredsum", asm_out_file
);
1187 fputs ("th.", asm_out_file
);
1193 /* Implement TARGET_PRINT_OPERAND_ADDRESS for XTheadMemIdx. */
1196 th_print_operand_address (FILE *file
, machine_mode mode
, rtx x
)
1198 struct riscv_address_info addr
;
1200 if (!th_classify_address (&addr
, x
, mode
, reload_completed
))
1205 case ADDRESS_REG_REG
:
1206 case ADDRESS_REG_UREG
:
1207 fprintf (file
, "%s,%s,%u", reg_names
[REGNO (addr
.reg
)],
1208 reg_names
[REGNO (addr
.offset
)], addr
.shift
);
1211 case ADDRESS_REG_WB
:
1212 fprintf (file
, "(%s)," HOST_WIDE_INT_PRINT_DEC
",%u",
1213 reg_names
[REGNO (addr
.reg
)],
1214 INTVAL (addr
.offset
) >> addr
.shift
, addr
.shift
);
1224 /* Number array of registers X1, X5-X7, X10-X17, X28-X31, to be
1225 operated on by instruction th.ipush/th.ipop in XTheadInt. */
1227 int th_int_regs
[] ={
1229 T0_REGNUM
, T1_REGNUM
, T2_REGNUM
,
1230 A0_REGNUM
, A1_REGNUM
, A2_REGNUM
, A3_REGNUM
,
1231 A4_REGNUM
, A5_REGNUM
, A6_REGNUM
, A7_REGNUM
,
1232 T3_REGNUM
, T4_REGNUM
, T5_REGNUM
, T6_REGNUM
,
1235 /* If MASK contains registers X1, X5-X7, X10-X17, X28-X31, then
1236 return the mask composed of these registers, otherwise return
1240 th_int_get_mask (unsigned int mask
)
1242 unsigned int xtheadint_mask
= 0;
1244 if (!TARGET_XTHEADINT
|| TARGET_64BIT
)
1247 for (unsigned int i
= 0; i
< ARRAY_SIZE (th_int_regs
); i
++)
1249 if (!BITSET_P (mask
, th_int_regs
[i
]))
1252 xtheadint_mask
|= (1 << th_int_regs
[i
]);
1255 return xtheadint_mask
; /* Usually 0xf003fce2. */
1258 /* Returns the occupied frame needed to save registers X1, X5-X7,
1259 X10-X17, X28-X31. */
1262 th_int_get_save_adjustment (void)
1264 gcc_assert (TARGET_XTHEADINT
&& !TARGET_64BIT
);
1265 return ARRAY_SIZE (th_int_regs
) * UNITS_PER_WORD
;
1269 th_int_adjust_cfi_prologue (unsigned int mask
)
1271 gcc_assert (TARGET_XTHEADINT
&& !TARGET_64BIT
);
1273 rtx dwarf
= NULL_RTX
;
1274 rtx adjust_sp_rtx
, reg
, mem
, insn
;
1275 int saved_size
= ARRAY_SIZE (th_int_regs
) * UNITS_PER_WORD
;
1276 int offset
= saved_size
;
1278 for (int regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
1279 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
1281 offset
-= UNITS_PER_WORD
;
1282 reg
= gen_rtx_REG (SImode
, regno
);
1283 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
1287 insn
= gen_rtx_SET (mem
, reg
);
1288 dwarf
= alloc_reg_note (REG_CFA_OFFSET
, insn
, dwarf
);
1291 /* Debug info for adjust sp. */
1293 gen_rtx_SET (stack_pointer_rtx
,
1294 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx
),
1295 stack_pointer_rtx
, GEN_INT (-saved_size
)));
1296 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, adjust_sp_rtx
, dwarf
);