1 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
2 ; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
3 ; RUN: -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
4 ; RUN: | FileCheck -check-prefix=RV64I %s
6 ; This file contains tests that should have identical output for the lp64 and
; Callee side, lp64d: the i64 %a arrives in GPR $x10 and the double %b in
; FPR $f10_d (see the liveins line below) — the double does not consume a GPR.
9 define i64 @callee_double_in_regs(i64 %a, double %b) nounwind {
10 ; RV64I-LABEL: name: callee_double_in_regs
11 ; RV64I: bb.1 (%ir-block.0):
12 ; RV64I-NEXT: liveins: $x10, $f10_d
14 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
15 ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
16 ; RV64I-NEXT: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY1]](s64)
17 ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[FPTOSI]]
18 ; RV64I-NEXT: $x10 = COPY [[ADD]](s64)
19 ; RV64I-NEXT: PseudoRET implicit $x10
20 %b_fptosi = fptosi double %b to i64
21 %1 = add i64 %a, %b_fptosi
; Caller side of the case above: the i64 constant 1 is copied into $x10 and the
; double constant 2.0 into $f10_d before the PseudoCALL; the i64 result is read
; back from $x10. No stack space is needed (ADJCALLSTACKDOWN 0).
25 define i64 @caller_double_in_regs() nounwind {
26 ; RV64I-LABEL: name: caller_double_in_regs
27 ; RV64I: bb.1 (%ir-block.0):
28 ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
29 ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
30 ; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
31 ; RV64I-NEXT: $x10 = COPY [[C]](s64)
32 ; RV64I-NEXT: $f10_d = COPY [[C1]](s64)
33 ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $f10_d, implicit-def $x10
34 ; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
35 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
36 ; RV64I-NEXT: $x10 = COPY [[COPY]](s64)
37 ; RV64I-NEXT: PseudoRET implicit $x10
38 %1 = call i64 @callee_double_in_regs(i64 1, double 2.0)
42 ; Must keep define on a single line due to an update_llc_test_checks.py limitation
; Callee side: the four i128 arguments consume all eight argument GPRs
; ($x10-$x17, each i128 split across a register pair and rebuilt with
; G_MERGE_VALUES), so the i64 %e is loaded from the fixed stack slot
; (%fixed-stack.0) — but the double %f still travels in FPR $f10_d.
43 define i64 @callee_double_in_fpr_exhausted_gprs(i128 %a, i128 %b, i128 %c, i128 %d, i64 %e, double %f) nounwind {
44 ; RV64I-LABEL: name: callee_double_in_fpr_exhausted_gprs
45 ; RV64I: bb.1 (%ir-block.0):
46 ; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_d
48 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
49 ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
50 ; RV64I-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
51 ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
52 ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
53 ; RV64I-NEXT: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
54 ; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
55 ; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
56 ; RV64I-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY4]](s64), [[COPY5]](s64)
57 ; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
58 ; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
59 ; RV64I-NEXT: [[MV3:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY6]](s64), [[COPY7]](s64)
60 ; RV64I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
61 ; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.0, align 16)
62 ; RV64I-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $f10_d
63 ; RV64I-NEXT: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY8]](s64)
64 ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[FPTOSI]]
65 ; RV64I-NEXT: $x10 = COPY [[ADD]](s64)
66 ; RV64I-NEXT: PseudoRET implicit $x10
67 %f_fptosi = fptosi double %f to i64
68 %1 = add i64 %e, %f_fptosi
; Caller side of the case above: 8 bytes of outgoing stack are reserved
; (ADJCALLSTACKDOWN 8), each i128 constant is split with G_UNMERGE_VALUES into
; a GPR pair ($x10-$x17), the i64 5 is stored into the stack slot at offset 0,
; and the double 6.0 goes in $f10_d.
72 define i64 @caller_double_in_fpr_exhausted_gprs() nounwind {
73 ; RV64I-LABEL: name: caller_double_in_fpr_exhausted_gprs
74 ; RV64I: bb.1 (%ir-block.0):
75 ; RV64I-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 1
76 ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
77 ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s128) = G_CONSTANT i128 3
78 ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s128) = G_CONSTANT i128 4
79 ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
80 ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_FCONSTANT double 6.000000e+00
81 ; RV64I-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
82 ; RV64I-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C]](s128)
83 ; RV64I-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C1]](s128)
84 ; RV64I-NEXT: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C2]](s128)
85 ; RV64I-NEXT: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C3]](s128)
86 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
87 ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
88 ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
89 ; RV64I-NEXT: G_STORE [[C4]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
90 ; RV64I-NEXT: $x10 = COPY [[UV]](s64)
91 ; RV64I-NEXT: $x11 = COPY [[UV1]](s64)
92 ; RV64I-NEXT: $x12 = COPY [[UV2]](s64)
93 ; RV64I-NEXT: $x13 = COPY [[UV3]](s64)
94 ; RV64I-NEXT: $x14 = COPY [[UV4]](s64)
95 ; RV64I-NEXT: $x15 = COPY [[UV5]](s64)
96 ; RV64I-NEXT: $x16 = COPY [[UV6]](s64)
97 ; RV64I-NEXT: $x17 = COPY [[UV7]](s64)
98 ; RV64I-NEXT: $f10_d = COPY [[C5]](s64)
99 ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_fpr_exhausted_gprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_d, implicit-def $x10
100 ; RV64I-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
101 ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
102 ; RV64I-NEXT: $x10 = COPY [[COPY1]](s64)
103 ; RV64I-NEXT: PseudoRET implicit $x10
104 %1 = call i64 @callee_double_in_fpr_exhausted_gprs(
105 i128 1, i128 2, i128 3, i128 4, i64 5, double 6.0)
109 ; Must keep define on a single line due to an update_llc_test_checks.py limitation
; Callee side: the first eight doubles (%a-%h) fill all argument FPRs
; $f10_d-$f17_d, so the ninth double %i spills over into GPR $x10 (COPY8
; below) rather than onto the stack.
110 define i32 @callee_double_in_gpr_exhausted_fprs(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i) nounwind {
111 ; RV64I-LABEL: name: callee_double_in_gpr_exhausted_fprs
112 ; RV64I: bb.1 (%ir-block.0):
113 ; RV64I-NEXT: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d, $f14_d, $f15_d, $f16_d, $f17_d
115 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
116 ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f11_d
117 ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $f12_d
118 ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $f13_d
119 ; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $f14_d
120 ; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $f15_d
121 ; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $f16_d
122 ; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $f17_d
123 ; RV64I-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
124 ; RV64I-NEXT: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY7]](s64)
125 ; RV64I-NEXT: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY8]](s64)
126 ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[FPTOSI]], [[FPTOSI1]]
127 ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
128 ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
129 ; RV64I-NEXT: PseudoRET implicit $x10
130 %h_fptosi = fptosi double %h to i32
131 %i_fptosi = fptosi double %i to i32
132 %1 = add i32 %h_fptosi, %i_fptosi
; Caller side of the case above: doubles 1.0-8.0 are copied into
; $f10_d-$f17_d and the ninth double 9.0 into GPR $x10. The i32 result comes
; back in $x10 and is truncated/any-extended for the return.
136 define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
137 ; RV64I-LABEL: name: caller_double_in_gpr_exhausted_fprs
138 ; RV64I: bb.1 (%ir-block.0):
139 ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
140 ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
141 ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 3.000000e+00
142 ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
143 ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e+00
144 ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_FCONSTANT double 6.000000e+00
145 ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_FCONSTANT double 7.000000e+00
146 ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_FCONSTANT double 8.000000e+00
147 ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 9.000000e+00
148 ; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
149 ; RV64I-NEXT: $f10_d = COPY [[C]](s64)
150 ; RV64I-NEXT: $f11_d = COPY [[C1]](s64)
151 ; RV64I-NEXT: $f12_d = COPY [[C2]](s64)
152 ; RV64I-NEXT: $f13_d = COPY [[C3]](s64)
153 ; RV64I-NEXT: $f14_d = COPY [[C4]](s64)
154 ; RV64I-NEXT: $f15_d = COPY [[C5]](s64)
155 ; RV64I-NEXT: $f16_d = COPY [[C6]](s64)
156 ; RV64I-NEXT: $f17_d = COPY [[C7]](s64)
157 ; RV64I-NEXT: $x10 = COPY [[C8]](s64)
158 ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_gpr_exhausted_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit $f11_d, implicit $f12_d, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x10, implicit-def $x10
159 ; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
160 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
161 ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
162 ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
163 ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
164 ; RV64I-NEXT: PseudoRET implicit $x10
165 %1 = call i32 @callee_double_in_gpr_exhausted_fprs(
166 double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0,
167 double 7.0, double 8.0, double 9.0)
171 ; Must keep define on a single line due to an update_llc_test_checks.py limitation
; Callee side: the i128s consume all GPR pairs ($x10-$x17) and the doubles
; %b,%d,%f,%h-%l consume all FPRs ($f10_d-$f17_d), so the last double %m is
; loaded from the fixed stack slot (%fixed-stack.0). The returned value mixes
; a truncation of the fourth i128 (%g, from the $x16/$x17 pair) with the
; converted stack double.
172 define i64 @callee_double_on_stack_exhausted_gprs_fprs(i128 %a, double %b, i128 %c, double %d, i128 %e, double %f, i128 %g, double %h, double %i, double %j, double %k, double %l, double %m) nounwind {
173 ; RV64I-LABEL: name: callee_double_on_stack_exhausted_gprs_fprs
174 ; RV64I: bb.1 (%ir-block.0):
175 ; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $f10_d, $f11_d, $f12_d, $f13_d, $f14_d, $f15_d, $f16_d, $f17_d
177 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
178 ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
179 ; RV64I-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
180 ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $f10_d
181 ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x12
182 ; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x13
183 ; RV64I-NEXT: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY3]](s64), [[COPY4]](s64)
184 ; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $f11_d
185 ; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x14
186 ; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x15
187 ; RV64I-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY6]](s64), [[COPY7]](s64)
188 ; RV64I-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $f12_d
189 ; RV64I-NEXT: [[COPY9:%[0-9]+]]:_(s64) = COPY $x16
190 ; RV64I-NEXT: [[COPY10:%[0-9]+]]:_(s64) = COPY $x17
191 ; RV64I-NEXT: [[MV3:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY9]](s64), [[COPY10]](s64)
192 ; RV64I-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY $f13_d
193 ; RV64I-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY $f14_d
194 ; RV64I-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY $f15_d
195 ; RV64I-NEXT: [[COPY14:%[0-9]+]]:_(s64) = COPY $f16_d
196 ; RV64I-NEXT: [[COPY15:%[0-9]+]]:_(s64) = COPY $f17_d
197 ; RV64I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
198 ; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.0, align 16)
199 ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[MV3]](s128)
200 ; RV64I-NEXT: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[LOAD]](s64)
201 ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[TRUNC]], [[FPTOSI]]
202 ; RV64I-NEXT: $x10 = COPY [[ADD]](s64)
203 ; RV64I-NEXT: PseudoRET implicit $x10
204 %g_trunc = trunc i128 %g to i64
205 %m_fptosi = fptosi double %m to i64
206 %1 = add i64 %g_trunc, %m_fptosi
; Caller side of the case above: 8 bytes of outgoing stack are reserved,
; i128 constants are unmerged into GPR pairs, doubles 2.0-12.0 fill
; $f10_d-$f17_d interleaved with the GPR pairs, and the final double 13.0 is
; stored into the stack slot at offset 0 from $x2 (sp).
210 define i64 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
211 ; RV64I-LABEL: name: caller_double_on_stack_exhausted_gprs_fprs
212 ; RV64I: bb.1 (%ir-block.0):
213 ; RV64I-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 1
214 ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
215 ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s128) = G_CONSTANT i128 3
216 ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
217 ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s128) = G_CONSTANT i128 5
218 ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_FCONSTANT double 6.000000e+00
219 ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s128) = G_CONSTANT i128 7
220 ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_FCONSTANT double 8.000000e+00
221 ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 9.000000e+00
222 ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+01
223 ; RV64I-NEXT: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.100000e+01
224 ; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.200000e+01
225 ; RV64I-NEXT: [[C12:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.300000e+01
226 ; RV64I-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
227 ; RV64I-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C]](s128)
228 ; RV64I-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C2]](s128)
229 ; RV64I-NEXT: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C4]](s128)
230 ; RV64I-NEXT: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C6]](s128)
231 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
232 ; RV64I-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
233 ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C13]](s64)
234 ; RV64I-NEXT: G_STORE [[C12]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
235 ; RV64I-NEXT: $x10 = COPY [[UV]](s64)
236 ; RV64I-NEXT: $x11 = COPY [[UV1]](s64)
237 ; RV64I-NEXT: $f10_d = COPY [[C1]](s64)
238 ; RV64I-NEXT: $x12 = COPY [[UV2]](s64)
239 ; RV64I-NEXT: $x13 = COPY [[UV3]](s64)
240 ; RV64I-NEXT: $f11_d = COPY [[C3]](s64)
241 ; RV64I-NEXT: $x14 = COPY [[UV4]](s64)
242 ; RV64I-NEXT: $x15 = COPY [[UV5]](s64)
243 ; RV64I-NEXT: $f12_d = COPY [[C5]](s64)
244 ; RV64I-NEXT: $x16 = COPY [[UV6]](s64)
245 ; RV64I-NEXT: $x17 = COPY [[UV7]](s64)
246 ; RV64I-NEXT: $f13_d = COPY [[C7]](s64)
247 ; RV64I-NEXT: $f14_d = COPY [[C8]](s64)
248 ; RV64I-NEXT: $f15_d = COPY [[C9]](s64)
249 ; RV64I-NEXT: $f16_d = COPY [[C10]](s64)
250 ; RV64I-NEXT: $f17_d = COPY [[C11]](s64)
251 ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_on_stack_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_d, implicit $x12, implicit $x13, implicit $f11_d, implicit $x14, implicit $x15, implicit $f12_d, implicit $x16, implicit $x17, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit-def $x10
252 ; RV64I-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
253 ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
254 ; RV64I-NEXT: $x10 = COPY [[COPY1]](s64)
255 ; RV64I-NEXT: PseudoRET implicit $x10
256 %1 = call i64 @callee_double_on_stack_exhausted_gprs_fprs(
257 i128 1, double 2.0, i128 3, double 4.0, i128 5, double 6.0, i128 7, double 8.0,
258 double 9.0, double 10.0, double 11.0, double 12.0, double 13.0)
; Return-value lowering: with lp64d a double return value travels back to the
; caller in FPR $f10_d.
262 define double @callee_double_ret() nounwind {
263 ; RV64I-LABEL: name: callee_double_ret
264 ; RV64I: bb.1 (%ir-block.0):
265 ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
266 ; RV64I-NEXT: $f10_d = COPY [[C]](s64)
267 ; RV64I-NEXT: PseudoRET implicit $f10_d
271 define i64 @caller_double_ret() nounwind {
272 ; RV64I-LABEL: name: caller_double_ret
273 ; RV64I: bb.1 (%ir-block.0):
274 ; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
275 ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $f10_d
276 ; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
277 ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
278 ; RV64I-NEXT: $x10 = COPY [[COPY]](s64)
279 ; RV64I-NEXT: PseudoRET implicit $x10
280 %1 = call double @callee_double_ret()
281 %2 = bitcast double %1 to i64