1 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
2 ; RUN: llc -mtriple=riscv64 \
3 ; RUN: -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
4 ; RUN: | FileCheck -check-prefixes=RV64I,LP64 %s
5 ; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f \
6 ; RUN: -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
7 ; RUN: | FileCheck -check-prefixes=RV64I,LP64F %s
8 ; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
9 ; RUN: -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
10 ; RUN: | FileCheck -check-prefixes=RV64I,LP64D %s
12 ; This file contains tests that should have identical output for the lp64,
13 ; lp64f, and lp64d ABIs. i.e. where no arguments are passed according to
14 ; the floating point ABI.
16 ; Check that on RV64, i128 is passed in a pair of registers. Unlike
17 ; the convention for varargs, this need not be an aligned pair.
; Callee side: the i64 %a arrives in x10 and the i128 %b occupies the x11/x12
; pair (re-assembled with G_MERGE_VALUES), i.e. i128 consumes two GPRs.
define i64 @callee_i128_in_regs(i64 %a, i128 %b) nounwind {
; RV64I-LABEL: name: callee_i128_in_regs
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11, $x12
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64I-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s64), [[COPY2]](s64)
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[MV]](s128)
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[TRUNC]]
; RV64I-NEXT: $x10 = COPY [[ADD]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
  %b_trunc = trunc i128 %b to i64
  %1 = add i64 %a, %b_trunc
; Caller side: the i128 constant 2 is split into two s64 pieces with
; G_UNMERGE_VALUES and passed in x11/x12. The three ABI variants are
; identical except for the CSR mask named on PseudoCALL.
define i64 @caller_i128_in_regs() nounwind {
; LP64-LABEL: name: caller_i128_in_regs
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64-NEXT: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C1]](s128)
; LP64-NEXT: $x10 = COPY [[C]](s64)
; LP64-NEXT: $x11 = COPY [[UV]](s64)
; LP64-NEXT: $x12 = COPY [[UV1]](s64)
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_i128_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: $x10 = COPY [[COPY]](s64)
; LP64-NEXT: PseudoRET implicit $x10
; LP64F-LABEL: name: caller_i128_in_regs
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64F-NEXT: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C1]](s128)
; LP64F-NEXT: $x10 = COPY [[C]](s64)
; LP64F-NEXT: $x11 = COPY [[UV]](s64)
; LP64F-NEXT: $x12 = COPY [[UV1]](s64)
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_i128_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: $x10 = COPY [[COPY]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
; LP64D-LABEL: name: caller_i128_in_regs
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64D-NEXT: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C1]](s128)
; LP64D-NEXT: $x10 = COPY [[C]](s64)
; LP64D-NEXT: $x11 = COPY [[UV]](s64)
; LP64D-NEXT: $x12 = COPY [[UV1]](s64)
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_i128_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: $x10 = COPY [[COPY]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
  %1 = call i64 @callee_i128_in_regs(i64 1, i128 2)
; Check the correct handling of passing values that are larger than 2*XLen.
; An i256 does not fit in registers, so it is passed by indirect reference:
; the constant 42 is spilled to a local slot (%stack.0) and, because x10-x17
; are consumed by the eight i64 arguments, the pointer to that slot is itself
; passed on the stack (one 8-byte outgoing slot, hence ADJCALLSTACKDOWN 8).
define i32 @caller_i256_indirect_reference_in_stack() {
; LP64-LABEL: name: caller_i256_indirect_reference_in_stack
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; LP64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; LP64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; LP64-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
; LP64-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
; LP64-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
; LP64-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64-NEXT: [[C8:%[0-9]+]]:_(s256) = G_CONSTANT i256 42
; LP64-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64-NEXT: G_STORE [[C8]](s256), [[FRAME_INDEX]](p0) :: (store (s256) into %stack.0, align 16)
; LP64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
; LP64-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
; LP64-NEXT: G_STORE [[FRAME_INDEX]](p0), [[PTR_ADD]](p0) :: (store (p0) into stack, align 16)
; LP64-NEXT: $x10 = COPY [[C]](s64)
; LP64-NEXT: $x11 = COPY [[C1]](s64)
; LP64-NEXT: $x12 = COPY [[C2]](s64)
; LP64-NEXT: $x13 = COPY [[C3]](s64)
; LP64-NEXT: $x14 = COPY [[C4]](s64)
; LP64-NEXT: $x15 = COPY [[C5]](s64)
; LP64-NEXT: $x16 = COPY [[C6]](s64)
; LP64-NEXT: $x17 = COPY [[C7]](s64)
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_i256_indirect_reference_in_stack, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64-NEXT: PseudoRET implicit $x10
; LP64F-LABEL: name: caller_i256_indirect_reference_in_stack
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64F-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; LP64F-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; LP64F-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; LP64F-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
; LP64F-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
; LP64F-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
; LP64F-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64F-NEXT: [[C8:%[0-9]+]]:_(s256) = G_CONSTANT i256 42
; LP64F-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64F-NEXT: G_STORE [[C8]](s256), [[FRAME_INDEX]](p0) :: (store (s256) into %stack.0, align 16)
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
; LP64F-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
; LP64F-NEXT: G_STORE [[FRAME_INDEX]](p0), [[PTR_ADD]](p0) :: (store (p0) into stack, align 16)
; LP64F-NEXT: $x10 = COPY [[C]](s64)
; LP64F-NEXT: $x11 = COPY [[C1]](s64)
; LP64F-NEXT: $x12 = COPY [[C2]](s64)
; LP64F-NEXT: $x13 = COPY [[C3]](s64)
; LP64F-NEXT: $x14 = COPY [[C4]](s64)
; LP64F-NEXT: $x15 = COPY [[C5]](s64)
; LP64F-NEXT: $x16 = COPY [[C6]](s64)
; LP64F-NEXT: $x17 = COPY [[C7]](s64)
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_i256_indirect_reference_in_stack, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
; LP64D-LABEL: name: caller_i256_indirect_reference_in_stack
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64D-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; LP64D-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; LP64D-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; LP64D-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
; LP64D-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
; LP64D-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
; LP64D-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64D-NEXT: [[C8:%[0-9]+]]:_(s256) = G_CONSTANT i256 42
; LP64D-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64D-NEXT: G_STORE [[C8]](s256), [[FRAME_INDEX]](p0) :: (store (s256) into %stack.0, align 16)
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
; LP64D-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
; LP64D-NEXT: G_STORE [[FRAME_INDEX]](p0), [[PTR_ADD]](p0) :: (store (p0) into stack, align 16)
; LP64D-NEXT: $x10 = COPY [[C]](s64)
; LP64D-NEXT: $x11 = COPY [[C1]](s64)
; LP64D-NEXT: $x12 = COPY [[C2]](s64)
; LP64D-NEXT: $x13 = COPY [[C3]](s64)
; LP64D-NEXT: $x14 = COPY [[C4]](s64)
; LP64D-NEXT: $x15 = COPY [[C5]](s64)
; LP64D-NEXT: $x16 = COPY [[C6]](s64)
; LP64D-NEXT: $x17 = COPY [[C7]](s64)
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_i256_indirect_reference_in_stack, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
  %1 = call i64 @callee_i256_indirect_reference_in_stack(i64 1, i64 2, i64 3, i64 4,i64 5,i64 6,i64 7,i64 8, i256 42)
  %2 = trunc i64 %1 to i32
; Callee side of the indirect-on-stack case: the pointer to the i256 is
; loaded from the fixed stack slot, then the i256 value is loaded through it.
define i64 @callee_i256_indirect_reference_in_stack(i64 %x1, i64 %x2, i64 %x3, i64 %x4, i64 %x5, i64 %x6, i64 %x7, i64 %x8, i256 %y) {
; RV64I-LABEL: name: callee_i256_indirect_reference_in_stack
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 16)
; RV64I-NEXT: [[LOAD1:%[0-9]+]]:_(s256) = G_LOAD [[LOAD]](p0) :: (load (s256), align 16)
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[LOAD1]](s256)
; RV64I-NEXT: $x10 = COPY [[TRUNC]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
  %2 = trunc i256 %y to i64
; With argument registers free, each i256 is still passed by reference, but the
; pointers themselves arrive in x10/x11; both values are loaded and added.
define i64 @callee_i256_indirect_reference_in_reg(i256 %x, i256 %y) {
; RV64I-LABEL: name: callee_i256_indirect_reference_in_reg
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s256) = G_LOAD [[COPY]](p0) :: (load (s256), align 16)
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64I-NEXT: [[LOAD1:%[0-9]+]]:_(s256) = G_LOAD [[COPY1]](p0) :: (load (s256), align 16)
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s256) = G_ADD [[LOAD]], [[LOAD1]]
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[ADD]](s256)
; RV64I-NEXT: $x10 = COPY [[TRUNC]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
  %2 = trunc i256 %1 to i64
; Caller side of the indirect-in-register case: each i256 constant is spilled
; to its own stack slot and the slot addresses are passed in x10/x11.
define i32 @caller_i256_indirect_reference_in_reg() {
; LP64-LABEL: name: caller_i256_indirect_reference_in_reg
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[C:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; LP64-NEXT: [[C1:%[0-9]+]]:_(s256) = G_CONSTANT i256 2
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64-NEXT: G_STORE [[C]](s256), [[FRAME_INDEX]](p0) :: (store (s256) into %stack.0, align 16)
; LP64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
; LP64-NEXT: G_STORE [[C1]](s256), [[FRAME_INDEX1]](p0) :: (store (s256) into %stack.1, align 16)
; LP64-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
; LP64-NEXT: $x11 = COPY [[FRAME_INDEX1]](p0)
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_i256_indirect_reference_in_reg, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64-NEXT: PseudoRET implicit $x10
; LP64F-LABEL: name: caller_i256_indirect_reference_in_reg
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[C:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; LP64F-NEXT: [[C1:%[0-9]+]]:_(s256) = G_CONSTANT i256 2
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64F-NEXT: G_STORE [[C]](s256), [[FRAME_INDEX]](p0) :: (store (s256) into %stack.0, align 16)
; LP64F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
; LP64F-NEXT: G_STORE [[C1]](s256), [[FRAME_INDEX1]](p0) :: (store (s256) into %stack.1, align 16)
; LP64F-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
; LP64F-NEXT: $x11 = COPY [[FRAME_INDEX1]](p0)
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_i256_indirect_reference_in_reg, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
; LP64D-LABEL: name: caller_i256_indirect_reference_in_reg
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: [[C:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; LP64D-NEXT: [[C1:%[0-9]+]]:_(s256) = G_CONSTANT i256 2
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64D-NEXT: G_STORE [[C]](s256), [[FRAME_INDEX]](p0) :: (store (s256) into %stack.0, align 16)
; LP64D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
; LP64D-NEXT: G_STORE [[C1]](s256), [[FRAME_INDEX1]](p0) :: (store (s256) into %stack.1, align 16)
; LP64D-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
; LP64D-NEXT: $x11 = COPY [[FRAME_INDEX1]](p0)
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_i256_indirect_reference_in_reg, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
  %1 = call i64 @callee_i256_indirect_reference_in_reg(i256 1, i256 2)
  %2 = trunc i64 %1 to i32
300 ; Check that the stack is used once the GPRs are exhausted
; Callee with more scalar args than fit in x10-x17: the high half of the second
; i128 (%g) is read from %fixed-stack.1 and the final i32 %h from %fixed-stack.0.
define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i128 %d, i32 %e, i32 %f, i128 %g, i32 %h) nounwind {
; RV64I-LABEL: name: callee_many_scalars
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64I-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY3]](s64), [[COPY4]](s64)
; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64I-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64I-NEXT: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY6]](s64)
; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.1, align 16)
; RV64I-NEXT: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY7]](s64), [[LOAD]](s64)
; RV64I-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV64I-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s64) from %fixed-stack.0)
; RV64I-NEXT: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[LOAD1]](s64)
; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s16)
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[TRUNC2]]
; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[MV]](s128), [[MV1]]
; RV64I-NEXT: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ADD1]]
; RV64I-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[TRUNC3]]
; RV64I-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[TRUNC4]]
; RV64I-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[TRUNC5]]
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD5]](s32)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
  %a_ext = zext i8 %a to i32
  %b_ext = zext i16 %b to i32
  %1 = add i32 %a_ext, %b_ext
  %3 = icmp eq i128 %d, %g
  %4 = zext i1 %3 to i32
; Caller side: after x10-x17 are filled, the overflow pieces (the high half of
; the second i128 and the last i32) are stored to the outgoing-argument area
; at sp+0 and sp+8 (16 bytes total, hence ADJCALLSTACKDOWN 16).
define i32 @caller_many_scalars() nounwind {
; LP64-LABEL: name: caller_many_scalars
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; LP64-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
; LP64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; LP64-NEXT: [[C3:%[0-9]+]]:_(s128) = G_CONSTANT i128 4
; LP64-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; LP64-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; LP64-NEXT: [[C6:%[0-9]+]]:_(s128) = G_CONSTANT i128 7
; LP64-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; LP64-NEXT: ADJCALLSTACKDOWN 16, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s8)
; LP64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
; LP64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; LP64-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C3]](s128)
; LP64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
; LP64-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
; LP64-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C6]](s128)
; LP64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
; LP64-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
; LP64-NEXT: G_STORE [[UV3]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
; LP64-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
; LP64-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
; LP64-NEXT: G_STORE [[ANYEXT5]](s64), [[PTR_ADD1]](p0) :: (store (s64) into stack + 8)
; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64-NEXT: $x11 = COPY [[ANYEXT1]](s64)
; LP64-NEXT: $x12 = COPY [[ANYEXT2]](s64)
; LP64-NEXT: $x13 = COPY [[UV]](s64)
; LP64-NEXT: $x14 = COPY [[UV1]](s64)
; LP64-NEXT: $x15 = COPY [[ANYEXT3]](s64)
; LP64-NEXT: $x16 = COPY [[ANYEXT4]](s64)
; LP64-NEXT: $x17 = COPY [[UV2]](s64)
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 16, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; LP64-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64-NEXT: $x10 = COPY [[ANYEXT6]](s64)
; LP64-NEXT: PseudoRET implicit $x10
; LP64F-LABEL: name: caller_many_scalars
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; LP64F-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
; LP64F-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; LP64F-NEXT: [[C3:%[0-9]+]]:_(s128) = G_CONSTANT i128 4
; LP64F-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; LP64F-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; LP64F-NEXT: [[C6:%[0-9]+]]:_(s128) = G_CONSTANT i128 7
; LP64F-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; LP64F-NEXT: ADJCALLSTACKDOWN 16, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s8)
; LP64F-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
; LP64F-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; LP64F-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C3]](s128)
; LP64F-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
; LP64F-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
; LP64F-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C6]](s128)
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
; LP64F-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
; LP64F-NEXT: G_STORE [[UV3]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
; LP64F-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
; LP64F-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
; LP64F-NEXT: G_STORE [[ANYEXT5]](s64), [[PTR_ADD1]](p0) :: (store (s64) into stack + 8)
; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64F-NEXT: $x11 = COPY [[ANYEXT1]](s64)
; LP64F-NEXT: $x12 = COPY [[ANYEXT2]](s64)
; LP64F-NEXT: $x13 = COPY [[UV]](s64)
; LP64F-NEXT: $x14 = COPY [[UV1]](s64)
; LP64F-NEXT: $x15 = COPY [[ANYEXT3]](s64)
; LP64F-NEXT: $x16 = COPY [[ANYEXT4]](s64)
; LP64F-NEXT: $x17 = COPY [[UV2]](s64)
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 16, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; LP64F-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64F-NEXT: $x10 = COPY [[ANYEXT6]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
; LP64D-LABEL: name: caller_many_scalars
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; LP64D-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
; LP64D-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; LP64D-NEXT: [[C3:%[0-9]+]]:_(s128) = G_CONSTANT i128 4
; LP64D-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; LP64D-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; LP64D-NEXT: [[C6:%[0-9]+]]:_(s128) = G_CONSTANT i128 7
; LP64D-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; LP64D-NEXT: ADJCALLSTACKDOWN 16, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s8)
; LP64D-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
; LP64D-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; LP64D-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C3]](s128)
; LP64D-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
; LP64D-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
; LP64D-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C6]](s128)
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
; LP64D-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
; LP64D-NEXT: G_STORE [[UV3]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
; LP64D-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
; LP64D-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
; LP64D-NEXT: G_STORE [[ANYEXT5]](s64), [[PTR_ADD1]](p0) :: (store (s64) into stack + 8)
; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64D-NEXT: $x11 = COPY [[ANYEXT1]](s64)
; LP64D-NEXT: $x12 = COPY [[ANYEXT2]](s64)
; LP64D-NEXT: $x13 = COPY [[UV]](s64)
; LP64D-NEXT: $x14 = COPY [[UV1]](s64)
; LP64D-NEXT: $x15 = COPY [[ANYEXT3]](s64)
; LP64D-NEXT: $x16 = COPY [[ANYEXT4]](s64)
; LP64D-NEXT: $x17 = COPY [[UV2]](s64)
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 16, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; LP64D-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
; LP64D-NEXT: $x10 = COPY [[ANYEXT6]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
  %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i128 4, i32 5, i32 6, i128 7, i32 8)
483 ; Check return of 2x xlen scalars
; An i128 return value is split with G_UNMERGE_VALUES across the x10/x11 pair.
define i128 @callee_small_scalar_ret() nounwind {
; RV64I-LABEL: name: callee_small_scalar_ret
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -1
; RV64I-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C]](s128)
; RV64I-NEXT: $x10 = COPY [[UV]](s64)
; RV64I-NEXT: $x11 = COPY [[UV1]](s64)
; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
; Caller side: the i128 result is rebuilt from x10/x11 with G_MERGE_VALUES
; before being compared against the constant -2.
define i64 @caller_small_scalar_ret() nounwind {
; LP64-LABEL: name: caller_small_scalar_ret
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -2
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; LP64-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
; LP64-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s128), [[MV]]
; LP64-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1)
; LP64-NEXT: $x10 = COPY [[ZEXT]](s64)
; LP64-NEXT: PseudoRET implicit $x10
; LP64F-LABEL: name: caller_small_scalar_ret
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -2
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; LP64F-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
; LP64F-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s128), [[MV]]
; LP64F-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1)
; LP64F-NEXT: $x10 = COPY [[ZEXT]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
; LP64D-LABEL: name: caller_small_scalar_ret
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -2
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; LP64D-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
; LP64D-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s128), [[MV]]
; LP64D-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1)
; LP64D-NEXT: $x10 = COPY [[ZEXT]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
  %1 = call i128 @callee_small_scalar_ret()
  %2 = icmp eq i128 -2, %1
  %3 = zext i1 %2 to i64
544 ; Check return of 2x xlen structs
; A 2*XLen struct is returned in registers: the i64 member in x10, the
; pointer member in x11.
%struct.small = type { i64, ptr }

define %struct.small @callee_small_struct_ret() nounwind {
; RV64I-LABEL: name: callee_small_struct_ret
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64I-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; RV64I-NEXT: $x10 = COPY [[C]](s64)
; RV64I-NEXT: $x11 = COPY [[C1]](p0)
; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
  ret %struct.small { i64 1, ptr null }
; Caller side: the struct fields come back in x10 (s64) and x11 (p0); the
; pointer field is converted with G_PTRTOINT before the add.
define i64 @caller_small_struct_ret() nounwind {
; LP64-LABEL: name: caller_small_struct_ret
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; LP64-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY1]](p0)
; LP64-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[PTRTOINT]]
; LP64-NEXT: $x10 = COPY [[ADD]](s64)
; LP64-NEXT: PseudoRET implicit $x10
; LP64F-LABEL: name: caller_small_struct_ret
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; LP64F-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY1]](p0)
; LP64F-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[PTRTOINT]]
; LP64F-NEXT: $x10 = COPY [[ADD]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
; LP64D-LABEL: name: caller_small_struct_ret
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; LP64D-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY1]](p0)
; LP64D-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[PTRTOINT]]
; LP64D-NEXT: $x10 = COPY [[ADD]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
  %1 = call %struct.small @callee_small_struct_ret()
  %2 = extractvalue %struct.small %1, 0
  %3 = extractvalue %struct.small %1, 1
  %4 = ptrtoint ptr %3 to i64
603 ; Check return of >2x xlen structs
605 %struct.large = type { i64, i64, i64, i64 }
define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result) nounwind {
; RV64I-LABEL: name: callee_large_struct_ret
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: liveins: $x10
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; RV64I-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.agg.result, align 4)
; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64I-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C4]](s64)
; RV64I-NEXT: G_STORE [[C1]](s64), %3(p0) :: (store (s64) into %ir.b, align 4)
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; RV64I-NEXT: %6:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C5]](s64)
; RV64I-NEXT: G_STORE [[C2]](s64), %6(p0) :: (store (s64) into %ir.c, align 4)
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64I-NEXT: %9:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C6]](s64)
; RV64I-NEXT: G_STORE [[C3]](s64), %9(p0) :: (store (s64) into %ir.d, align 4)
; RV64I-NEXT: PseudoRET
; The >2*xlen struct is returned indirectly: the sret pointer arrives in
; $x10 and the four i64 fields are stored through it at byte offsets 0, 8,
; 16 and 24 (each offset formed with a nuw nusw G_PTR_ADD).  PseudoRET has
; no implicit register operands since nothing is returned in registers.
  store i64 1, ptr %agg.result, align 4
  %b = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 1
  store i64 2, ptr %b, align 4
  %c = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 2
  store i64 3, ptr %c, align 4
  %d = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 3
  store i64 4, ptr %d, align 4
define i64 @caller_large_struct_ret() nounwind {
; LP64-LABEL: name: caller_large_struct_ret
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; LP64-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64) from %ir.3)
; LP64-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD1]]
; LP64-NEXT: $x10 = COPY [[ADD]](s64)
; LP64-NEXT: PseudoRET implicit $x10
; LP64F-LABEL: name: caller_large_struct_ret
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; LP64F-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64F-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64) from %ir.3)
; LP64F-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD1]]
; LP64F-NEXT: $x10 = COPY [[ADD]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
; LP64D-LABEL: name: caller_large_struct_ret
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; LP64D-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64D-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64) from %ir.3)
; LP64D-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD1]]
; LP64D-NEXT: $x10 = COPY [[ADD]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
; Caller side of the indirect struct return: a stack slot is created with
; G_FRAME_INDEX, its address is passed to the callee in $x10 (note the call
; uses implicit $x10, not implicit-def — the result comes back via memory),
; and fields 0 and 3 (offset 24) are loaded back after the call.  As above,
; only the PseudoCALL register mask differs between the three ABI prefixes.
  %1 = alloca %struct.large
  call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
  %2 = load i64, ptr %1
  %3 = getelementptr inbounds %struct.large, ptr %1, i64 0, i32 3
  %4 = load i64, ptr %3