; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f \
; RUN:   -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
; RUN:   -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; This file contains tests that should have identical output for the lp64,
; lp64f, and lp64d ABIs. i.e. where no arguments are passed according to
; the floating point ABI. It doesn't check codegen when frame pointer
; elimination is disabled, as there is sufficient coverage for this case in
; other test files.

; Check that on RV64, i128 is passed in a pair of registers. Unlike
; the convention for varargs, this need not be an aligned pair.
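; In callee_i128_in_regs below, %b is split across the a1/a2 pair (an odd/even
; pair that would not be allowed for a varargs i128), so the truncated low half
; is read from a1.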
define i64 @callee_i128_in_regs(i64 %a, i128 %b) nounwind {
; RV64I-LABEL: callee_i128_in_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
  %b_trunc = trunc i128 %b to i64
  %1 = add i64 %a, %b_trunc
  ret i64 %1
}

define i64 @caller_i128_in_regs() nounwind {
; RV64I-LABEL: caller_i128_in_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call callee_i128_in_regs@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @callee_i128_in_regs(i64 1, i128 2)
  ret i64 %1
}

; Check that the stack is used once the GPRs are exhausted
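; In callee_many_scalars, a0-a7 are exhausted: a3/a4 carry the first i128 (%d)
; and only a7 is left for the low half of the second i128 (%g), so %g's high
; half and the final i32 (%h) are loaded from the stack (t1 and t0 below).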
define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i128 %d, i32 %e, i32 %f, i128 %g, i32 %h) nounwind {
; RV64I-LABEL: callee_many_scalars:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lw t0, 8(sp)
; RV64I-NEXT:    ld t1, 0(sp)
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    slli a1, a1, 48
; RV64I-NEXT:    srli a1, a1, 48
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    addw a0, a0, a2
; RV64I-NEXT:    xor a1, a4, t1
; RV64I-NEXT:    xor a2, a3, a7
; RV64I-NEXT:    or a1, a2, a1
; RV64I-NEXT:    seqz a1, a1
; RV64I-NEXT:    addw a0, a1, a0
; RV64I-NEXT:    addw a0, a0, a5
; RV64I-NEXT:    addw a0, a0, a6
; RV64I-NEXT:    addw a0, a0, t0
; RV64I-NEXT:    ret
  %a_ext = zext i8 %a to i32
  %b_ext = zext i16 %b to i32
  %1 = add i32 %a_ext, %b_ext
  %2 = add i32 %1, %c
  %3 = icmp eq i128 %d, %g
  %4 = zext i1 %3 to i32
  %5 = add i32 %4, %2
  %6 = add i32 %5, %e
  %7 = add i32 %6, %f
  %8 = add i32 %7, %h
  ret i32 %8
}

define i32 @caller_many_scalars() nounwind {
; RV64I-LABEL: caller_many_scalars:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 8
; RV64I-NEXT:    sd a0, 8(sp)
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    li a5, 5
; RV64I-NEXT:    li a6, 6
; RV64I-NEXT:    li a7, 7
; RV64I-NEXT:    sd zero, 0(sp)
; RV64I-NEXT:    li a4, 0
; RV64I-NEXT:    call callee_many_scalars@plt
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i128 4, i32 5, i32 6, i128 7, i32 8)
  ret i32 %1
}

; Check that i256 is passed indirectly.
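; Each i256 argument is materialised in the caller's frame and its address is
; passed in a GPR (a0 and a1 below); the callee loads all four words through
; those pointers.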
define i64 @callee_large_scalars(i256 %a, i256 %b) nounwind {
; RV64I-LABEL: callee_large_scalars:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a2, 0(a1)
; RV64I-NEXT:    ld a3, 0(a0)
; RV64I-NEXT:    ld a4, 8(a1)
; RV64I-NEXT:    ld a5, 24(a1)
; RV64I-NEXT:    ld a6, 24(a0)
; RV64I-NEXT:    ld a7, 8(a0)
; RV64I-NEXT:    ld a1, 16(a1)
; RV64I-NEXT:    ld a0, 16(a0)
; RV64I-NEXT:    xor a5, a6, a5
; RV64I-NEXT:    xor a4, a7, a4
; RV64I-NEXT:    or a4, a4, a5
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    xor a1, a3, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    or a0, a0, a4
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = icmp eq i256 %a, %b
  %2 = zext i1 %1 to i64
  ret i64 %2
}

define i64 @caller_large_scalars() nounwind {
; RV64I-LABEL: caller_large_scalars:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -80
; RV64I-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd zero, 24(sp)
; RV64I-NEXT:    sd zero, 16(sp)
; RV64I-NEXT:    sd zero, 8(sp)
; RV64I-NEXT:    li a0, 2
; RV64I-NEXT:    sd a0, 0(sp)
; RV64I-NEXT:    sd zero, 56(sp)
; RV64I-NEXT:    sd zero, 48(sp)
; RV64I-NEXT:    sd zero, 40(sp)
; RV64I-NEXT:    li a2, 1
; RV64I-NEXT:    addi a0, sp, 32
; RV64I-NEXT:    mv a1, sp
; RV64I-NEXT:    sd a2, 32(sp)
; RV64I-NEXT:    call callee_large_scalars@plt
; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 80
; RV64I-NEXT:    ret
  %1 = call i64 @callee_large_scalars(i256 1, i256 2)
  ret i64 %1
}

; Check that arguments larger than 2*xlen are handled correctly when their
; address is passed on the stack rather than in a register
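; Here a7 carries the address of the first i256 (%h), while the address of the
; second i256 (%j) is itself passed on the stack and reloaded from 8(sp).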
; Must keep define on a single line due to an update_llc_test_checks.py limitation
define i64 @callee_large_scalars_exhausted_regs(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i256 %h, i64 %i, i256 %j) nounwind {
; RV64I-LABEL: callee_large_scalars_exhausted_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a0, 8(sp)
; RV64I-NEXT:    ld a1, 0(a0)
; RV64I-NEXT:    ld a2, 0(a7)
; RV64I-NEXT:    ld a3, 8(a0)
; RV64I-NEXT:    ld a4, 24(a0)
; RV64I-NEXT:    ld a5, 24(a7)
; RV64I-NEXT:    ld a6, 8(a7)
; RV64I-NEXT:    ld a0, 16(a0)
; RV64I-NEXT:    ld a7, 16(a7)
; RV64I-NEXT:    xor a4, a5, a4
; RV64I-NEXT:    xor a3, a6, a3
; RV64I-NEXT:    or a3, a3, a4
; RV64I-NEXT:    xor a0, a7, a0
; RV64I-NEXT:    xor a1, a2, a1
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    or a0, a0, a3
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = icmp eq i256 %h, %j
  %2 = zext i1 %1 to i64
  ret i64 %2
}

define i64 @caller_large_scalars_exhausted_regs() nounwind {
; RV64I-LABEL: caller_large_scalars_exhausted_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -96
; RV64I-NEXT:    sd ra, 88(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi a0, sp, 16
; RV64I-NEXT:    sd a0, 8(sp)
; RV64I-NEXT:    li a0, 9
; RV64I-NEXT:    sd a0, 0(sp)
; RV64I-NEXT:    sd zero, 40(sp)
; RV64I-NEXT:    sd zero, 32(sp)
; RV64I-NEXT:    sd zero, 24(sp)
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    sd a0, 16(sp)
; RV64I-NEXT:    sd zero, 72(sp)
; RV64I-NEXT:    sd zero, 64(sp)
; RV64I-NEXT:    sd zero, 56(sp)
; RV64I-NEXT:    li t0, 8
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    li a4, 5
; RV64I-NEXT:    li a5, 6
; RV64I-NEXT:    li a6, 7
; RV64I-NEXT:    addi a7, sp, 48
; RV64I-NEXT:    sd t0, 48(sp)
; RV64I-NEXT:    call callee_large_scalars_exhausted_regs@plt
; RV64I-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 96
; RV64I-NEXT:    ret
  %1 = call i64 @callee_large_scalars_exhausted_regs(
      i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i256 8, i64 9,
      i256 10)
  ret i64 %1
}

; Ensure that libcalls generated in the middle-end obey the calling convention
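; The i64-to-fp128 conversion becomes a call to __floatditf; the fp128 result
; comes back in a0/a1, so truncating its i128 bit pattern to i64 simply keeps a0.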
define i64 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
; RV64I-LABEL: caller_mixed_scalar_libcalls:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatditf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i64 %a to fp128
  %2 = bitcast fp128 %1 to i128
  %3 = trunc i128 %2 to i64
  ret i64 %3
}

; Check passing of coerced integer arrays
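; %struct.small is passed here as a coerced [2 x i64], so both members travel
; in a0/a1 just like two scalar arguments.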
%struct.small = type { i64, i64* }

define i64 @callee_small_coerced_struct([2 x i64] %a.coerce) nounwind {
; RV64I-LABEL: callee_small_coerced_struct:
; RV64I:       # %bb.0:
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = extractvalue [2 x i64] %a.coerce, 0
  %2 = extractvalue [2 x i64] %a.coerce, 1
  %3 = icmp eq i64 %1, %2
  %4 = zext i1 %3 to i64
  ret i64 %4
}

define i64 @caller_small_coerced_struct() nounwind {
; RV64I-LABEL: caller_small_coerced_struct:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call callee_small_coerced_struct@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @callee_small_coerced_struct([2 x i64] [i64 1, i64 2])
  ret i64 %1
}

; Check large struct arguments, which are passed byval
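; The caller copies the byval struct into its own frame and passes its address
; in a0; the callee loads the first and last fields through that pointer.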
%struct.large = type { i64, i64, i64, i64 }

define i64 @callee_large_struct(%struct.large* byval(%struct.large) align 8 %a) nounwind {
; RV64I-LABEL: callee_large_struct:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a1, 0(a0)
; RV64I-NEXT:    ld a0, 24(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds %struct.large, %struct.large* %a, i64 0, i32 0
  %2 = getelementptr inbounds %struct.large, %struct.large* %a, i64 0, i32 3
  %3 = load i64, i64* %1
  %4 = load i64, i64* %2
  %5 = add i64 %3, %4
  ret i64 %5
}

define i64 @caller_large_struct() nounwind {
; RV64I-LABEL: caller_large_struct:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -80
; RV64I-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    sd a0, 40(sp)
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    sd a1, 48(sp)
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    sd a2, 56(sp)
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    sd a3, 64(sp)
; RV64I-NEXT:    sd a0, 8(sp)
; RV64I-NEXT:    sd a1, 16(sp)
; RV64I-NEXT:    sd a2, 24(sp)
; RV64I-NEXT:    sd a3, 32(sp)
; RV64I-NEXT:    addi a0, sp, 8
; RV64I-NEXT:    call callee_large_struct@plt
; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 80
; RV64I-NEXT:    ret
  %ls = alloca %struct.large, align 8
  %1 = bitcast %struct.large* %ls to i8*
  %a = getelementptr inbounds %struct.large, %struct.large* %ls, i64 0, i32 0
  store i64 1, i64* %a
  %b = getelementptr inbounds %struct.large, %struct.large* %ls, i64 0, i32 1
  store i64 2, i64* %b
  %c = getelementptr inbounds %struct.large, %struct.large* %ls, i64 0, i32 2
  store i64 3, i64* %c
  %d = getelementptr inbounds %struct.large, %struct.large* %ls, i64 0, i32 3
  store i64 4, i64* %d
  %2 = call i64 @callee_large_struct(%struct.large* byval(%struct.large) align 8 %ls)
  ret i64 %2
}

; Check 2x*xlen values are aligned appropriately when passed on the stack
; Must keep define on a single line due to an update_llc_test_checks.py limitation
define i64 @callee_aligned_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i128 %f, i64 %g, i64 %h, i128 %i, i64 %j, [2 x i64] %k) nounwind {
; The i128 should be 16-byte aligned on the stack, but the two-element array
; should only be 8-byte aligned
; RV64I-LABEL: callee_aligned_stack:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a0, 40(sp)
; RV64I-NEXT:    ld a1, 0(sp)
; RV64I-NEXT:    ld a2, 16(sp)
; RV64I-NEXT:    ld a3, 32(sp)
; RV64I-NEXT:    add a4, a5, a7
; RV64I-NEXT:    add a1, a4, a1
; RV64I-NEXT:    add a1, a1, a2
; RV64I-NEXT:    add a1, a1, a3
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %f_trunc = trunc i128 %f to i64
  %1 = add i64 %f_trunc, %g
  %2 = add i64 %1, %h
  %3 = trunc i128 %i to i64
  %4 = add i64 %2, %3
  %5 = add i64 %4, %j
  %6 = extractvalue [2 x i64] %k, 0
  %7 = add i64 %5, %6
  ret i64 %7
}

define void @caller_aligned_stack() nounwind {
; The i128 should be 16-byte aligned on the stack, but the two-element array
; should only be 8-byte aligned
; RV64I-LABEL: caller_aligned_stack:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -64
; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 12
; RV64I-NEXT:    sd a0, 48(sp)
; RV64I-NEXT:    li a0, 11
; RV64I-NEXT:    sd a0, 40(sp)
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    sd a0, 32(sp)
; RV64I-NEXT:    sd zero, 24(sp)
; RV64I-NEXT:    li a0, 9
; RV64I-NEXT:    sd a0, 16(sp)
; RV64I-NEXT:    li a6, 8
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    li a4, 5
; RV64I-NEXT:    li a5, 6
; RV64I-NEXT:    li a7, 7
; RV64I-NEXT:    sd a6, 0(sp)
; RV64I-NEXT:    li a6, 0
; RV64I-NEXT:    call callee_aligned_stack@plt
; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 64
; RV64I-NEXT:    ret
  %1 = call i64 @callee_aligned_stack(i64 1, i64 2, i64 3, i64 4, i64 5,
      i128 6, i64 7, i64 8, i128 9, i64 10, [2 x i64] [i64 11, i64 12])
  ret void
}

; Check return of 2x xlen scalars
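; An i128 return value is split across a0 (low half) and a1 (high half).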
define i128 @callee_small_scalar_ret() nounwind {
; RV64I-LABEL: callee_small_scalar_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, -1
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    ret
  ret i128 -1
}

define i64 @caller_small_scalar_ret() nounwind {
; RV64I-LABEL: caller_small_scalar_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call callee_small_scalar_ret@plt
; RV64I-NEXT:    not a1, a1
; RV64I-NEXT:    xori a0, a0, -2
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i128 @callee_small_scalar_ret()
  %2 = icmp eq i128 -2, %1
  %3 = zext i1 %2 to i64
  ret i64 %3
}

; Check return of 2x xlen structs
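; The two members of %struct.small are returned in a0 and a1.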
define %struct.small @callee_small_struct_ret() nounwind {
; RV64I-LABEL: callee_small_struct_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    ret
  ret %struct.small { i64 1, i64* null }
}

define i64 @caller_small_struct_ret() nounwind {
; RV64I-LABEL: caller_small_struct_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call callee_small_struct_ret@plt
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call %struct.small @callee_small_struct_ret()
  %2 = extractvalue %struct.small %1, 0
  %3 = extractvalue %struct.small %1, 1
  %4 = ptrtoint i64* %3 to i64
  %5 = add i64 %2, %4
  ret i64 %5
}

; Check return of >2x xlen scalars
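; An i256 return is indirect: the caller passes the address of a stack
; temporary in a0 and the callee stores the result through that pointer.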
define i256 @callee_large_scalar_ret() nounwind {
; RV64I-LABEL: callee_large_scalar_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    sd a1, 24(a0)
; RV64I-NEXT:    sd a1, 16(a0)
; RV64I-NEXT:    sd a1, 8(a0)
; RV64I-NEXT:    lui a1, 1018435
; RV64I-NEXT:    addiw a1, a1, 747
; RV64I-NEXT:    sd a1, 0(a0)
; RV64I-NEXT:    ret
  ret i256 -123456789
}

define void @caller_large_scalar_ret() nounwind {
; RV64I-LABEL: caller_large_scalar_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv a0, sp
; RV64I-NEXT:    call callee_large_scalar_ret@plt
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
  %1 = call i256 @callee_large_scalar_ret()
  ret void
}

; Check return of >2x xlen structs
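; The sret pointer arrives in a0; the callee stores the fields through it and
; the caller reads the first and last fields back from its own stack slot.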
define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large) %agg.result) nounwind {
; RV64I-LABEL: callee_large_struct_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sw zero, 4(a0)
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    sw a1, 0(a0)
; RV64I-NEXT:    sw zero, 12(a0)
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    sw a1, 8(a0)
; RV64I-NEXT:    sw zero, 20(a0)
; RV64I-NEXT:    li a1, 3
; RV64I-NEXT:    sw a1, 16(a0)
; RV64I-NEXT:    sw zero, 28(a0)
; RV64I-NEXT:    li a1, 4
; RV64I-NEXT:    sw a1, 24(a0)
; RV64I-NEXT:    ret
  %a = getelementptr inbounds %struct.large, %struct.large* %agg.result, i64 0, i32 0
  store i64 1, i64* %a, align 4
  %b = getelementptr inbounds %struct.large, %struct.large* %agg.result, i64 0, i32 1
  store i64 2, i64* %b, align 4
  %c = getelementptr inbounds %struct.large, %struct.large* %agg.result, i64 0, i32 2
  store i64 3, i64* %c, align 4
  %d = getelementptr inbounds %struct.large, %struct.large* %agg.result, i64 0, i32 3
  store i64 4, i64* %d, align 4
  ret void
}

define i64 @caller_large_struct_ret() nounwind {
; RV64I-LABEL: caller_large_struct_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi a0, sp, 8
; RV64I-NEXT:    call callee_large_struct_ret@plt
; RV64I-NEXT:    ld a0, 8(sp)
; RV64I-NEXT:    ld a1, 32(sp)
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
  %1 = alloca %struct.large
  call void @callee_large_struct_ret(%struct.large* sret(%struct.large) %1)
  %2 = getelementptr inbounds %struct.large, %struct.large* %1, i64 0, i32 0
  %3 = load i64, i64* %2
  %4 = getelementptr inbounds %struct.large, %struct.large* %1, i64 0, i32 3
  %5 = load i64, i64* %4
  %6 = add i64 %3, %5
  ret i64 %6
}