; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"

; Make sure callers set up the arguments correctly - tests AArch64ISelLowering::LowerCALL
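; Under the AAPCS64 SVE calling convention the first scalable vector
; arguments are passed in z0-z7. An SVE argument that cannot be allocated to
; vector registers is passed indirectly instead: the caller stores it to a
; stack allocation and passes a pointer to that memory, either in a
; general-purpose register or, once x0-x7 are exhausted, on the stack.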
define float @foo1(double* %x0, double* %x1, double* %x2) nounwind {
; CHECK-LABEL: foo1:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ld4d { z1.d, z2.d, z3.d, z4.d }, p0/z, [x0]
; CHECK-NEXT: ld4d { z16.d, z17.d, z18.d, z19.d }, p0/z, [x1]
; CHECK-NEXT: ld1d { z5.d }, p0/z, [x2]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: fmov s0, #1.00000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: st1d { z16.d }, p0, [sp]
; CHECK-NEXT: st1d { z17.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1d { z18.d }, p0, [x8, #2, mul vl]
; CHECK-NEXT: st1d { z19.d }, p0, [x8, #3, mul vl]
; CHECK-NEXT: bl callee1
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
  %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
  %2 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x0)
  %3 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x1)
  %4 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %1, double* %x2)
  %call = call float @callee1(float 1.000000e+00, <vscale x 8 x double> %2, <vscale x 8 x double> %3, <vscale x 2 x double> %4)
  ret float %call
}

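; In @foo2 the eight integer arguments occupy x0-x7, so the pointer to the
; indirectly passed tuple cannot go in a register; the caller spills it to
; the stack argument area instead ("str x8, [sp]" below).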
define float @foo2(double* %x0, double* %x1) nounwind {
; CHECK-LABEL: foo2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ld4d { z1.d, z2.d, z3.d, z4.d }, p0/z, [x0]
; CHECK-NEXT: ld4d { z16.d, z17.d, z18.d, z19.d }, p0/z, [x1]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: add x9, sp, #16
; CHECK-NEXT: fmov s0, #1.00000000
; CHECK-NEXT: mov w1, #1
; CHECK-NEXT: mov w2, #2
; CHECK-NEXT: mov w3, #3
; CHECK-NEXT: mov w4, #4
; CHECK-NEXT: mov w5, #5
; CHECK-NEXT: mov w6, #6
; CHECK-NEXT: mov w7, #7
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: st1d { z16.d }, p0, [x9]
; CHECK-NEXT: st1d { z17.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1d { z18.d }, p0, [x8, #2, mul vl]
; CHECK-NEXT: st1d { z19.d }, p0, [x8, #3, mul vl]
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl callee2
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
  %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
  %2 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x0)
  %3 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x1)
  %call = call float @callee2(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, float 1.000000e+00, <vscale x 8 x double> %2, <vscale x 8 x double> %3)
  ret float %call
}

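; In @foo3 the <vscale x 6 x double> tuple from ld3 needs three vector
; registers, but after s0, s1 and z2-z5 only z6 and z7 remain, so it is
; passed indirectly and only three vector-lengths of stack are required
; ("addvl sp, sp, #-3").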
define float @foo3(double* %x0, double* %x1, double* %x2) nounwind {
; CHECK-LABEL: foo3:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ld4d { z2.d, z3.d, z4.d, z5.d }, p0/z, [x0]
; CHECK-NEXT: ld3d { z16.d, z17.d, z18.d }, p0/z, [x1]
; CHECK-NEXT: ld1d { z6.d }, p0/z, [x2]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: fmov s0, #1.00000000
; CHECK-NEXT: fmov s1, #2.00000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: st1d { z16.d }, p0, [sp]
; CHECK-NEXT: st1d { z17.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1d { z18.d }, p0, [x8, #2, mul vl]
; CHECK-NEXT: bl callee3
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
  %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
  %2 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x0)
  %3 = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1(<vscale x 2 x i1> %1, double* %x1)
  %4 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %1, double* %x2)
  %call = call float @callee3(float 1.000000e+00, float 2.000000e+00, <vscale x 8 x double> %2, <vscale x 6 x double> %3, <vscale x 2 x double> %4)
  ret float %call
}

; Make sure callees read the arguments correctly - tests AArch64ISelLowering::LowerFormalArguments
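; On the callee side an indirectly passed SVE argument arrives as a pointer,
; so the callee reloads each part with ld1d from that pointer (or, in @foo5,
; from a pointer it must first fetch off the stack) before it can use the
; value.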
define double @foo4(double %x0, double * %ptr1, double * %ptr2, double * %ptr3, <vscale x 8 x double> %x1, <vscale x 8 x double> %x2, <vscale x 2 x double> %x3) nounwind {
; CHECK-LABEL: foo4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1d { z6.d }, p0/z, [x3, #1, mul vl]
; CHECK-NEXT: ld1d { z7.d }, p0/z, [x3]
; CHECK-NEXT: ld1d { z24.d }, p0/z, [x3, #3, mul vl]
; CHECK-NEXT: ld1d { z25.d }, p0/z, [x3, #2, mul vl]
; CHECK-NEXT: st1d { z4.d }, p0, [x0, #3, mul vl]
; CHECK-NEXT: st1d { z3.d }, p0, [x0, #2, mul vl]
; CHECK-NEXT: st1d { z2.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT: st1d { z1.d }, p0, [x0]
; CHECK-NEXT: st1d { z25.d }, p0, [x1, #2, mul vl]
; CHECK-NEXT: st1d { z24.d }, p0, [x1, #3, mul vl]
; CHECK-NEXT: st1d { z7.d }, p0, [x1]
; CHECK-NEXT: st1d { z6.d }, p0, [x1, #1, mul vl]
; CHECK-NEXT: st1d { z5.d }, p0, [x2]
; CHECK-NEXT: ret
entry:
  %ptr1.bc = bitcast double * %ptr1 to <vscale x 8 x double> *
  store volatile <vscale x 8 x double> %x1, <vscale x 8 x double>* %ptr1.bc
  %ptr2.bc = bitcast double * %ptr2 to <vscale x 8 x double> *
  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr2.bc
  %ptr3.bc = bitcast double * %ptr3 to <vscale x 2 x double> *
  store volatile <vscale x 2 x double> %x3, <vscale x 2 x double>* %ptr3.bc
  ret double %x0
}

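; In @foo5 the integer and pointer arguments consume x0-x7, so the pointer
; to the second tuple arrives in the stack argument area and the callee must
; reload it first ("ldr x8, [sp]").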
define double @foo5(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, double * %ptr1, double * %ptr2, double %x0, <vscale x 8 x double> %x1, <vscale x 8 x double> %x2) nounwind {
; CHECK-LABEL: foo5:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr x8, [sp]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1d { z5.d }, p0/z, [x8, #1, mul vl]
; CHECK-NEXT: ld1d { z6.d }, p0/z, [x8]
; CHECK-NEXT: ld1d { z7.d }, p0/z, [x8, #3, mul vl]
; CHECK-NEXT: ld1d { z24.d }, p0/z, [x8, #2, mul vl]
; CHECK-NEXT: st1d { z4.d }, p0, [x6, #3, mul vl]
; CHECK-NEXT: st1d { z3.d }, p0, [x6, #2, mul vl]
; CHECK-NEXT: st1d { z2.d }, p0, [x6, #1, mul vl]
; CHECK-NEXT: st1d { z1.d }, p0, [x6]
; CHECK-NEXT: st1d { z24.d }, p0, [x7, #2, mul vl]
; CHECK-NEXT: st1d { z7.d }, p0, [x7, #3, mul vl]
; CHECK-NEXT: st1d { z6.d }, p0, [x7]
; CHECK-NEXT: st1d { z5.d }, p0, [x7, #1, mul vl]
; CHECK-NEXT: ret
entry:
  %ptr1.bc = bitcast double * %ptr1 to <vscale x 8 x double> *
  store volatile <vscale x 8 x double> %x1, <vscale x 8 x double>* %ptr1.bc
  %ptr2.bc = bitcast double * %ptr2 to <vscale x 8 x double> *
  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr2.bc
  ret double %x0
}

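; In @foo6 the first tuple fits in z2-z5, while the three-vector
; <vscale x 6 x double> tuple does not fit in the two remaining z-registers
; and is received indirectly through x2.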
define double @foo6(double %x0, double %x1, double * %ptr1, double * %ptr2, <vscale x 8 x double> %x2, <vscale x 6 x double> %x3) nounwind {
; CHECK-LABEL: foo6:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x2]
; CHECK-NEXT: ld1d { z6.d }, p0/z, [x2, #2, mul vl]
; CHECK-NEXT: ld1d { z7.d }, p0/z, [x2, #1, mul vl]
; CHECK-NEXT: st1d { z5.d }, p0, [x0, #3, mul vl]
; CHECK-NEXT: st1d { z4.d }, p0, [x0, #2, mul vl]
; CHECK-NEXT: st1d { z3.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT: st1d { z2.d }, p0, [x0]
; CHECK-NEXT: st1d { z7.d }, p0, [x1, #1, mul vl]
; CHECK-NEXT: st1d { z6.d }, p0, [x1, #2, mul vl]
; CHECK-NEXT: st1d { z1.d }, p0, [x1]
; CHECK-NEXT: ret
entry:
  %ptr1.bc = bitcast double * %ptr1 to <vscale x 8 x double> *
  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr1.bc
  %ptr2.bc = bitcast double * %ptr2 to <vscale x 6 x double> *
  store volatile <vscale x 6 x double> %x3, <vscale x 6 x double>* %ptr2.bc
  ret double %x0
}

declare float @callee1(float, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 2 x double>)
declare float @callee2(i32, i32, i32, i32, i32, i32, i32, i32, float, <vscale x 8 x double>, <vscale x 8 x double>)
declare float @callee3(float, float, <vscale x 8 x double>, <vscale x 6 x double>, <vscale x 2 x double>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)
declare <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1>, double*)
declare <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1(<vscale x 2 x i1>, double*)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)