; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s

; == 8 to 64-bit elements ==

define { <vscale x 16 x i8>, <vscale x 16 x i8> } @sel_x2_i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2) nounwind {
; CHECK-LABEL: sel_x2_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    sel { z0.b, z1.b }, pn8, { z6.b, z7.b }, { z4.b, z5.b }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.sel.x2.nxv16i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2)
  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}

define { <vscale x 8 x i16>, <vscale x 8 x i16> } @sel_x2_i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2) nounwind {
; CHECK-LABEL: sel_x2_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    sel { z0.h, z1.h }, pn8, { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.sel.x2.nxv8i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2)
  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}

define { <vscale x 8 x half>, <vscale x 8 x half> } @sel_x2_f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2) nounwind {
; CHECK-LABEL: sel_x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    sel { z0.h, z1.h }, pn8, { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.sel.x2.nxv8f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2)
  ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
}

define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @sel_x2_bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2) nounwind {
; CHECK-LABEL: sel_x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    sel { z0.h, z1.h }, pn8, { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.sel.x2.nxv8bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2)
  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}

define { <vscale x 4 x i32>, <vscale x 4 x i32> } @sel_x2_i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) nounwind {
; CHECK-LABEL: sel_x2_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    sel { z0.s, z1.s }, pn8, { z6.s, z7.s }, { z4.s, z5.s }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.sel.x2.nxv4i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}

define { <vscale x 4 x float>, <vscale x 4 x float> } @sel_x2_f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2) nounwind {
; CHECK-LABEL: sel_x2_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    sel { z0.s, z1.s }, pn8, { z6.s, z7.s }, { z4.s, z5.s }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.sel.x2.nxv4f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
  ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
}

define { <vscale x 2 x i64>, <vscale x 2 x i64> } @sel_x2_i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) nounwind {
; CHECK-LABEL: sel_x2_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    sel { z0.d, z1.d }, pn8, { z6.d, z7.d }, { z4.d, z5.d }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.sel.x2.nxv2i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}

define { <vscale x 2 x double>, <vscale x 2 x double> } @sel_x2_f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %unused, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2) nounwind {
; CHECK-LABEL: sel_x2_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    sel { z0.d, z1.d }, pn8, { z6.d, z7.d }, { z4.d, z5.d }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.sel.x2.nxv2f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)
  ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
}

; == 8 to 64-bit elements ==
declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.sel.x2.nxv16i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2)
declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.sel.x2.nxv8i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2)
declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.sel.x2.nxv4i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.sel.x2.nxv2i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.sel.x2.nxv8f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2)
declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.sel.x2.nxv8bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2)
declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.sel.x2.nxv4f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.sel.x2.nxv2f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)