; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr8 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-LE
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr7 -mattr=+crypto < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr9 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-LE
; FIXME: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
; FIXME: The original intent was to add a check-next for the blr after every check.
; However, this currently fails since we don't eliminate stores of the unused
; locals. These stores are sometimes scheduled after the crypto instruction.

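; Each test below stores known constants to stack locals, loads them back,
; and feeds them to one of the POWER8 crypto intrinsics so that FileCheck
; can match the expected instruction mnemonic. The vpmsum* tests cover
; carry-less (polynomial, GF(2)) multiply-sum at byte, halfword, word, and
; doubleword element widths.
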
; Function Attrs: nounwind
define <16 x i8> @test_vpmsumb() #0 {
entry:
  %a = alloca <16 x i8>, align 16
  %b = alloca <16 x i8>, align 16
  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, ptr %a, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, ptr %b, align 16
  %0 = load <16 x i8>, ptr %a, align 16
  %1 = load <16 x i8>, ptr %b, align 16
  %2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
  ret <16 x i8> %2
; CHECK: vpmsumb 2,
}

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1

; Function Attrs: nounwind
define <8 x i16> @test_vpmsumh() #0 {
entry:
  %a = alloca <8 x i16>, align 16
  %b = alloca <8 x i16>, align 16
  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, ptr %a, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, ptr %b, align 16
  %0 = load <8 x i16>, ptr %a, align 16
  %1 = load <8 x i16>, ptr %b, align 16
  %2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
  ret <8 x i16> %2
; CHECK: vpmsumh 2,
}

; Function Attrs: nounwind readnone
declare <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16>, <8 x i16>) #1

; Function Attrs: nounwind
define <4 x i32> @test_vpmsumw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  %b = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, ptr %a, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, ptr %b, align 16
  %0 = load <4 x i32>, ptr %a, align 16
  %1 = load <4 x i32>, ptr %b, align 16
  %2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
  ret <4 x i32> %2
; CHECK: vpmsumw 2,
}

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32>, <4 x i32>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vpmsumd() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
  %0 = load <2 x i64>, ptr %a, align 16
  %1 = load <2 x i64>, ptr %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vpmsumd 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64>, <2 x i64>) #1

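; vsbox applies the AES SubBytes (S-box) substitution to each byte of its
; 128-bit operand.
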
; Function Attrs: nounwind
define <2 x i64> @test_vsbox() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
  %0 = load <2 x i64>, ptr %a, align 16
  %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
  ret <2 x i64> %1
; CHECK: vsbox 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64>) #1

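; Note: on little-endian targets the backend complements the permute control
; vector before vpermxor (hence the CHECK-LE match on xxlnor below), so the
; intrinsic selects the same elements as on big-endian.
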
; Function Attrs: nounwind
define <16 x i8> @test_vpermxorb() #0 {
entry:
  %a = alloca <16 x i8>, align 16
  %b = alloca <16 x i8>, align 16
  %c = alloca <16 x i8>, align 16
  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, ptr %a, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, ptr %b, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, ptr %c, align 16
  %0 = load <16 x i8>, ptr %a, align 16
  %1 = load <16 x i8>, ptr %b, align 16
  %2 = load <16 x i8>, ptr %c, align 16
  %3 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
  ret <16 x i8> %3
; CHECK-LE: xxlnor
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8>, <16 x i8>, <16 x i8>) #1

; Function Attrs: nounwind
define <8 x i16> @test_vpermxorh() #0 {
entry:
  %a = alloca <8 x i16>, align 16
  %b = alloca <8 x i16>, align 16
  %c = alloca <8 x i16>, align 16
  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, ptr %a, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, ptr %b, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, ptr %c, align 16
  %0 = load <8 x i16>, ptr %a, align 16
  %1 = bitcast <8 x i16> %0 to <16 x i8>
  %2 = load <8 x i16>, ptr %b, align 16
  %3 = bitcast <8 x i16> %2 to <16 x i8>
  %4 = load <8 x i16>, ptr %c, align 16
  %5 = bitcast <8 x i16> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  ret <8 x i16> %7
; CHECK-LE: xxlnor
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <4 x i32> @test_vpermxorw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  %b = alloca <4 x i32>, align 16
  %c = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, ptr %a, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, ptr %b, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, ptr %c, align 16
  %0 = load <4 x i32>, ptr %a, align 16
  %1 = bitcast <4 x i32> %0 to <16 x i8>
  %2 = load <4 x i32>, ptr %b, align 16
  %3 = bitcast <4 x i32> %2 to <16 x i8>
  %4 = load <4 x i32>, ptr %c, align 16
  %5 = bitcast <4 x i32> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  ret <4 x i32> %7
; CHECK-LE: xxlnor
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <2 x i64> @test_vpermxord() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  %c = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %c, align 16
  %0 = load <2 x i64>, ptr %a, align 16
  %1 = bitcast <2 x i64> %0 to <16 x i8>
  %2 = load <2 x i64>, ptr %b, align 16
  %3 = bitcast <2 x i64> %2 to <16 x i8>
  %4 = load <2 x i64>, ptr %c, align 16
  %5 = bitcast <2 x i64> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  ret <2 x i64> %7
; CHECK-LE: xxlnor
; CHECK: vpermxor 2,
}

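; vcipher/vncipher perform one full round of AES encryption/decryption;
; the vcipherlast/vncipherlast tests cover the final round, which omits the
; MixColumns/InvMixColumns step.
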
; Function Attrs: nounwind
define <2 x i64> @test_vcipher() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
  %0 = load <2 x i64>, ptr %a, align 16
  %1 = load <2 x i64>, ptr %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vcipher 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vcipherlast() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
  %0 = load <2 x i64>, ptr %a, align 16
  %1 = load <2 x i64>, ptr %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vcipherlast 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vncipher() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
  %0 = load <2 x i64>, ptr %a, align 16
  %1 = load <2 x i64>, ptr %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vncipher 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vncipherlast() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
  %0 = load <2 x i64>, ptr %a, align 16
  %1 = load <2 x i64>, ptr %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vncipherlast 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64>, <2 x i64>) #1

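; The two i32 immediates passed to vshasigmaw/vshasigmad become the ST and
; SIX instruction fields, selecting which SHA-256/SHA-512 sigma function is
; applied to each element.
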
; Function Attrs: nounwind
define <4 x i32> @test_vshasigmaw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, ptr %a, align 16
  %0 = load <4 x i32>, ptr %a, align 16
  %1 = call <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32> %0, i32 1, i32 15)
  ret <4 x i32> %1
; CHECK: vshasigmaw 2,
}

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32>, i32, i32) #1

; Function Attrs: nounwind
define <2 x i64> @test_vshasigmad() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %a, align 16
  %0 = load <2 x i64>, ptr %a, align 16
  %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64> %0, i32 1, i32 15)
  ret <2 x i64> %1
; CHECK: vshasigmad 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64>, i32, i32) #1

attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }

!llvm.ident = !{!0}

!0 = !{!"clang version 3.7.0 (trunk 230949) (llvm/trunk 230946)"}