; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes='default<O2>' -mtriple=powerpc64le-- -o - %s | \
; RUN:   FileCheck %s
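
; The loop in @test carries "llvm.loop.unroll.count" 16 metadata; the checks
; below verify that the O2 pipeline unrolls it into sixteen copies of the
; lxvp / disassemble.pair / store sequence while keeping the paired-vector
; intrinsics intact.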

define dso_local void @test(ptr %arg) #0 {
; CHECK-LABEL: @test(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB16:%.*]]
; CHECK: bb16:
; CHECK-NEXT: [[I20:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20]])
; CHECK-NEXT: [[I24_ELT:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_1:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_1:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_1]])
; CHECK-NEXT: [[I24_ELT_1:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_1]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_1]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_1:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_1]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_1]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_2:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_2:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_2]])
; CHECK-NEXT: [[I24_ELT_2:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_2]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_2]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_2:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_2]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_2]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_3:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_3:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_3]])
; CHECK-NEXT: [[I24_ELT_3:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_3]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_3]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_3:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_3]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_3]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_4:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_4:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_4]])
; CHECK-NEXT: [[I24_ELT_4:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_4]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_4]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_4:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_4]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_4]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_5:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_5:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_5]])
; CHECK-NEXT: [[I24_ELT_5:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_5]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_5]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_5:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_5]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_5]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_6:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_6:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_6]])
; CHECK-NEXT: [[I24_ELT_6:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_6]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_6]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_6:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_6]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_6]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_7:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_7:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_7]])
; CHECK-NEXT: [[I24_ELT_7:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_7]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_7]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_7:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_7]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_7]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_8:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_8:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_8]])
; CHECK-NEXT: [[I24_ELT_8:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_8]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_8]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_8:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_8]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_8]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_9:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_9:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_9]])
; CHECK-NEXT: [[I24_ELT_9:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_9]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_9]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_9:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_9]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_9]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_10:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_10:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_10]])
; CHECK-NEXT: [[I24_ELT_10:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_10]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_10]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_10:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_10]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_10]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_11:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_11:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_11]])
; CHECK-NEXT: [[I24_ELT_11:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_11]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_11]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_11:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_11]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_11]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_12:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_12:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_12]])
; CHECK-NEXT: [[I24_ELT_12:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_12]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_12]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_12:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_12]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_12]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_13:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_13:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_13]])
; CHECK-NEXT: [[I24_ELT_13:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_13]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_13]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_13:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_13]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_13]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_14:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_14:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_14]])
; CHECK-NEXT: [[I24_ELT_14:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_14]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_14]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_14:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_14]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_14]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: [[I20_15:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr nonnull inttoptr (i64 -32 to ptr))
; CHECK-NEXT: [[I24_15:%.*]] = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> [[I20_15]])
; CHECK-NEXT: [[I24_ELT_15:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_15]], 0
; CHECK-NEXT: store <16 x i8> [[I24_ELT_15]], ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[I24_ELT1_15:%.*]] = extractvalue { <16 x i8>, <16 x i8> } [[I24_15]], 1
; CHECK-NEXT: store <16 x i8> [[I24_ELT1_15]], ptr inttoptr (i64 64 to ptr), align 64
; CHECK-NEXT: br label [[BB16]], !llvm.loop [[LOOP0:![0-9]+]]
;
bb:
  %i = alloca ptr, align 8
  store ptr %arg, ptr %i, align 8
  %i1 = alloca ptr, align 8
  %i2 = alloca ptr, align 8
  %i3 = alloca i32, align 4
  %i4 = alloca i32, align 4
  %i5 = alloca i64, align 8
  %i6 = alloca i64, align 8
  %i7 = alloca <256 x i1>, align 32
  %i8 = load ptr, ptr %i, align 8
  %i9 = load i32, ptr %i8, align 4
  %i10 = sub nsw i32 %i9, 0
  store i32 %i10, ptr %i4, align 4
  %i11 = load i32, ptr %i4, align 4
  %i12 = ashr i32 %i11, 5
  %i13 = sext i32 %i12 to i64
  %i14 = load i64, ptr %i6, align 8
  %i15 = sub nsw i64 %i14, 1
  br label %bb16

bb16:                                             ; preds = %bb16, %bb
  %i17 = load i64, ptr %i5, align 8
  %i18 = icmp sge i64 %i17, 1
  %i19 = getelementptr i8, ptr null, i64 -32
  %i20 = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %i19)
  store <256 x i1> %i20, ptr %i7, align 32
  %i21 = getelementptr inbounds i8, ptr null, i64 48
  %i23 = load <256 x i1>, ptr %i7, align 32
  %i24 = call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %i23)
  store { <16 x i8>, <16 x i8> } %i24, ptr %i21, align 16
  br label %bb16, !llvm.loop !1
}

; Function Attrs: argmemonly nounwind readonly
declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr) #1

; Function Attrs: nounwind readnone
declare { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1>) #2

attributes #0 = { "no-trapping-math"="true" }
attributes #1 = { argmemonly nounwind readonly }
attributes #2 = { nounwind readnone }

!1 = distinct !{!1, !2, !3, !4}
!2 = !{!"llvm.loop.vectorize.width", i32 1}
!3 = !{!"llvm.loop.interleave.count", i32 1}
!4 = !{!"llvm.loop.unroll.count", i32 16}