# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=GFX8
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx906 -O0 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=GFX9

---
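# s32: the result is a plain G_MUL; overflow is G_UMULH of the operands compared against zero.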
name: test_umulo_s32
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; GFX8-LABEL: name: test_umulo_s32
    ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY]], [[COPY1]]
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UMULH]](s32), [[C]]
    ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
    ; GFX8: $vgpr0 = COPY [[MUL]](s32)
    ; GFX8: $vgpr1 = COPY [[ZEXT]](s32)
    ; GFX9-LABEL: name: test_umulo_s32
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY]], [[COPY1]]
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UMULH]](s32), [[C]]
    ; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
    ; GFX9: $vgpr0 = COPY [[MUL]](s32)
    ; GFX9: $vgpr1 = COPY [[ZEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32), %3:_(s1) = G_UMULO %0, %1
    %4:_(s32) = G_ZEXT %3
    $vgpr0 = COPY %2
    $vgpr1 = COPY %4
...

---
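# <2 x s32> is scalarized into two copies of the s32 expansion; the <2 x s1> overflow lanes are zero-extended via G_ANYEXT masked with 1.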
name: test_umulo_v2s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3

    ; GFX8-LABEL: name: test_umulo_v2s32
    ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV2]]
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]]
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UMULH]](s32), [[C]]
    ; GFX8: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV3]]
    ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UMULH1]](s32), [[C]]
    ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
    ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
    ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
    ; GFX8: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
    ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
    ; GFX8: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
    ; GFX9-LABEL: name: test_umulo_v2s32
    ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV2]]
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]]
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UMULH]](s32), [[C]]
    ; GFX9: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV3]]
    ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UMULH1]](s32), [[C]]
    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
    ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
    ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
    ; GFX9: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
    %2:_(<2 x s32>), %3:_(<2 x s1>) = G_UMULO %0, %1
    %4:_(<2 x s32>) = G_ZEXT %3
    $vgpr0_vgpr1 = COPY %2
    $vgpr2_vgpr3 = COPY %4
...

---
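# s64 is decomposed into 32-bit partial products combined through G_UADDO carry chains; the high 64 bits of the full product are compared against zero for overflow, and the low 64 bits are recomputed as the result.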
name: test_umulo_s64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3

    ; GFX8-LABEL: name: test_umulo_s64
    ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
    ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
    ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
    ; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV2]]
    ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
    ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO1]](s1)
    ; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UADDO]], [[UMULH]]
    ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO3]](s1)
    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
    ; GFX8: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
    ; GFX8: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV2]]
    ; GFX8: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV3]]
    ; GFX8: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[MUL2]], [[UMULH1]]
    ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO5]](s1)
    ; GFX8: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UADDO4]], [[UMULH2]]
    ; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO7]](s1)
    ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ZEXT3]]
    ; GFX8: [[UADDO8:%[0-9]+]]:_(s32), [[UADDO9:%[0-9]+]]:_(s1) = G_UADDO [[UADDO6]], [[ADD]]
    ; GFX8: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO9]](s1)
    ; GFX8: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ZEXT4]]
    ; GFX8: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV3]]
    ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD2]]
    ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO8]](s32), [[ADD3]](s32)
    ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
    ; GFX8: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV6]]
    ; GFX8: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV6]]
    ; GFX8: [[MUL5:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV7]]
    ; GFX8: [[UMULH4:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV6]]
    ; GFX8: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[MUL4]], [[MUL5]]
    ; GFX8: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH4]]
    ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL3]](s32), [[ADD5]](s32)
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MV]](s64), [[C]]
    ; GFX8: [[ZEXT5:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1)
    ; GFX8: $vgpr0_vgpr1 = COPY [[MV1]](s64)
    ; GFX8: $vgpr2_vgpr3 = COPY [[ZEXT5]](s64)
    ; GFX9-LABEL: name: test_umulo_s64
    ; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
    ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
    ; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV2]]
    ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
    ; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO1]](s1)
    ; GFX9: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UADDO]], [[UMULH]]
    ; GFX9: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO3]](s1)
    ; GFX9: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
    ; GFX9: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
    ; GFX9: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV2]]
    ; GFX9: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV3]]
    ; GFX9: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[MUL2]], [[UMULH1]]
    ; GFX9: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO5]](s1)
    ; GFX9: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UADDO4]], [[UMULH2]]
    ; GFX9: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO7]](s1)
    ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ZEXT3]]
    ; GFX9: [[UADDO8:%[0-9]+]]:_(s32), [[UADDO9:%[0-9]+]]:_(s1) = G_UADDO [[UADDO6]], [[ADD]]
    ; GFX9: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO9]](s1)
    ; GFX9: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ZEXT4]]
    ; GFX9: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV3]]
    ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD2]]
    ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO8]](s32), [[ADD3]](s32)
    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
    ; GFX9: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV6]]
    ; GFX9: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV6]]
    ; GFX9: [[MUL5:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV7]]
    ; GFX9: [[UMULH4:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV6]]
    ; GFX9: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[MUL4]], [[MUL5]]
    ; GFX9: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH4]]
    ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL3]](s32), [[ADD5]](s32)
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MV]](s64), [[C]]
    ; GFX9: [[ZEXT5:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1)
    ; GFX9: $vgpr0_vgpr1 = COPY [[MV1]](s64)
    ; GFX9: $vgpr2_vgpr3 = COPY [[ZEXT5]](s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s64) = COPY $vgpr2_vgpr3
    %2:_(s64), %3:_(s1) = G_UMULO %0, %1
    %4:_(s64) = G_ZEXT %3
    $vgpr0_vgpr1 = COPY %2
    $vgpr2_vgpr3 = COPY %4
...

---
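# <2 x s64> is scalarized into two s64 expansions; the overflow bits are widened to s64 with G_ANYEXT masked by 1.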
name: test_umulo_v2s64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7

    ; GFX8-LABEL: name: test_umulo_v2s64
    ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
    ; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; GFX8: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
    ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
    ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV6]]
    ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV7]]
    ; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV6]]
    ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
    ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO1]](s1)
    ; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UADDO]], [[UMULH]]
    ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO3]](s1)
    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
    ; GFX8: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV7]]
    ; GFX8: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV5]], [[UV6]]
    ; GFX8: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV7]]
    ; GFX8: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[MUL2]], [[UMULH1]]
    ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO5]](s1)
    ; GFX8: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UADDO4]], [[UMULH2]]
    ; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO7]](s1)
    ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ZEXT3]]
    ; GFX8: [[UADDO8:%[0-9]+]]:_(s32), [[UADDO9:%[0-9]+]]:_(s1) = G_UADDO [[UADDO6]], [[ADD]]
    ; GFX8: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO9]](s1)
    ; GFX8: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ZEXT4]]
    ; GFX8: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[UV5]], [[UV7]]
    ; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD2]]
    ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO8]](s32), [[ADD3]](s32)
    ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
    ; GFX8: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
    ; GFX8: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[UV8]], [[UV10]]
    ; GFX8: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[UV9]], [[UV10]]
    ; GFX8: [[MUL5:%[0-9]+]]:_(s32) = G_MUL [[UV8]], [[UV11]]
    ; GFX8: [[UMULH4:%[0-9]+]]:_(s32) = G_UMULH [[UV8]], [[UV10]]
    ; GFX8: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[MUL4]], [[MUL5]]
    ; GFX8: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH4]]
    ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL3]](s32), [[ADD5]](s32)
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MV]](s64), [[C]]
    ; GFX8: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
    ; GFX8: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
    ; GFX8: [[MUL6:%[0-9]+]]:_(s32) = G_MUL [[UV13]], [[UV14]]
    ; GFX8: [[MUL7:%[0-9]+]]:_(s32) = G_MUL [[UV12]], [[UV15]]
    ; GFX8: [[UMULH5:%[0-9]+]]:_(s32) = G_UMULH [[UV12]], [[UV14]]
    ; GFX8: [[UADDO10:%[0-9]+]]:_(s32), [[UADDO11:%[0-9]+]]:_(s1) = G_UADDO [[MUL6]], [[MUL7]]
    ; GFX8: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO11]](s1)
    ; GFX8: [[UADDO12:%[0-9]+]]:_(s32), [[UADDO13:%[0-9]+]]:_(s1) = G_UADDO [[UADDO10]], [[UMULH5]]
    ; GFX8: [[ZEXT6:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO13]](s1)
    ; GFX8: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ZEXT5]], [[ZEXT6]]
    ; GFX8: [[MUL8:%[0-9]+]]:_(s32) = G_MUL [[UV13]], [[UV15]]
    ; GFX8: [[UMULH6:%[0-9]+]]:_(s32) = G_UMULH [[UV13]], [[UV14]]
    ; GFX8: [[UMULH7:%[0-9]+]]:_(s32) = G_UMULH [[UV12]], [[UV15]]
    ; GFX8: [[UADDO14:%[0-9]+]]:_(s32), [[UADDO15:%[0-9]+]]:_(s1) = G_UADDO [[MUL8]], [[UMULH6]]
    ; GFX8: [[ZEXT7:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO15]](s1)
    ; GFX8: [[UADDO16:%[0-9]+]]:_(s32), [[UADDO17:%[0-9]+]]:_(s1) = G_UADDO [[UADDO14]], [[UMULH7]]
    ; GFX8: [[ZEXT8:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO17]](s1)
    ; GFX8: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ZEXT7]], [[ZEXT8]]
    ; GFX8: [[UADDO18:%[0-9]+]]:_(s32), [[UADDO19:%[0-9]+]]:_(s1) = G_UADDO [[UADDO16]], [[ADD6]]
    ; GFX8: [[ZEXT9:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO19]](s1)
    ; GFX8: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[ADD7]], [[ZEXT9]]
    ; GFX8: [[UMULH8:%[0-9]+]]:_(s32) = G_UMULH [[UV13]], [[UV15]]
    ; GFX8: [[ADD9:%[0-9]+]]:_(s32) = G_ADD [[UMULH8]], [[ADD8]]
    ; GFX8: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO18]](s32), [[ADD9]](s32)
    ; GFX8: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
    ; GFX8: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
    ; GFX8: [[MUL9:%[0-9]+]]:_(s32) = G_MUL [[UV16]], [[UV18]]
    ; GFX8: [[MUL10:%[0-9]+]]:_(s32) = G_MUL [[UV17]], [[UV18]]
    ; GFX8: [[MUL11:%[0-9]+]]:_(s32) = G_MUL [[UV16]], [[UV19]]
    ; GFX8: [[UMULH9:%[0-9]+]]:_(s32) = G_UMULH [[UV16]], [[UV18]]
    ; GFX8: [[ADD10:%[0-9]+]]:_(s32) = G_ADD [[MUL10]], [[MUL11]]
    ; GFX8: [[ADD11:%[0-9]+]]:_(s32) = G_ADD [[ADD10]], [[UMULH9]]
    ; GFX8: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL9]](s32), [[ADD11]](s32)
    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MV2]](s64), [[C]]
    ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV1]](s64), [[MV3]](s64)
    ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
    ; GFX8: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s1)
    ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
    ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP1]](s1)
    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C1]]
    ; GFX8: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[AND]](s64), [[AND1]](s64)
    ; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
    ; GFX8: $vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR1]](<2 x s64>)
    ; GFX9-LABEL: name: test_umulo_v2s64
    ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
    ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
    ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
    ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV6]]
    ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV7]]
    ; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV6]]
    ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
    ; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO1]](s1)
    ; GFX9: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UADDO]], [[UMULH]]
    ; GFX9: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO3]](s1)
    ; GFX9: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
    ; GFX9: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV7]]
    ; GFX9: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV5]], [[UV6]]
    ; GFX9: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV7]]
    ; GFX9: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[MUL2]], [[UMULH1]]
    ; GFX9: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO5]](s1)
    ; GFX9: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UADDO4]], [[UMULH2]]
    ; GFX9: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO7]](s1)
    ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ZEXT3]]
    ; GFX9: [[UADDO8:%[0-9]+]]:_(s32), [[UADDO9:%[0-9]+]]:_(s1) = G_UADDO [[UADDO6]], [[ADD]]
    ; GFX9: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO9]](s1)
    ; GFX9: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ZEXT4]]
    ; GFX9: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[UV5]], [[UV7]]
    ; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD2]]
    ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO8]](s32), [[ADD3]](s32)
    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; GFX9: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
    ; GFX9: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
    ; GFX9: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[UV8]], [[UV10]]
    ; GFX9: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[UV9]], [[UV10]]
    ; GFX9: [[MUL5:%[0-9]+]]:_(s32) = G_MUL [[UV8]], [[UV11]]
    ; GFX9: [[UMULH4:%[0-9]+]]:_(s32) = G_UMULH [[UV8]], [[UV10]]
    ; GFX9: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[MUL4]], [[MUL5]]
    ; GFX9: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH4]]
    ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL3]](s32), [[ADD5]](s32)
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MV]](s64), [[C]]
    ; GFX9: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
    ; GFX9: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
    ; GFX9: [[MUL6:%[0-9]+]]:_(s32) = G_MUL [[UV13]], [[UV14]]
    ; GFX9: [[MUL7:%[0-9]+]]:_(s32) = G_MUL [[UV12]], [[UV15]]
    ; GFX9: [[UMULH5:%[0-9]+]]:_(s32) = G_UMULH [[UV12]], [[UV14]]
    ; GFX9: [[UADDO10:%[0-9]+]]:_(s32), [[UADDO11:%[0-9]+]]:_(s1) = G_UADDO [[MUL6]], [[MUL7]]
    ; GFX9: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO11]](s1)
    ; GFX9: [[UADDO12:%[0-9]+]]:_(s32), [[UADDO13:%[0-9]+]]:_(s1) = G_UADDO [[UADDO10]], [[UMULH5]]
    ; GFX9: [[ZEXT6:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO13]](s1)
    ; GFX9: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ZEXT5]], [[ZEXT6]]
    ; GFX9: [[MUL8:%[0-9]+]]:_(s32) = G_MUL [[UV13]], [[UV15]]
    ; GFX9: [[UMULH6:%[0-9]+]]:_(s32) = G_UMULH [[UV13]], [[UV14]]
    ; GFX9: [[UMULH7:%[0-9]+]]:_(s32) = G_UMULH [[UV12]], [[UV15]]
    ; GFX9: [[UADDO14:%[0-9]+]]:_(s32), [[UADDO15:%[0-9]+]]:_(s1) = G_UADDO [[MUL8]], [[UMULH6]]
    ; GFX9: [[ZEXT7:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO15]](s1)
    ; GFX9: [[UADDO16:%[0-9]+]]:_(s32), [[UADDO17:%[0-9]+]]:_(s1) = G_UADDO [[UADDO14]], [[UMULH7]]
    ; GFX9: [[ZEXT8:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO17]](s1)
    ; GFX9: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ZEXT7]], [[ZEXT8]]
    ; GFX9: [[UADDO18:%[0-9]+]]:_(s32), [[UADDO19:%[0-9]+]]:_(s1) = G_UADDO [[UADDO16]], [[ADD6]]
    ; GFX9: [[ZEXT9:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO19]](s1)
    ; GFX9: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[ADD7]], [[ZEXT9]]
    ; GFX9: [[UMULH8:%[0-9]+]]:_(s32) = G_UMULH [[UV13]], [[UV15]]
    ; GFX9: [[ADD9:%[0-9]+]]:_(s32) = G_ADD [[UMULH8]], [[ADD8]]
    ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO18]](s32), [[ADD9]](s32)
    ; GFX9: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
    ; GFX9: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
    ; GFX9: [[MUL9:%[0-9]+]]:_(s32) = G_MUL [[UV16]], [[UV18]]
    ; GFX9: [[MUL10:%[0-9]+]]:_(s32) = G_MUL [[UV17]], [[UV18]]
    ; GFX9: [[MUL11:%[0-9]+]]:_(s32) = G_MUL [[UV16]], [[UV19]]
    ; GFX9: [[UMULH9:%[0-9]+]]:_(s32) = G_UMULH [[UV16]], [[UV18]]
    ; GFX9: [[ADD10:%[0-9]+]]:_(s32) = G_ADD [[MUL10]], [[MUL11]]
    ; GFX9: [[ADD11:%[0-9]+]]:_(s32) = G_ADD [[ADD10]], [[UMULH9]]
    ; GFX9: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL9]](s32), [[ADD11]](s32)
    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MV2]](s64), [[C]]
    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV1]](s64), [[MV3]](s64)
    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
    ; GFX9: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s1)
    ; GFX9: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
    ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP1]](s1)
    ; GFX9: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C1]]
    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[AND]](s64), [[AND1]](s64)
    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
    ; GFX9: $vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR1]](<2 x s64>)
    %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
    %2:_(<2 x s64>), %3:_(<2 x s1>) = G_UMULO %0, %1
    %4:_(<2 x s64>) = G_ZEXT %3
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
    $vgpr4_vgpr5_vgpr6_vgpr7 = COPY %4
...

---
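# s24 is widened to s32 with the operands masked to 24 bits; overflow is set if G_UMULH is nonzero or the 32-bit product does not survive a round trip through the 24-bit mask.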
name: test_umulo_s24
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; GFX8-LABEL: name: test_umulo_s24
    ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
    ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[AND]], [[AND1]]
    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UMULH]](s32), [[C1]]
    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX8: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
    ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s1)
    ; GFX8: $vgpr0 = COPY [[AND3]](s32)
    ; GFX8: $vgpr1 = COPY [[ZEXT]](s32)
    ; GFX9-LABEL: name: test_umulo_s24
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
    ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[AND]], [[AND1]]
    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UMULH]](s32), [[C1]]
    ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX9: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
    ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s1)
    ; GFX9: $vgpr0 = COPY [[AND3]](s32)
    ; GFX9: $vgpr1 = COPY [[ZEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s24) = G_TRUNC %0
    %3:_(s24) = G_TRUNC %1
    %4:_(s24), %6:_(s1) = G_UMULO %2, %3
    %5:_(s32) = G_ZEXT %4
    %7:_(s32) = G_ZEXT %6
    $vgpr0 = COPY %5
    $vgpr1 = COPY %7
...

---
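# s16 is widened to s32; since a 16x16 product fits in 32 bits, overflow only needs the round-trip mask check, with no G_UMULH.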
name: test_umulo_s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; GFX8-LABEL: name: test_umulo_s16
    ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
    ; GFX8: $vgpr0 = COPY [[AND3]](s32)
    ; GFX8: $vgpr1 = COPY [[ZEXT]](s32)
    ; GFX9-LABEL: name: test_umulo_s16
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
    ; GFX9: $vgpr0 = COPY [[AND3]](s32)
    ; GFX9: $vgpr1 = COPY [[ZEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s16) = G_TRUNC %0
    %3:_(s16) = G_TRUNC %1
    %4:_(s16), %6:_(s1) = G_UMULO %2, %3
    %5:_(s32) = G_ZEXT %4
    %7:_(s32) = G_ZEXT %6
    $vgpr0 = COPY %5
    $vgpr1 = COPY %7
...

---
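# s8 follows the same widened pattern as s16, masking with 255.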
name: test_umulo_s8
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; GFX8-LABEL: name: test_umulo_s8
    ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
    ; GFX8: $vgpr0 = COPY [[AND3]](s32)
    ; GFX8: $vgpr1 = COPY [[ZEXT]](s32)
    ; GFX9-LABEL: name: test_umulo_s8
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
    ; GFX9: $vgpr0 = COPY [[AND3]](s32)
    ; GFX9: $vgpr1 = COPY [[ZEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s8) = G_TRUNC %0
    %3:_(s8) = G_TRUNC %1
    %4:_(s8), %6:_(s1) = G_UMULO %2, %3
    %5:_(s32) = G_ZEXT %4
    %7:_(s32) = G_ZEXT %6
    $vgpr0 = COPY %5
    $vgpr1 = COPY %7
...

---
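# <2 x s16> is scalarized into widened 16-bit multiplies; GFX8 repacks the lanes with shift/or and a bitcast, while GFX9 uses G_BUILD_VECTOR_TRUNC.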
name: test_umulo_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
    ; GFX8-LABEL: name: test_umulo_v2s16
    ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
    ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[C]]
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
    ; GFX8: [[AND4:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[C]]
    ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[AND4]]
    ; GFX8: [[AND5:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C]]
    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL1]](s32), [[AND5]]
    ; GFX8: [[AND6:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[AND7:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C]]
    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C1]](s32)
    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL]]
    ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX8: [[AND8:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
    ; GFX8: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C2]]
    ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND8]](s32), [[AND9]](s32)
    ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST]](<2 x s16>)
    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
    ; GFX8: [[AND10:%[0-9]+]]:_(s32) = G_AND [[BITCAST1]], [[C]]
    ; GFX8: [[AND11:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C]]
    ; GFX8: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND10]](s32), [[AND11]](s32)
    ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR1]](<2 x s32>)
    ; GFX8: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s32>)
    ; GFX9-LABEL: name: test_umulo_v2s16
    ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
    ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[C]]
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
    ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[C]]
    ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[AND4]]
    ; GFX9: [[AND5:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C]]
    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL1]](s32), [[AND5]]
    ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[MUL]](s32), [[MUL1]](s32)
    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
    ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; GFX9: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND6]](s32), [[AND7]](s32)
    ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[BUILD_VECTOR_TRUNC]](<2 x s16>)
    ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C2]](s32)
    ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[BITCAST]], [[C]]
    ; GFX9: [[AND9:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C]]
    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND8]](s32), [[AND9]](s32)
    ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR1]](<2 x s32>)
    ; GFX9: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
    %2:_(<2 x s16>) = G_TRUNC %0
    %3:_(<2 x s16>) = G_TRUNC %1
    %4:_(<2 x s16>), %6:_(<2 x s1>) = G_UMULO %2, %3
    %7:_(<2 x s32>) = G_ZEXT %6
    %5:_(<2 x s32>) = G_ZEXT %4
    $vgpr0_vgpr1 = COPY %5
    $vgpr2_vgpr3 = COPY %7
...

---
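# <2 x s8> is scalarized into widened 8-bit multiplies, and the result bytes are repacked into an s16 with shift/or.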
name: test_umulo_v2s8
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
    ; GFX8-LABEL: name: test_umulo_v2s8
    ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX8: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX8: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX8: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[AND4]]
    ; GFX8: [[AND5:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C]]
    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL1]](s32), [[AND5]]
    ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
    ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[MUL]](s32)
    ; GFX8: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]]
    ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[MUL1]](s32)
    ; GFX8: [[AND7:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C1]]
    ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND7]], [[C2]](s16)
    ; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND6]], [[SHL]]
    ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
    ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
    ; GFX8: $vgpr1 = COPY [[ANYEXT1]](s32)
    ; GFX8: $vgpr2 = COPY [[ANYEXT2]](s32)
    ; GFX9-LABEL: name: test_umulo_v2s8
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[AND4]]
    ; GFX9: [[AND5:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C]]
    ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL1]](s32), [[AND5]]
    ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
    ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[MUL]](s32)
    ; GFX9: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]]
    ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[MUL1]](s32)
    ; GFX9: [[AND7:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C1]]
    ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
    ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND7]], [[C2]](s16)
    ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND6]], [[SHL]]
    ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
    ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
    ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
    ; GFX9: $vgpr1 = COPY [[ANYEXT1]](s32)
    ; GFX9: $vgpr2 = COPY [[ANYEXT2]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = COPY $vgpr3
    %5:_(s8) = G_TRUNC %0
    %6:_(s8) = G_TRUNC %1
    %7:_(s8) = G_TRUNC %2
    %8:_(s8) = G_TRUNC %3
    %11:_(<2 x s8>) = G_BUILD_VECTOR %5, %6
    %12:_(<2 x s8>) = G_BUILD_VECTOR %7, %8
    %13:_(<2 x s8>), %19:_(<2 x s1>) = G_UMULO %11, %12
    %20:_(<2 x s32>) = G_ZEXT %19
    %14:_(s8), %15:_(s8) = G_UNMERGE_VALUES %13
    %21:_(s1), %22:_(s1) = G_UNMERGE_VALUES %19
    %17:_(s16) = G_MERGE_VALUES %14, %15
    %18:_(s32) = G_ANYEXT %17
    %23:_(s32) = G_ANYEXT %21
    %24:_(s32) = G_ANYEXT %22
    $vgpr0 = COPY %18
    $vgpr1 = COPY %23
    $vgpr2 = COPY %24
...

---
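# <4 x s8> is scalarized into four widened 8-bit multiplies repacked into a single s32 with shift/or chains.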
name: test_umulo_v4s8
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1
    ; GFX8-LABEL: name: test_umulo_v4s8
    ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
    ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
    ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
    ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
    ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]]
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C3]]
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C3]]
    ; GFX8: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR3]], [[C3]]
    ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[AND4]]
    ; GFX8: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
    ; GFX8: [[AND6:%[0-9]+]]:_(s32) = G_AND [[LSHR4]], [[C3]]
    ; GFX8: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[AND6]]
    ; GFX8: [[AND7:%[0-9]+]]:_(s32) = G_AND [[LSHR2]], [[C3]]
    ; GFX8: [[AND8:%[0-9]+]]:_(s32) = G_AND [[LSHR5]], [[C3]]
    ; GFX8: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[AND8]]
    ; GFX8: [[AND9:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C3]]
    ; GFX8: [[AND10:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C3]]
    ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND10]], [[C]](s32)
    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND9]], [[SHL]]
    ; GFX8: [[AND11:%[0-9]+]]:_(s32) = G_AND [[MUL2]], [[C3]]
    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C1]](s32)
    ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; GFX8: [[AND12:%[0-9]+]]:_(s32) = G_AND [[MUL3]], [[C3]]
    ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND12]], [[C2]](s32)
    ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; GFX8: $vgpr0 = COPY [[OR2]](s32)
    ; GFX8: $vgpr1 = COPY [[ANYEXT]](s32)
    ; GFX9-LABEL: name: test_umulo_v4s8
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
    ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
    ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
    ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
    ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
    ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
    ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]]
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C3]]
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C3]]
    ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR3]], [[C3]]
    ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[AND4]]
    ; GFX9: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
    ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[LSHR4]], [[C3]]
    ; GFX9: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[AND6]]
    ; GFX9: [[AND7:%[0-9]+]]:_(s32) = G_AND [[LSHR2]], [[C3]]
    ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[LSHR5]], [[C3]]
    ; GFX9: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[AND8]]
    ; GFX9: [[AND9:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C3]]
    ; GFX9: [[AND10:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C3]]
    ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND10]], [[C]](s32)
    ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND9]], [[SHL]]
    ; GFX9: [[AND11:%[0-9]+]]:_(s32) = G_AND [[MUL2]], [[C3]]
    ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C1]](s32)
    ; GFX9: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[MUL3]], [[C3]]
    ; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND12]], [[C2]](s32)
    ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
    ; GFX9: $vgpr0 = COPY [[OR2]](s32)
    ; GFX9: $vgpr1 = COPY [[ANYEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0
    %6:_(s8), %7:_(s8), %8:_(s8), %9:_(s8) = G_UNMERGE_VALUES %1
    %10:_(<4 x s8>) = G_BUILD_VECTOR %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8)
    %11:_(<4 x s8>) = G_BUILD_VECTOR %6:_(s8), %7:_(s8), %8:_(s8), %9:_(s8)
    %12:_(<4 x s8>), %18:_(<4 x s1>) = G_UMULO %10:_, %11:_
    %13:_(s8), %14:_(s8), %15:_(s8), %16:_(s8) = G_UNMERGE_VALUES %12:_(<4 x s8>)
    %19:_(s1), %20:_(s1), %21:_(s1), %22:_(s1) = G_UNMERGE_VALUES %18:_(<4 x s1>)
    %17:_(s32) = G_MERGE_VALUES %13, %14, %15, %16
    %23:_(s32) = G_ANYEXT %19