; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -passes=early-cse -earlycse-debug-hash | FileCheck %s
; RUN: opt < %s -S -passes='early-cse<memssa>' | FileCheck %s

define void @test1(float %A, float %B, ptr %PA, ptr %PB) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[C:%.*]] = fadd float [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    store float [[C]], ptr [[PA:%.*]], align 4
; CHECK-NEXT:    store float [[C]], ptr [[PB:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %C = fadd float %A, %B
  store float %C, ptr %PA
  %D = fadd float %B, %A
  store float %D, ptr %PB
  ret void
}

define void @test2(float %A, float %B, ptr %PA, ptr %PB) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[C:%.*]] = fcmp oeq float [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    store i1 [[C]], ptr [[PA:%.*]], align 1
; CHECK-NEXT:    store i1 [[C]], ptr [[PB:%.*]], align 1
; CHECK-NEXT:    ret void
;
  %C = fcmp oeq float %A, %B
  store i1 %C, ptr %PA
  %D = fcmp oeq float %B, %A
  store i1 %D, ptr %PB
  ret void
}

define void @test3(float %A, float %B, ptr %PA, ptr %PB) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[C:%.*]] = fcmp uge float [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    store i1 [[C]], ptr [[PA:%.*]], align 1
; CHECK-NEXT:    store i1 [[C]], ptr [[PB:%.*]], align 1
; CHECK-NEXT:    ret void
;
  %C = fcmp uge float %A, %B
  store i1 %C, ptr %PA
  %D = fcmp ule float %B, %A
  store i1 %D, ptr %PB
  ret void
}

define void @test4(i32 %A, i32 %B, ptr %PA, ptr %PB) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    store i1 [[C]], ptr [[PA:%.*]], align 1
; CHECK-NEXT:    store i1 [[C]], ptr [[PB:%.*]], align 1
; CHECK-NEXT:    ret void
;
  %C = icmp eq i32 %A, %B
  store i1 %C, ptr %PA
  %D = icmp eq i32 %B, %A
  store i1 %D, ptr %PB
  ret void
}

define void @test5(i32 %A, i32 %B, ptr %PA, ptr %PB) {
; CHECK-LABEL: @test5(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    store i1 [[C]], ptr [[PA:%.*]], align 1
; CHECK-NEXT:    store i1 [[C]], ptr [[PB:%.*]], align 1
; CHECK-NEXT:    ret void
;
  %C = icmp sgt i32 %A, %B
  store i1 %C, ptr %PA
  %D = icmp slt i32 %B, %A
  store i1 %D, ptr %PB
  ret void
}

; Test degenerate case of commuted compare of identical comparands.

define void @test6(float %f, ptr %p1, ptr %p2) {
; CHECK-LABEL: @test6(
; CHECK-NEXT:    [[C1:%.*]] = fcmp ult float [[F:%.*]], [[F]]
; CHECK-NEXT:    store i1 [[C1]], ptr [[P1:%.*]], align 1
; CHECK-NEXT:    store i1 [[C1]], ptr [[P2:%.*]], align 1
; CHECK-NEXT:    ret void
;
  %c1 = fcmp ult float %f, %f
  %c2 = fcmp ugt float %f, %f
  store i1 %c1, ptr %p1
  store i1 %c2, ptr %p2
  ret void
}

; Min/max operands may be commuted in the compare and select.

define i8 @smin_commute(i8 %a, i8 %b) {
; CHECK-LABEL: @smin_commute(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i8 [[B]], [[A]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]]
; CHECK-NEXT:    [[R:%.*]] = mul i8 [[M1]], [[M1]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %cmp1 = icmp slt i8 %a, %b
  %cmp2 = icmp slt i8 %b, %a
  %m1 = select i1 %cmp1, i8 %a, i8 %b
  %m2 = select i1 %cmp2, i8 %b, i8 %a
  %r = mul i8 %m2, %m1
  ret i8 %r
}

; Min/max can also have a swapped predicate and select operands.

define i1 @smin_swapped(i8 %a, i8 %b) {
; CHECK-LABEL: @smin_swapped(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i8 [[A]], [[B]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[B]], i8 [[A]]
; CHECK-NEXT:    ret i1 true
;
  %cmp1 = icmp sgt i8 %a, %b
  %cmp2 = icmp slt i8 %a, %b
  %m1 = select i1 %cmp1, i8 %b, i8 %a
  %m2 = select i1 %cmp2, i8 %a, i8 %b
  %r = icmp eq i8 %m2, %m1
  ret i1 %r
}

; Min/max can also have an inverted predicate and select operands.

define i1 @smin_inverted(i8 %a, i8 %b) {
; CHECK-LABEL: @smin_inverted(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = xor i1 [[CMP1]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]]
; CHECK-NEXT:    ret i1 true
;
  %cmp1 = icmp slt i8 %a, %b
  %cmp2 = xor i1 %cmp1, -1
  %m1 = select i1 %cmp1, i8 %a, i8 %b
  %m2 = select i1 %cmp2, i8 %b, i8 %a
  %r = icmp eq i8 %m1, %m2
  ret i1 %r
}

define i8 @smax_commute(i8 %a, i8 %b) {
; CHECK-LABEL: @smax_commute(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i8 [[B]], [[A]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]]
; CHECK-NEXT:    ret i8 0
;
  %cmp1 = icmp sgt i8 %a, %b
  %cmp2 = icmp sgt i8 %b, %a
  %m1 = select i1 %cmp1, i8 %a, i8 %b
  %m2 = select i1 %cmp2, i8 %b, i8 %a
  %r = urem i8 %m2, %m1
  ret i8 %r
}

define i8 @smax_swapped(i8 %a, i8 %b) {
; CHECK-LABEL: @smax_swapped(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i8 [[A]], [[B]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[B]], i8 [[A]]
; CHECK-NEXT:    ret i8 1
;
  %cmp1 = icmp slt i8 %a, %b
  %cmp2 = icmp sgt i8 %a, %b
  %m1 = select i1 %cmp1, i8 %b, i8 %a
  %m2 = select i1 %cmp2, i8 %a, i8 %b
  %r = sdiv i8 %m1, %m2
  ret i8 %r
}

define i1 @smax_inverted(i8 %a, i8 %b) {
; CHECK-LABEL: @smax_inverted(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = xor i1 [[CMP1]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]]
; CHECK-NEXT:    ret i1 true
;
  %cmp1 = icmp sgt i8 %a, %b
  %cmp2 = xor i1 %cmp1, -1
  %m1 = select i1 %cmp1, i8 %a, i8 %b
  %m2 = select i1 %cmp2, i8 %b, i8 %a
  %r = icmp eq i8 %m1, %m2
  ret i1 %r
}

define i8 @umin_commute(i8 %a, i8 %b) {
; CHECK-LABEL: @umin_commute(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp ult i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp ult i8 [[B]], [[A]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]]
; CHECK-NEXT:    ret i8 0
;
  %cmp1 = icmp ult i8 %a, %b
  %cmp2 = icmp ult i8 %b, %a
  %m1 = select i1 %cmp1, i8 %a, i8 %b
  %m2 = select i1 %cmp2, i8 %b, i8 %a
  %r = sub i8 %m2, %m1
  ret i8 %r
}

; Choose a vector type just to show that works.

define <2 x i8> @umin_swapped(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @umin_swapped(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp ugt <2 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp ult <2 x i8> [[A]], [[B]]
; CHECK-NEXT:    [[M1:%.*]] = select <2 x i1> [[CMP1]], <2 x i8> [[B]], <2 x i8> [[A]]
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
  %cmp1 = icmp ugt <2 x i8> %a, %b
  %cmp2 = icmp ult <2 x i8> %a, %b
  %m1 = select <2 x i1> %cmp1, <2 x i8> %b, <2 x i8> %a
  %m2 = select <2 x i1> %cmp2, <2 x i8> %a, <2 x i8> %b
  %r = sub <2 x i8> %m2, %m1
  ret <2 x i8> %r
}

define i1 @umin_inverted(i8 %a, i8 %b) {
; CHECK-LABEL: @umin_inverted(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp ult i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = xor i1 [[CMP1]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]]
; CHECK-NEXT:    ret i1 true
;
  %cmp1 = icmp ult i8 %a, %b
  %cmp2 = xor i1 %cmp1, -1
  %m1 = select i1 %cmp1, i8 %a, i8 %b
  %m2 = select i1 %cmp2, i8 %b, i8 %a
  %r = icmp eq i8 %m1, %m2
  ret i1 %r
}

define i8 @umax_commute(i8 %a, i8 %b) {
; CHECK-LABEL: @umax_commute(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp ugt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp ugt i8 [[B]], [[A]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]]
; CHECK-NEXT:    ret i8 1
;
  %cmp1 = icmp ugt i8 %a, %b
  %cmp2 = icmp ugt i8 %b, %a
  %m1 = select i1 %cmp1, i8 %a, i8 %b
  %m2 = select i1 %cmp2, i8 %b, i8 %a
  %r = udiv i8 %m1, %m2
  ret i8 %r
}

define i8 @umax_swapped(i8 %a, i8 %b) {
; CHECK-LABEL: @umax_swapped(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp ult i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp ugt i8 [[A]], [[B]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[B]], i8 [[A]]
; CHECK-NEXT:    [[R:%.*]] = add i8 [[M1]], [[M1]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %cmp1 = icmp ult i8 %a, %b
  %cmp2 = icmp ugt i8 %a, %b
  %m1 = select i1 %cmp1, i8 %b, i8 %a
  %m2 = select i1 %cmp2, i8 %a, i8 %b
  %r = add i8 %m2, %m1
  ret i8 %r
}

define i1 @umax_inverted(i8 %a, i8 %b) {
; CHECK-LABEL: @umax_inverted(
; CHECK-NEXT:    [[CMP1:%.*]] = icmp ugt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP2:%.*]] = xor i1 [[CMP1]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]]
; CHECK-NEXT:    ret i1 true
;
  %cmp1 = icmp ugt i8 %a, %b
  %cmp2 = xor i1 %cmp1, -1
  %m1 = select i1 %cmp1, i8 %a, i8 %b
  %m2 = select i1 %cmp2, i8 %b, i8 %a
  %r = icmp eq i8 %m1, %m2
  ret i1 %r
}

; Min/max may exist with non-canonical operands. Value tracking can match those.
; But we do not use value tracking, so we expect instcombine will canonicalize
; this code to a form that allows CSE.

define i8 @smax_nsw(i8 %a, i8 %b) {
; CHECK-LABEL: @smax_nsw(
; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[A]], [[B]]
; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i8 [[SUB]], 0
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 0, i8 [[SUB]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[CMP2]], i8 [[SUB]], i8 0
; CHECK-NEXT:    [[R:%.*]] = sub i8 [[M2]], [[M1]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sub = sub nsw i8 %a, %b
  %cmp1 = icmp slt i8 %a, %b
  %cmp2 = icmp sgt i8 %sub, 0
  %m1 = select i1 %cmp1, i8 0, i8 %sub
  %m2 = select i1 %cmp2, i8 %sub, i8 0
  %r = sub i8 %m2, %m1
  ret i8 %r
}

define i8 @abs_swapped_sge(i8 %a) {
; CHECK-LABEL: @abs_swapped_sge(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sge i8 [[A]], 0
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i8 [[A]], 0
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]]
; CHECK-NEXT:    ret i8 0
;
  %neg = sub i8 0, %a
  %cmp1 = icmp sge i8 %a, 0
  %cmp2 = icmp slt i8 %a, 0
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  %r = xor i8 %m2, %m1
  ret i8 %r
}

define i8 @nabs_swapped_sge(i8 %a) {
; CHECK-LABEL: @nabs_swapped_sge(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[A]], 0
; CHECK-NEXT:    [[CMP2:%.*]] = icmp sge i8 [[A]], 0
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]]
; CHECK-NEXT:    ret i8 0
;
  %neg = sub i8 0, %a
  %cmp1 = icmp slt i8 %a, 0
  %cmp2 = icmp sge i8 %a, 0
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  %r = xor i8 %m2, %m1
  ret i8 %r
}

; Abs/nabs may exist with non-canonical operands. Value tracking can match those.
; But we do not use value tracking, so we expect instcombine will canonicalize
; this code to a form that allows CSE.

define i8 @abs_swapped(i8 %a) {
; CHECK-LABEL: @abs_swapped(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i8 [[A]], 0
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i8 [[A]], 0
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[CMP2]], i8 [[NEG]], i8 [[A]]
; CHECK-NEXT:    [[R:%.*]] = or i8 [[M2]], [[M1]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %neg = sub i8 0, %a
  %cmp1 = icmp sgt i8 %a, 0
  %cmp2 = icmp slt i8 %a, 0
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  %r = or i8 %m2, %m1
  ret i8 %r
}

define i8 @abs_inverted(i8 %a) {
; CHECK-LABEL: @abs_inverted(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i8 [[A]], 0
; CHECK-NEXT:    [[CMP2:%.*]] = xor i1 [[CMP1]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]]
; CHECK-NEXT:    ret i8 [[M1]]
;
  %neg = sub i8 0, %a
  %cmp1 = icmp sgt i8 %a, 0
  %cmp2 = xor i1 %cmp1, -1
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  %r = or i8 %m2, %m1
  ret i8 %r
}

; Abs/nabs may exist with non-canonical operands. Value tracking can match those.
; But we do not use value tracking, so we expect instcombine will canonicalize
; this code to a form that allows CSE.

define i8 @nabs_swapped(i8 %a) {
; CHECK-LABEL: @nabs_swapped(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[A]], 0
; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i8 [[A]], 0
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[CMP2]], i8 [[NEG]], i8 [[A]]
; CHECK-NEXT:    [[R:%.*]] = xor i8 [[M2]], [[M1]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %neg = sub i8 0, %a
  %cmp1 = icmp slt i8 %a, 0
  %cmp2 = icmp sgt i8 %a, 0
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  %r = xor i8 %m2, %m1
  ret i8 %r
}

define i8 @nabs_inverted(i8 %a) {
; CHECK-LABEL: @nabs_inverted(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[A]], 0
; CHECK-NEXT:    [[CMP2:%.*]] = xor i1 [[CMP1]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]]
; CHECK-NEXT:    ret i8 0
;
  %neg = sub i8 0, %a
  %cmp1 = icmp slt i8 %a, 0
  %cmp2 = xor i1 %cmp1, -1
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  %r = xor i8 %m2, %m1
  ret i8 %r
}

; Abs/nabs may exist with non-canonical operands. Value tracking can match those.
; But we do not use value tracking, so we expect instcombine will canonicalize
; this code to a form that allows CSE.
; The constants used in the compares are different.
define i8 @abs_different_constants(i8 %a) {
; CHECK-LABEL: @abs_different_constants(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i8 [[A]], -1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i8 [[A]], 0
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[CMP2]], i8 [[NEG]], i8 [[A]]
; CHECK-NEXT:    [[R:%.*]] = or i8 [[M2]], [[M1]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %neg = sub i8 0, %a
  %cmp1 = icmp sgt i8 %a, -1
  %cmp2 = icmp slt i8 %a, 0
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  %r = or i8 %m2, %m1
  ret i8 %r
}

; Abs/nabs may exist with non-canonical operands. Value tracking can match those.
; But we do not use value tracking, so we expect instcombine will canonicalize
; this code to a form that allows CSE.

define i8 @nabs_different_constants(i8 %a) {
; CHECK-LABEL: @nabs_different_constants(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[A]], 0
; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i8 [[A]], -1
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[CMP2]], i8 [[NEG]], i8 [[A]]
; CHECK-NEXT:    [[R:%.*]] = xor i8 [[M2]], [[M1]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %neg = sub i8 0, %a
  %cmp1 = icmp slt i8 %a, 0
  %cmp2 = icmp sgt i8 %a, -1
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  %r = xor i8 %m2, %m1
  ret i8 %r
}

; https://bugs.llvm.org/show_bug.cgi?id=41101
; Detect equivalence of selects with commuted operands: 'not' cond.

define i32 @select_not_cond(i1 %cond, i32 %t, i32 %f) {
; CHECK-LABEL: @select_not_cond(
; CHECK-NEXT:    [[NOT:%.*]] = xor i1 [[COND:%.*]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    ret i32 0
;
  %not = xor i1 %cond, -1
  %m1 = select i1 %cond, i32 %t, i32 %f
  %m2 = select i1 %not, i32 %f, i32 %t
  %r = xor i32 %m2, %m1
  ret i32 %r
}

; Detect equivalence of selects with commuted operands: 'not' cond with vector select.

define <2 x double> @select_not_cond_commute_vec(<2 x i1> %cond, <2 x double> %t, <2 x double> %f) {
; CHECK-LABEL: @select_not_cond_commute_vec(
; CHECK-NEXT:    [[NOT:%.*]] = xor <2 x i1> [[COND:%.*]], <i1 true, i1 true>
; CHECK-NEXT:    [[M1:%.*]] = select <2 x i1> [[COND]], <2 x double> [[T:%.*]], <2 x double> [[F:%.*]]
; CHECK-NEXT:    ret <2 x double> <double 1.000000e+00, double 1.000000e+00>
;
  %not = xor <2 x i1> %cond, <i1 -1, i1 -1>
  %m1 = select <2 x i1> %cond, <2 x double> %t, <2 x double> %f
  %m2 = select <2 x i1> %not, <2 x double> %f, <2 x double> %t
  %r = fdiv nnan <2 x double> %m1, %m2
  ret <2 x double> %r
}

; Negative test - select ops must be commuted.

define i32 @select_not_cond_wrong_select_ops(i1 %cond, i32 %t, i32 %f) {
; CHECK-LABEL: @select_not_cond_wrong_select_ops(
; CHECK-NEXT:    [[NOT:%.*]] = xor i1 [[COND:%.*]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[NOT]], i32 [[T]], i32 [[F]]
; CHECK-NEXT:    [[R:%.*]] = xor i32 [[M2]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %not = xor i1 %cond, -1
  %m1 = select i1 %cond, i32 %t, i32 %f
  %m2 = select i1 %not, i32 %t, i32 %f
  %r = xor i32 %m2, %m1
  ret i32 %r
}

; Negative test - not a 'not'.

define i32 @select_not_cond_wrong_cond(i1 %cond, i32 %t, i32 %f) {
; CHECK-LABEL: @select_not_cond_wrong_cond(
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND:%.*]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[COND]], i32 [[F]], i32 [[T]]
; CHECK-NEXT:    [[R:%.*]] = xor i32 [[M2]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %not = xor i1 %cond, -2
  %m1 = select i1 %cond, i32 %t, i32 %f
  %m2 = select i1 %not, i32 %f, i32 %t
  %r = xor i32 %m2, %m1
  ret i32 %r
}

; Detect equivalence of selects with commuted operands: inverted pred with fcmps.

define i32 @select_invert_pred_cond(float %x, i32 %t, i32 %f) {
; CHECK-LABEL: @select_invert_pred_cond(
; CHECK-NEXT:    [[COND:%.*]] = fcmp ueq float [[X:%.*]], 4.200000e+01
; CHECK-NEXT:    [[INVCOND:%.*]] = fcmp one float [[X]], 4.200000e+01
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    ret i32 0
;
  %cond = fcmp ueq float %x, 42.0
  %invcond = fcmp one float %x, 42.0
  %m1 = select i1 %cond, i32 %t, i32 %f
  %m2 = select i1 %invcond, i32 %f, i32 %t
  %r = xor i32 %m2, %m1
  ret i32 %r
}

; Detect equivalence of selects with commuted operands: inverted pred with icmps and vectors.

define <2 x i32> @select_invert_pred_cond_commute_vec(<2 x i8> %x, <2 x i32> %t, <2 x i32> %f) {
; CHECK-LABEL: @select_invert_pred_cond_commute_vec(
; CHECK-NEXT:    [[COND:%.*]] = icmp sgt <2 x i8> [[X:%.*]], <i8 42, i8 -1>
; CHECK-NEXT:    [[INVCOND:%.*]] = icmp sle <2 x i8> [[X]], <i8 42, i8 -1>
; CHECK-NEXT:    [[M1:%.*]] = select <2 x i1> [[COND]], <2 x i32> [[T:%.*]], <2 x i32> [[F:%.*]]
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %cond = icmp sgt <2 x i8> %x, <i8 42, i8 -1>
  %invcond = icmp sle <2 x i8> %x, <i8 42, i8 -1>
  %m1 = select <2 x i1> %cond, <2 x i32> %t, <2 x i32> %f
  %m2 = select <2 x i1> %invcond, <2 x i32> %f, <2 x i32> %t
  %r = xor <2 x i32> %m1, %m2
  ret <2 x i32> %r
}

; Negative test - select ops must be commuted.

define i32 @select_invert_pred_wrong_select_ops(float %x, i32 %t, i32 %f) {
; CHECK-LABEL: @select_invert_pred_wrong_select_ops(
; CHECK-NEXT:    [[COND:%.*]] = fcmp ueq float [[X:%.*]], 4.200000e+01
; CHECK-NEXT:    [[INVCOND:%.*]] = fcmp one float [[X]], 4.200000e+01
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[F:%.*]], i32 [[T:%.*]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[INVCOND]], i32 [[F]], i32 [[T]]
; CHECK-NEXT:    [[R:%.*]] = xor i32 [[M2]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %cond = fcmp ueq float %x, 42.0
  %invcond = fcmp one float %x, 42.0
  %m1 = select i1 %cond, i32 %f, i32 %t
  %m2 = select i1 %invcond, i32 %f, i32 %t
  %r = xor i32 %m2, %m1
  ret i32 %r
}

; Negative test - not an inverted predicate.

define i32 @select_invert_pred_wrong_cond(float %x, i32 %t, i32 %f) {
; CHECK-LABEL: @select_invert_pred_wrong_cond(
; CHECK-NEXT:    [[COND:%.*]] = fcmp ueq float [[X:%.*]], 4.200000e+01
; CHECK-NEXT:    [[INVCOND:%.*]] = fcmp une float [[X]], 4.200000e+01
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[INVCOND]], i32 [[F]], i32 [[T]]
; CHECK-NEXT:    [[R:%.*]] = xor i32 [[M2]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %cond = fcmp ueq float %x, 42.0
  %invcond = fcmp une float %x, 42.0
  %m1 = select i1 %cond, i32 %t, i32 %f
  %m2 = select i1 %invcond, i32 %f, i32 %t
  %r = xor i32 %m2, %m1
  ret i32 %r
}

; Negative test - cmp ops must match.

define i32 @select_invert_pred_wrong_cmp_ops(float %x, i32 %t, i32 %f) {
; CHECK-LABEL: @select_invert_pred_wrong_cmp_ops(
; CHECK-NEXT:    [[COND:%.*]] = fcmp ueq float [[X:%.*]], 4.200000e+01
; CHECK-NEXT:    [[INVCOND:%.*]] = fcmp one float [[X]], 4.300000e+01
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[INVCOND]], i32 [[F]], i32 [[T]]
; CHECK-NEXT:    [[R:%.*]] = xor i32 [[M2]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %cond = fcmp ueq float %x, 42.0
  %invcond = fcmp one float %x, 43.0
  %m1 = select i1 %cond, i32 %t, i32 %f
  %m2 = select i1 %invcond, i32 %f, i32 %t
  %r = xor i32 %m2, %m1
  ret i32 %r
}

; If we have both an inverted predicate and a 'not' op, recognize the double-negation.

define i32 @select_not_invert_pred_cond(i8 %x, i32 %t, i32 %f) {
; CHECK-LABEL: @select_not_invert_pred_cond(
; CHECK-NEXT:    [[COND:%.*]] = icmp ugt i8 [[X:%.*]], 42
; CHECK-NEXT:    [[INVCOND:%.*]] = icmp ule i8 [[X]], 42
; CHECK-NEXT:    [[NOT:%.*]] = xor i1 [[INVCOND]], true
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    ret i32 0
;
  %cond = icmp ugt i8 %x, 42
  %invcond = icmp ule i8 %x, 42
  %not = xor i1 %invcond, -1
  %m1 = select i1 %cond, i32 %t, i32 %f
  %m2 = select i1 %not, i32 %t, i32 %f
  %r = sub i32 %m1, %m2
  ret i32 %r
}

; If we have both an inverted predicate and a 'not' op, recognize the double-negation.

define i32 @select_not_invert_pred_cond_commute(i8 %x, i8 %y, i32 %t, i32 %f) {
; CHECK-LABEL: @select_not_invert_pred_cond_commute(
; CHECK-NEXT:    [[INVCOND:%.*]] = icmp ule i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[NOT:%.*]] = xor i1 [[INVCOND]], true
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[NOT]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    [[COND:%.*]] = icmp ugt i8 [[X]], [[Y]]
; CHECK-NEXT:    ret i32 0
;
  %invcond = icmp ule i8 %x, %y
  %not = xor i1 %invcond, -1
  %m2 = select i1 %not, i32 %t, i32 %f
  %cond = icmp ugt i8 %x, %y
  %m1 = select i1 %cond, i32 %t, i32 %f
  %r = sub i32 %m2, %m1
  ret i32 %r
}

; Negative test - not an inverted predicate.

define i32 @select_not_invert_pred_cond_wrong_pred(i8 %x, i8 %y, i32 %t, i32 %f) {
; CHECK-LABEL: @select_not_invert_pred_cond_wrong_pred(
; CHECK-NEXT:    [[INVCOND:%.*]] = icmp ult i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[NOT:%.*]] = xor i1 [[INVCOND]], true
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[NOT]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    [[COND:%.*]] = icmp ugt i8 [[X]], [[Y]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[T]], i32 [[F]]
; CHECK-NEXT:    [[R:%.*]] = sub i32 [[M2]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %invcond = icmp ult i8 %x, %y
  %not = xor i1 %invcond, -1
  %m2 = select i1 %not, i32 %t, i32 %f
  %cond = icmp ugt i8 %x, %y
  %m1 = select i1 %cond, i32 %t, i32 %f
  %r = sub i32 %m2, %m1
  ret i32 %r
}

; Negative test - cmp ops must match.

define i32 @select_not_invert_pred_cond_wrong_cmp_op(i8 %x, i8 %y, i32 %t, i32 %f) {
; CHECK-LABEL: @select_not_invert_pred_cond_wrong_cmp_op(
; CHECK-NEXT:    [[INVCOND:%.*]] = icmp ule i8 [[X:%.*]], 42
; CHECK-NEXT:    [[NOT:%.*]] = xor i1 [[INVCOND]], true
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[NOT]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    [[COND:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[T]], i32 [[F]]
; CHECK-NEXT:    [[R:%.*]] = sub i32 [[M2]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %invcond = icmp ule i8 %x, 42
  %not = xor i1 %invcond, -1
  %m2 = select i1 %not, i32 %t, i32 %f
  %cond = icmp ugt i8 %x, %y
  %m1 = select i1 %cond, i32 %t, i32 %f
  %r = sub i32 %m2, %m1
  ret i32 %r
}

; Negative test - select ops must be same (and not commuted).

define i32 @select_not_invert_pred_cond_wrong_select_op(i8 %x, i8 %y, i32 %t, i32 %f) {
; CHECK-LABEL: @select_not_invert_pred_cond_wrong_select_op(
; CHECK-NEXT:    [[INVCOND:%.*]] = icmp ule i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[NOT:%.*]] = xor i1 [[INVCOND]], true
; CHECK-NEXT:    [[M2:%.*]] = select i1 [[NOT]], i32 [[T:%.*]], i32 [[F:%.*]]
; CHECK-NEXT:    [[COND:%.*]] = icmp ugt i8 [[X]], [[Y]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[COND]], i32 [[F]], i32 [[T]]
; CHECK-NEXT:    [[R:%.*]] = sub i32 [[M2]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %invcond = icmp ule i8 %x, %y
  %not = xor i1 %invcond, -1
  %m2 = select i1 %not, i32 %t, i32 %f
  %cond = icmp ugt i8 %x, %y
  %m1 = select i1 %cond, i32 %f, i32 %t
  %r = sub i32 %m2, %m1
  ret i32 %r
}

; This test is a reproducer for a bug involving inverted min/max selects
; hashing differently but comparing as equal. It exhibits such a pair of
; values, and we run this test with -earlycse-debug-hash which would catch
; the disagreement and fail if it regressed.
; EarlyCSE should be able to detect the 2nd redundant `select` and eliminate
; it.
define i32 @inverted_max(i32 %i) {
; CHECK-LABEL: @inverted_max(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sle i32 0, [[I:%.*]]
; CHECK-NEXT:    [[M1:%.*]] = select i1 [[CMP]], i32 [[I]], i32 0
; CHECK-NEXT:    [[CMPINV:%.*]] = icmp sgt i32 0, [[I]]
; CHECK-NEXT:    [[R:%.*]] = add i32 [[M1]], [[M1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %cmp = icmp sle i32 0, %i
  %m1 = select i1 %cmp, i32 %i, i32 0
  %cmpinv = icmp sgt i32 0, %i
  %m2 = select i1 %cmpinv, i32 0, i32 %i
  %r = add i32 %m1, %m2
  ret i32 %r
}

; This test is a reproducer for a bug involving inverted min/max selects
; hashing differently but comparing as equal. It exhibits such a pair of
; values, and we run this test with -earlycse-debug-hash which would catch
; the disagreement and fail if it regressed. This test also includes a
; negation of each negation to check for the same issue one level deeper.
define void @not_not_min(ptr %px, ptr %py, ptr %pout) {
; CHECK-LABEL: @not_not_min(
; CHECK-NEXT:    [[X:%.*]] = load volatile i32, ptr [[PX:%.*]], align 4
; CHECK-NEXT:    [[Y:%.*]] = load volatile i32, ptr [[PY:%.*]], align 4
; CHECK-NEXT:    [[CMPA:%.*]] = icmp slt i32 [[X]], [[Y]]
; CHECK-NEXT:    [[CMPB:%.*]] = xor i1 [[CMPA]], true
; CHECK-NEXT:    [[RA:%.*]] = select i1 [[CMPA]], i32 [[X]], i32 [[Y]]
; CHECK-NEXT:    store volatile i32 [[RA]], ptr [[POUT:%.*]], align 4
; CHECK-NEXT:    store volatile i32 [[RA]], ptr [[POUT]], align 4
; CHECK-NEXT:    store volatile i32 [[RA]], ptr [[POUT]], align 4
; CHECK-NEXT:    ret void
;
  %x = load volatile i32, ptr %px
  %y = load volatile i32, ptr %py
  %cmpa = icmp slt i32 %x, %y
  %cmpb = xor i1 %cmpa, -1
  %cmpc = xor i1 %cmpb, -1
  %ra = select i1 %cmpa, i32 %x, i32 %y
  %rb = select i1 %cmpb, i32 %y, i32 %x
  %rc = select i1 %cmpc, i32 %x, i32 %y
  store volatile i32 %ra, ptr %pout
  store volatile i32 %rb, ptr %pout
  store volatile i32 %rc, ptr %pout
  ret void
}

; This would cause an assert/crash because we matched
; a ValueTracking select pattern that required 'nsw'
; on an operand, but we remove that flag as part of
; CSE matching/hashing.

define void @PR41083_1(i32 %span_left, i32 %clip_left) {
; CHECK-LABEL: @PR41083_1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[CLIP_LEFT:%.*]], [[SPAN_LEFT:%.*]]
; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[CLIP_LEFT]], [[SPAN_LEFT]]
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 0
; CHECK-NEXT:    ret void
;
  %cmp = icmp sgt i32 %clip_left, %span_left
  %sub = sub nsw i32 %clip_left, %span_left
  %cond = select i1 %cmp, i32 %sub, i32 0
  %cmp83292 = icmp slt i32 %cond, undef
  %sub2 = sub i32 %clip_left, %span_left
  %sel2 = select i1 %cmp, i32 %sub2, i32 0
  ret void
}

; This would cause an assert/crash because we matched
; a ValueTracking select pattern that required 'nsw'
; on an operand, but we remove that flag as part of
; CSE matching/hashing.

define i32 @PR41083_2(i32 %p) {
; CHECK-LABEL: @PR41083_2(
; CHECK-NEXT:    [[S:%.*]] = sub i32 0, [[P:%.*]]
; CHECK-NEXT:    [[A:%.*]] = ashr exact i32 [[S]], 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 0, [[A]]
; CHECK-NEXT:    [[SUB:%.*]] = sub i32 0, [[A]]
; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 [[SUB]], i32 0
; CHECK-NEXT:    [[M:%.*]] = mul i32 [[SEL]], [[SUB]]
; CHECK-NEXT:    ret i32 [[M]]
;
  %s = sub i32 0, %p
  %a = ashr exact i32 %s, 2
  %cmp = icmp sgt i32 0, %a
  %sub = sub nsw i32 0, %a
  %sel = select i1 %cmp, i32 %sub, i32 0
  %s2 = sub i32 0, %a
  %m = mul i32 %sel, %s2
  ret i32 %m
}

define float @maxnum(float %a, float %b) {
; CHECK-LABEL: @maxnum(
; CHECK-NEXT:    [[X:%.*]] = call float @llvm.maxnum.f32(float [[A:%.*]], float [[B:%.*]])
; CHECK-NEXT:    ret float 1.000000e+00
;
  %x = call float @llvm.maxnum.f32(float %a, float %b)
  %y = call float @llvm.maxnum.f32(float %b, float %a)
  %r = fdiv nnan float %x, %y
  ret float %r
}

define <2 x float> @minnum(<2 x float> %a, <2 x float> %b) {
; CHECK-LABEL: @minnum(
; CHECK-NEXT:    [[X:%.*]] = call fast <2 x float> @llvm.minnum.v2f32(<2 x float> [[A:%.*]], <2 x float> [[B:%.*]])
; CHECK-NEXT:    ret <2 x float> <float 1.000000e+00, float 1.000000e+00>
;
  %x = call fast <2 x float> @llvm.minnum.v2f32(<2 x float> %a, <2 x float> %b)
  %y = call fast <2 x float> @llvm.minnum.v2f32(<2 x float> %b, <2 x float> %a)
  %r = fdiv nnan <2 x float> %x, %y
  ret <2 x float> %r
}

define <2 x double> @maximum(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @maximum(
; CHECK-NEXT:    [[X:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]])
; CHECK-NEXT:    ret <2 x double> <double 1.000000e+00, double 1.000000e+00>
;
  %x = call fast <2 x double> @llvm.maximum.v2f64(<2 x double> %a, <2 x double> %b)
  %y = call <2 x double> @llvm.maximum.v2f64(<2 x double> %b, <2 x double> %a)
  %r = fdiv nnan <2 x double> %x, %y
  ret <2 x double> %r
}

define double @minimum(double %a, double %b) {
; CHECK-LABEL: @minimum(
; CHECK-NEXT:    [[X:%.*]] = call double @llvm.minimum.f64(double [[A:%.*]], double [[B:%.*]])
; CHECK-NEXT:    ret double 1.000000e+00
;
  %x = call nsz double @llvm.minimum.f64(double %a, double %b)
  %y = call ninf double @llvm.minimum.f64(double %b, double %a)
  %r = fdiv nnan double %x, %y
  ret double %r
}

define i16 @sadd_ov(i16 %a, i16 %b) {
; CHECK-LABEL: @sadd_ov(
; CHECK-NEXT:    [[X:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A:%.*]], i16 [[B:%.*]])
; CHECK-NEXT:    [[X1:%.*]] = extractvalue { i16, i1 } [[X]], 0
; CHECK-NEXT:    ret i16 [[X1]]
;
  %x = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
  %y = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %b, i16 %a)
  %x1 = extractvalue {i16, i1} %x, 0
  %y1 = extractvalue {i16, i1} %y, 0
  %o = or i16 %x1, %y1
  ret i16 %o
}

define <5 x i65> @uadd_ov(<5 x i65> %a, <5 x i65> %b) {
; CHECK-LABEL: @uadd_ov(
; CHECK-NEXT:    [[X:%.*]] = call { <5 x i65>, <5 x i1> } @llvm.uadd.with.overflow.v5i65(<5 x i65> [[A:%.*]], <5 x i65> [[B:%.*]])
; CHECK-NEXT:    [[X1:%.*]] = extractvalue { <5 x i65>, <5 x i1> } [[X]], 0
; CHECK-NEXT:    ret <5 x i65> [[X1]]
;
  %x = call {<5 x i65>, <5 x i1>} @llvm.uadd.with.overflow.v5i65(<5 x i65> %a, <5 x i65> %b)
  %y = call {<5 x i65>, <5 x i1>} @llvm.uadd.with.overflow.v5i65(<5 x i65> %b, <5 x i65> %a)
  %x1 = extractvalue {<5 x i65>, <5 x i1>} %x, 0
  %y1 = extractvalue {<5 x i65>, <5 x i1>} %y, 0
  %o = or <5 x i65> %x1, %y1
  ret <5 x i65> %o
}

define i37 @smul_ov(i37 %a, i37 %b) {
; CHECK-LABEL: @smul_ov(
; CHECK-NEXT:    [[X:%.*]] = call { i37, i1 } @llvm.smul.with.overflow.i37(i37 [[A:%.*]], i37 [[B:%.*]])
; CHECK-NEXT:    [[X1:%.*]] = extractvalue { i37, i1 } [[X]], 0
; CHECK-NEXT:    ret i37 [[X1]]
;
  %x = call {i37, i1} @llvm.smul.with.overflow.i37(i37 %a, i37 %b)
  %y = call {i37, i1} @llvm.smul.with.overflow.i37(i37 %b, i37 %a)
  %x1 = extractvalue {i37, i1} %x, 0
  %y1 = extractvalue {i37, i1} %y, 0
  %o = or i37 %x1, %y1
  ret i37 %o
}

define <2 x i31> @umul_ov(<2 x i31> %a, <2 x i31> %b) {
; CHECK-LABEL: @umul_ov(
; CHECK-NEXT:    [[X:%.*]] = call { <2 x i31>, <2 x i1> } @llvm.umul.with.overflow.v2i31(<2 x i31> [[A:%.*]], <2 x i31> [[B:%.*]])
; CHECK-NEXT:    [[X1:%.*]] = extractvalue { <2 x i31>, <2 x i1> } [[X]], 0
; CHECK-NEXT:    ret <2 x i31> [[X1]]
;
  %x = call {<2 x i31>, <2 x i1>} @llvm.umul.with.overflow.v2i31(<2 x i31> %a, <2 x i31> %b)
  %y = call {<2 x i31>, <2 x i1>} @llvm.umul.with.overflow.v2i31(<2 x i31> %b, <2 x i31> %a)
  %x1 = extractvalue {<2 x i31>, <2 x i1>} %x, 0
  %y1 = extractvalue {<2 x i31>, <2 x i1>} %y, 0
  %o = or <2 x i31> %x1, %y1
  ret <2 x i31> %o
}

define i64 @sadd_sat(i64 %a, i64 %b) {
; CHECK-LABEL: @sadd_sat(
; CHECK-NEXT:    [[X:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    ret i64 [[X]]
;
  %x = call i64 @llvm.sadd.sat.i64(i64 %a, i64 %b)
  %y = call i64 @llvm.sadd.sat.i64(i64 %b, i64 %a)
  %o = or i64 %x, %y
  ret i64 %o
}

define <2 x i64> @uadd_sat(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @uadd_sat(
; CHECK-NEXT:    [[X:%.*]] = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]])
; CHECK-NEXT:    ret <2 x i64> [[X]]
;
  %x = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %a, <2 x i64> %b)
  %y = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %b, <2 x i64> %a)
  %o = or <2 x i64> %x, %y
  ret <2 x i64> %o
}

define <2 x i64> @smax(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @smax(
; CHECK-NEXT:    [[X:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]])
; CHECK-NEXT:    ret <2 x i64> [[X]]
;
  %x = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %a, <2 x i64> %b)
  %y = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %b, <2 x i64> %a)
  %o = or <2 x i64> %x, %y
  ret <2 x i64> %o
}

define i4 @smin(i4 %a, i4 %b) {
; CHECK-LABEL: @smin(
; CHECK-NEXT:    [[X:%.*]] = call i4 @llvm.smin.i4(i4 [[A:%.*]], i4 [[B:%.*]])
; CHECK-NEXT:    ret i4 [[X]]
;
  %x = call i4 @llvm.smin.i4(i4 %a, i4 %b)
  %y = call i4 @llvm.smin.i4(i4 %b, i4 %a)
  %o = or i4 %x, %y
  ret i4 %o
}

define i67 @umax(i67 %a, i67 %b) {
; CHECK-LABEL: @umax(
; CHECK-NEXT:    [[X:%.*]] = call i67 @llvm.umax.i67(i67 [[A:%.*]], i67 [[B:%.*]])
; CHECK-NEXT:    ret i67 [[X]]
;
  %x = call i67 @llvm.umax.i67(i67 %a, i67 %b)
  %y = call i67 @llvm.umax.i67(i67 %b, i67 %a)
  %o = or i67 %x, %y
  ret i67 %o
}

define <3 x i17> @umin(<3 x i17> %a, <3 x i17> %b) {
; CHECK-LABEL: @umin(
; CHECK-NEXT:    [[X:%.*]] = call <3 x i17> @llvm.umin.v3i17(<3 x i17> [[A:%.*]], <3 x i17> [[B:%.*]])
; CHECK-NEXT:    ret <3 x i17> [[X]]
;
  %x = call <3 x i17> @llvm.umin.v3i17(<3 x i17> %a, <3 x i17> %b)
  %y = call <3 x i17> @llvm.umin.v3i17(<3 x i17> %b, <3 x i17> %a)
  %o = or <3 x i17> %x, %y
  ret <3 x i17> %o
}

; Negative test - mismatched intrinsics

define i4 @smin_umin(i4 %a, i4 %b) {
; CHECK-LABEL: @smin_umin(
; CHECK-NEXT:    [[X:%.*]] = call i4 @llvm.smin.i4(i4 [[A:%.*]], i4 [[B:%.*]])
; CHECK-NEXT:    [[Y:%.*]] = call i4 @llvm.umin.i4(i4 [[B]], i4 [[A]])
; CHECK-NEXT:    [[O:%.*]] = or i4 [[X]], [[Y]]
; CHECK-NEXT:    ret i4 [[O]]
;
  %x = call i4 @llvm.smin.i4(i4 %a, i4 %b)
  %y = call i4 @llvm.umin.i4(i4 %b, i4 %a)
  %o = or i4 %x, %y
  ret i4 %o
}

define i16 @smul_fix(i16 %a, i16 %b) {
; CHECK-LABEL: @smul_fix(
; CHECK-NEXT:    [[X:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 3)
; CHECK-NEXT:    ret i16 [[X]]
;
  %x = call i16 @llvm.smul.fix.i16(i16 %a, i16 %b, i32 3)
  %y = call i16 @llvm.smul.fix.i16(i16 %b, i16 %a, i32 3)
  %o = or i16 %x, %y
  ret i16 %o
}

define i16 @umul_fix(i16 %a, i16 %b, i32 %s) {
; CHECK-LABEL: @umul_fix(
; CHECK-NEXT:    [[X:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 1)
; CHECK-NEXT:    ret i16 [[X]]
;
  %x = call i16 @llvm.umul.fix.i16(i16 %a, i16 %b, i32 1)
  %y = call i16 @llvm.umul.fix.i16(i16 %b, i16 %a, i32 1)
  %o = or i16 %x, %y
  ret i16 %o
}

define <3 x i16> @smul_fix_sat(<3 x i16> %a, <3 x i16> %b) {
; CHECK-LABEL: @smul_fix_sat(
; CHECK-NEXT:    [[X:%.*]] = call <3 x i16> @llvm.smul.fix.sat.v3i16(<3 x i16> [[A:%.*]], <3 x i16> [[B:%.*]], i32 2)
; CHECK-NEXT:    ret <3 x i16> [[X]]
;
  %x = call <3 x i16> @llvm.smul.fix.sat.v3i16(<3 x i16> %a, <3 x i16> %b, i32 2)
  %y = call <3 x i16> @llvm.smul.fix.sat.v3i16(<3 x i16> %b, <3 x i16> %a, i32 2)
  %o = or <3 x i16> %x, %y
  ret <3 x i16> %o
}

define <3 x i16> @umul_fix_sat(<3 x i16> %a, <3 x i16> %b) {
; CHECK-LABEL: @umul_fix_sat(
; CHECK-NEXT:    [[X:%.*]] = call <3 x i16> @llvm.umul.fix.sat.v3i16(<3 x i16> [[A:%.*]], <3 x i16> [[B:%.*]], i32 3)
; CHECK-NEXT:    ret <3 x i16> [[X]]
;
  %x = call <3 x i16> @llvm.umul.fix.sat.v3i16(<3 x i16> %a, <3 x i16> %b, i32 3)
  %y = call <3 x i16> @llvm.umul.fix.sat.v3i16(<3 x i16> %b, <3 x i16> %a, i32 3)
  %o = or <3 x i16> %x, %y
  ret <3 x i16> %o
}

define i16 @umul_smul_fix(i16 %a, i16 %b, i32 %s) {
; CHECK-LABEL: @umul_smul_fix(
; CHECK-NEXT:    [[X:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 1)
; CHECK-NEXT:    [[Y:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[B]], i16 [[A]], i32 1)
; CHECK-NEXT:    [[O:%.*]] = or i16 [[X]], [[Y]]
; CHECK-NEXT:    ret i16 [[O]]
;
  %x = call i16 @llvm.umul.fix.i16(i16 %a, i16 %b, i32 1)
  %y = call i16 @llvm.smul.fix.i16(i16 %b, i16 %a, i32 1)
  %o = or i16 %x, %y
  ret i16 %o
}

define i16 @umul_fix_scale(i16 %a, i16 %b, i32 %s) {
; CHECK-LABEL: @umul_fix_scale(
; CHECK-NEXT:    [[X:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 1)
; CHECK-NEXT:    [[Y:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[B]], i16 [[A]], i32 2)
; CHECK-NEXT:    [[O:%.*]] = or i16 [[X]], [[Y]]
; CHECK-NEXT:    ret i16 [[O]]
;
  %x = call i16 @llvm.umul.fix.i16(i16 %a, i16 %b, i32 1)
  %y = call i16 @llvm.umul.fix.i16(i16 %b, i16 %a, i32 2)
  %o = or i16 %x, %y
  ret i16 %o
}

define float @fma(float %a, float %b, float %c) {
; CHECK-LABEL: @fma(
; CHECK-NEXT:    [[X:%.*]] = call float @llvm.fma.f32(float [[A:%.*]], float [[B:%.*]], float [[C:%.*]])
; CHECK-NEXT:    ret float 1.000000e+00
;
  %x = call float @llvm.fma.f32(float %a, float %b, float %c)
  %y = call float @llvm.fma.f32(float %b, float %a, float %c)
  %r = fdiv nnan float %x, %y
  ret float %r
}

define float @fma_fail(float %a, float %b, float %c) {
; CHECK-LABEL: @fma_fail(
; CHECK-NEXT:    [[X:%.*]] = call float @llvm.fma.f32(float [[A:%.*]], float [[B:%.*]], float [[C:%.*]])
; CHECK-NEXT:    [[Y:%.*]] = call float @llvm.fma.f32(float [[A]], float [[C]], float [[B]])
; CHECK-NEXT:    [[R:%.*]] = fdiv nnan float [[X]], [[Y]]
; CHECK-NEXT:    ret float [[R]]
;
  %x = call float @llvm.fma.f32(float %a, float %b, float %c)
  %y = call float @llvm.fma.f32(float %a, float %c, float %b)
  %r = fdiv nnan float %x, %y
  ret float %r
}

define float @fma_different_add_ops(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: @fma_different_add_ops(
; CHECK-NEXT:    [[X:%.*]] = call float @llvm.fma.f32(float [[A:%.*]], float [[B:%.*]], float [[C:%.*]])
; CHECK-NEXT:    [[Y:%.*]] = call float @llvm.fma.f32(float [[B]], float [[A]], float [[D:%.*]])
; CHECK-NEXT:    [[R:%.*]] = fdiv nnan float [[X]], [[Y]]
; CHECK-NEXT:    ret float [[R]]
;
  %x = call float @llvm.fma.f32(float %a, float %b, float %c)
  %y = call float @llvm.fma.f32(float %b, float %a, float %d)
  %r = fdiv nnan float %x, %y
  ret float %r
}

define <2 x double> @fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: @fmuladd(
; CHECK-NEXT:    [[X:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]])
; CHECK-NEXT:    ret <2 x double> <double 1.000000e+00, double 1.000000e+00>
;
  %x = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
  %y = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %b, <2 x double> %a, <2 x double> %c)
  %r = fdiv nnan <2 x double> %x, %y
  ret <2 x double> %r
}

define <2 x double> @fmuladd_fail1(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: @fmuladd_fail1(
; CHECK-NEXT:    [[X:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]])
; CHECK-NEXT:    [[Y:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[C]], <2 x double> [[B]], <2 x double> [[A]])
; CHECK-NEXT:    [[R:%.*]] = fdiv nnan <2 x double> [[X]], [[Y]]
; CHECK-NEXT:    ret <2 x double> [[R]]
;
  %x = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
  %y = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %c, <2 x double> %b, <2 x double> %a)
  %r = fdiv nnan <2 x double> %x, %y
  ret <2 x double> %r
}

define <2 x double> @fmuladd_fail2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: @fmuladd_fail2(
; CHECK-NEXT:    [[X:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]])
; CHECK-NEXT:    [[Y:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[A]], <2 x double> [[C]], <2 x double> [[B]])
; CHECK-NEXT:    [[R:%.*]] = fdiv nnan <2 x double> [[X]], [[Y]]
; CHECK-NEXT:    ret <2 x double> [[R]]
;
  %x = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
  %y = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %a, <2 x double> %c, <2 x double> %b)
  %r = fdiv nnan <2 x double> %x, %y
  ret <2 x double> %r
}

declare float @llvm.maxnum.f32(float, float)
declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
declare <2 x double> @llvm.maximum.v2f64(<2 x double>, <2 x double>)
declare double @llvm.minimum.f64(double, double)

declare {i16, i1} @llvm.sadd.with.overflow.i16(i16, i16)
declare {<5 x i65>, <5 x i1>} @llvm.uadd.with.overflow.v5i65(<5 x i65>, <5 x i65>)
declare {i37, i1} @llvm.smul.with.overflow.i37(i37, i37)
declare {<2 x i31>, <2 x i1>} @llvm.umul.with.overflow.v2i31(<2 x i31>, <2 x i31>)
declare i64 @llvm.sadd.sat.i64(i64, i64)
declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)

declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
declare i4 @llvm.smin.i4(i4, i4)
declare i4 @llvm.umin.i4(i4, i4)
declare i67 @llvm.umax.i67(i67, i67)
declare <3 x i17> @llvm.umin.v3i17(<3 x i17>, <3 x i17>)

declare i16 @llvm.smul.fix.i16(i16, i16, i32)
declare i16 @llvm.umul.fix.i16(i16, i16, i32)
declare <3 x i16> @llvm.smul.fix.sat.v3i16(<3 x i16>, <3 x i16>, i32)
declare <3 x i16> @llvm.umul.fix.sat.v3i16(<3 x i16>, <3 x i16>, i32)

declare float @llvm.fma.f32(float, float, float)
declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)