; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown-linux -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=corei7-avx -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=core-avx2 -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX

@arr = local_unnamed_addr global [32 x i32] zeroinitializer, align 16

declare i32 @llvm.smax.i32(i32, i32)
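
; A 2-element smax reduction is expected to stay scalar on all tested targets:
; the CHECK lines verify two scalar loads feeding a single @llvm.smax.i32 call.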
define i32 @smax_v2i32(i32) {
; CHECK-LABEL: @smax_v2i32(
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP2]], i32 [[TMP3]])
; CHECK-NEXT:    ret i32 [[TMP4]]
;
  %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
  %3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
  %4 = call i32 @llvm.smax.i32(i32 %2, i32 %3)
  ret i32 %4
}
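
; For the 4-wide reduction the behavior is expected to split by target: SSE
; keeps the scalar smax chain, while AVX collapses it into a single <4 x i32>
; load feeding @llvm.vector.reduce.smax.v4i32.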
define i32 @smax_v4i32(i32) {
; SSE-LABEL: @smax_v4i32(
; SSE-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
; SSE-NEXT:    [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
; SSE-NEXT:    [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2), align 8
; SSE-NEXT:    [[TMP5:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 3), align 4
; SSE-NEXT:    [[TMP6:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP2]], i32 [[TMP3]])
; SSE-NEXT:    [[TMP7:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP6]], i32 [[TMP4]])
; SSE-NEXT:    [[TMP8:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP7]], i32 [[TMP5]])
; SSE-NEXT:    ret i32 [[TMP8]]
;
; AVX-LABEL: @smax_v4i32(
; AVX-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([32 x i32]* @arr to <4 x i32>*), align 16
; AVX-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP2]])
; AVX-NEXT:    ret i32 [[TMP3]]
;
  %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
  %3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
  %4 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2), align 8
  %5 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 3), align 4
  %6 = call i32 @llvm.smax.i32(i32 %2, i32 %3)
  %7 = call i32 @llvm.smax.i32(i32 %6, i32 %4)
  %8 = call i32 @llvm.smax.i32(i32 %7, i32 %5)
  ret i32 %8
}
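
; An 8-wide smax reduction is expected to vectorize under both check prefixes
; into one <8 x i32> load plus a single @llvm.vector.reduce.smax.v8i32 call.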
define i32 @smax_v8i32(i32) {
; CHECK-LABEL: @smax_v8i32(
; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr to <8 x i32>*), align 16
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
  %3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
  %4 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2), align 8
  %5 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 3), align 4
  %6 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 4), align 16
  %7 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 5), align 4
  %8 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 6), align 8
  %9 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 7), align 4
  %10 = call i32 @llvm.smax.i32(i32 %2, i32 %3)
  %11 = call i32 @llvm.smax.i32(i32 %10, i32 %4)
  %12 = call i32 @llvm.smax.i32(i32 %11, i32 %5)
  %13 = call i32 @llvm.smax.i32(i32 %12, i32 %6)
  %14 = call i32 @llvm.smax.i32(i32 %13, i32 %7)
  %15 = call i32 @llvm.smax.i32(i32 %14, i32 %8)
  %16 = call i32 @llvm.smax.i32(i32 %15, i32 %9)
  ret i32 %16
}
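
; The 16-wide reduction should likewise collapse on all tested targets to a
; single <16 x i32> load feeding @llvm.vector.reduce.smax.v16i32.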
define i32 @smax_v16i32(i32) {
; CHECK-LABEL: @smax_v16i32(
; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([32 x i32]* @arr to <16 x i32>*), align 16
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> [[TMP2]])
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
  %3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
  %4 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2), align 8
  %5 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 3), align 4
  %6 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 4), align 16
  %7 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 5), align 4
  %8 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 6), align 8
  %9 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 7), align 4
  %10 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 8), align 16
  %11 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 9), align 4
  %12 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 10), align 8
  %13 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 11), align 4
  %14 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 12), align 16
  %15 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 13), align 4
  %16 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 14), align 8
  %17 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 15), align 4
  %18 = call i32 @llvm.smax.i32(i32 %2, i32 %3)
  %19 = call i32 @llvm.smax.i32(i32 %18, i32 %4)
  %20 = call i32 @llvm.smax.i32(i32 %19, i32 %5)
  %21 = call i32 @llvm.smax.i32(i32 %20, i32 %6)
  %22 = call i32 @llvm.smax.i32(i32 %21, i32 %7)
  %23 = call i32 @llvm.smax.i32(i32 %22, i32 %8)
  %24 = call i32 @llvm.smax.i32(i32 %23, i32 %9)
  %25 = call i32 @llvm.smax.i32(i32 %24, i32 %10)
  %26 = call i32 @llvm.smax.i32(i32 %25, i32 %11)
  %27 = call i32 @llvm.smax.i32(i32 %26, i32 %12)
  %28 = call i32 @llvm.smax.i32(i32 %27, i32 %13)
  %29 = call i32 @llvm.smax.i32(i32 %28, i32 %14)
  %30 = call i32 @llvm.smax.i32(i32 %29, i32 %15)
  %31 = call i32 @llvm.smax.i32(i32 %30, i32 %16)
  %32 = call i32 @llvm.smax.i32(i32 %31, i32 %17)
  ret i32 %32
}