1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes=slp-vectorizer,instcombine -S -mtriple=x86_64-unknown-linux -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
3 ; RUN: opt < %s -passes=slp-vectorizer,instcombine -S -mtriple=x86_64-unknown-linux -mattr=+avx | FileCheck %s --check-prefixes=AVX
4 ; RUN: opt < %s -passes=slp-vectorizer,instcombine -S -mtriple=x86_64-unknown-linux -mattr=+avx2 | FileCheck %s --check-prefixes=AVX
5 ; RUN: opt < %s -passes=slp-vectorizer,instcombine -S -mtriple=x86_64-unknown-linux -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512
6 ; RUN: opt < %s -passes=slp-vectorizer,instcombine -S -mtriple=x86_64-unknown-linux -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512
; Eight-element i32 arrays used by the test below: elements of @b are the
; load sources and @a is the store destination; align 16 keeps both
; vector-store friendly for the <8 x i32> form the AVX/AVX512 checks expect.
9 @b = global [8 x i32] zeroinitializer, align 16
10 @a = global [8 x i32] zeroinitializer, align 16
14 ; SSE-NEXT: [[TMP1:%.*]] = load i32, ptr @b, align 16
15 ; SSE-NEXT: store i32 [[TMP1]], ptr @a, align 16
16 ; SSE-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @b, i64 0, i64 2), align 8
17 ; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 1), align 4
18 ; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 2), align 8
19 ; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 3), align 4
20 ; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 4), align 16
21 ; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 5), align 4
22 ; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 6), align 8
23 ; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 7), align 4
27 ; AVX-NEXT: [[TMP1:%.*]] = load i32, ptr @b, align 16
28 ; AVX-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @b, i64 0, i64 2), align 8
29 ; AVX-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> poison, i32 [[TMP1]], i64 0
30 ; AVX-NEXT: [[TMP4:%.*]] = insertelement <8 x i32> [[TMP3]], i32 [[TMP2]], i64 1
31 ; AVX-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
32 ; AVX-NEXT: store <8 x i32> [[SHUFFLE]], ptr @a, align 16
36 ; AVX512-NEXT: [[TMP1:%.*]] = load i32, ptr @b, align 16
37 ; AVX512-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @b, i64 0, i64 2), align 8
38 ; AVX512-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> poison, i32 [[TMP1]], i64 0
39 ; AVX512-NEXT: [[TMP4:%.*]] = insertelement <8 x i32> [[TMP3]], i32 [[TMP2]], i64 1
40 ; AVX512-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
41 ; AVX512-NEXT: store <8 x i32> [[SHUFFLE]], ptr @a, align 16
42 ; AVX512-NEXT: ret void
44 %1 = load i32, ptr @b, align 16
45 store i32 %1, ptr @a, align 16
46 %2 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @b, i64 0, i64 2), align 8
47 store i32 %2, ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 1), align 4
48 store i32 %1, ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 2), align 8
49 store i32 %2, ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 3), align 4
50 store i32 %1, ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 4), align 16
51 store i32 %2, ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 5), align 4
52 store i32 %1, ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 6), align 8
53 store i32 %2, ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 7), align 4