; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
; RUN: opt < %s -aa-pipeline=basic-aa -passes=slp-vectorizer -S | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

; Test if SLP can handle GEP expressions.
; The test performs the following actions:
; x->first = y->first + 16
; x->second = y->second + 16
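;
; A C-equivalent sketch of what is being vectorized (hypothetical source; the
; struct and parameter names are illustrative, not taken from this test):
;
;   struct pair { int *first, *second; };
;
;   void foo1(struct pair *restrict x, struct pair *restrict y) {
;     x->first  = y->first + 16;   /* GEP with constant index 16 */
;     x->second = y->second + 16;
;   }
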
define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
; CHECK-LABEL: @foo1(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[Y:%.*]], i64 0, i32 0
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[X:%.*]], i64 0, i32 0
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[Y]], i64 0, i32 1
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32** [[TMP1]] to <2 x i32*>*
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32*>, <2 x i32*>* [[TMP4]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, <2 x i32*> [[TMP5]], <2 x i64> <i64 16, i64 16>
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[X]], i64 0, i32 1
; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32** [[TMP2]] to <2 x i32*>*
; CHECK-NEXT: store <2 x i32*> [[TMP6]], <2 x i32*>* [[TMP8]], align 8
; CHECK-NEXT: ret void
;
  %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
  %2 = load i32*, i32** %1, align 8
  %3 = getelementptr inbounds i32, i32* %2, i64 16
  %4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
  store i32* %3, i32** %4, align 8
  %5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
  %6 = load i32*, i32** %5, align 8
  %7 = getelementptr inbounds i32, i32* %6, i64 16
  %8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
  store i32* %7, i32** %8, align 8
  ret void
}

; Test that we don't vectorize GEP expressions if the indexes are not constants.
; We can't produce efficient code in that case.
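;
; A C-equivalent sketch of the non-vectorizable case (hypothetical source,
; reusing the illustrative struct from the sketch above; the runtime index %i
; is what blocks vectorization):
;
;   void foo2(struct pair *restrict x, struct pair *restrict y, int i) {
;     x->first  = y->first + i;    /* GEP with non-constant index */
;     x->second = y->second + i;
;   }
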
define void @foo2 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y, i32 %i) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[Y:%.*]], i64 0, i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[I:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[X:%.*]], i64 0, i32 0
; CHECK-NEXT: store i32* [[TMP3]], i32** [[TMP4]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[Y]], i64 0, i32 1
; CHECK-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 [[I]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* [[X]], i64 0, i32 1
; CHECK-NEXT: store i32* [[TMP7]], i32** [[TMP8]], align 8
; CHECK-NEXT: ret void
;
  %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
  %2 = load i32*, i32** %1, align 8
  %3 = getelementptr inbounds i32, i32* %2, i32 %i
  %4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
  store i32* %3, i32** %4, align 8
  %5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
  %6 = load i32*, i32** %5, align 8
  %7 = getelementptr inbounds i32, i32* %6, i32 %i
  %8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
  store i32* %7, i32** %8, align 8
  ret void
}