; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
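; The scalar xor chain over the loads in the loop should be recognized as a
; reduction: part of it is vectorized into a <4 x i64> load feeding
; @llvm.vector.reduce.xor, while the remaining scalar loads are folded in
; through the trailing OP_RDX xor chain that feeds the phi.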
; CHECK-NEXT:    [[A:%.*]] = getelementptr [1000 x i64], ptr null, i64 0, i64 5
; CHECK-NEXT:    [[A1:%.*]] = getelementptr [1000 x i64], ptr null, i64 0, i64 6
; CHECK-NEXT:    br label [[WHILE:%.*]]
; CHECK:       while:
; CHECK-NEXT:    [[PH:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX26:%.*]], [[WHILE]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr null, align 8
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr [[A1]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr null, align 8
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr [[A]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP3]], <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> [[TMP4]])
; CHECK-NEXT:    [[OP_RDX:%.*]] = xor i64 0, [[TMP2]]
; CHECK-NEXT:    [[OP_RDX24:%.*]] = xor i64 [[TMP0]], [[TMP1]]
; CHECK-NEXT:    [[OP_RDX25:%.*]] = xor i64 [[OP_RDX]], [[OP_RDX24]]
; CHECK-NEXT:    [[OP_RDX26]] = xor i64 [[OP_RDX25]], [[TMP5]]
; CHECK-NEXT:    br label [[WHILE]]
;
entry:
  %a = getelementptr [1000 x i64], ptr null, i64 0, i64 5
  %a1 = getelementptr [1000 x i64], ptr null, i64 0, i64 6
  %a2 = getelementptr [1000 x i64], ptr null, i64 0, i64 7
  %a3 = getelementptr [1000 x i64], ptr null, i64 0, i64 8
  br label %while

while:
  %ph = phi i64 [ 0, %entry ], [ %xor, %while ]
  %0 = load i64, ptr null, align 8
  %1 = load i64, ptr %a1, align 16
  %2 = load i64, ptr %a2, align 8
  %3 = load i64, ptr %a3, align 16
  %4 = load i64, ptr null, align 8
  %5 = load i64, ptr %a, align 8
  %6 = load i64, ptr %a1, align 16
  %7 = load i64, ptr %a2, align 8
  %8 = load i64, ptr %a3, align 16
  %xor = xor i64 %35, %8