; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -mtriple=aarch64 -slp-vectorizer | FileCheck %s
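
; The scalar chain below xors each byte of two 8-byte buffers and folds the
; results into a single i8 with a chained `and`, seeded with the constant 1.
; The checks verify that the SLP vectorizer collapses the pattern into one
; <8 x i8> xor feeding @llvm.vector.reduce.and, with the seed applied as a
; final scalar `and` ([[OP_EXTRA]]). As a rough C equivalent (a sketch for
; orientation, not necessarily the original source):
;
;   struct buf { char vec[8]; };
;
;   char reduce_and(struct buf *a, struct buf *b) {
;     char sum = 1;
;     for (int i = 0; i < 8; i++)
;       sum &= a->vec[i] ^ b->vec[i];
;     return sum;
;   }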

%struct.buf = type { [8 x i8] }

define i8 @reduce_and(%struct.buf* %a, %struct.buf* %b) {
; CHECK-LABEL: @reduce_and(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], %struct.buf* [[A:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B:%.*]], i64 0, i32 0, i64 0
; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 1
; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 1
; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 2
; CHECK-NEXT:    [[ARRAYIDX3_2:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 2
; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 3
; CHECK-NEXT:    [[ARRAYIDX3_3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 3
; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 4
; CHECK-NEXT:    [[ARRAYIDX3_4:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 4
; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 5
; CHECK-NEXT:    [[ARRAYIDX3_5:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 5
; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 6
; CHECK-NEXT:    [[ARRAYIDX3_6:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 6
; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 7
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[ARRAYIDX]] to <8 x i8>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 1
; CHECK-NEXT:    [[ARRAYIDX3_7:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 7
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[ARRAYIDX3]] to <8 x i8>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[TMP2]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = xor <8 x i8> [[TMP3]], [[TMP1]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> [[TMP4]])
; CHECK-NEXT:    [[OP_EXTRA:%.*]] = and i8 [[TMP5]], 1
; CHECK-NEXT:    ret i8 [[OP_EXTRA]]
;
entry:
  %arrayidx = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 0
  %0 = load i8, i8* %arrayidx, align 1
  %arrayidx3 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 0
  %1 = load i8, i8* %arrayidx3, align 1
  %xor12 = xor i8 %1, %0
  %and13 = and i8 %xor12, 1
  %arrayidx.1 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 1
  %2 = load i8, i8* %arrayidx.1, align 1
  %arrayidx3.1 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 1
  %3 = load i8, i8* %arrayidx3.1, align 1
  %xor12.1 = xor i8 %3, %2
  %and13.1 = and i8 %xor12.1, %and13
  %arrayidx.2 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 2
  %4 = load i8, i8* %arrayidx.2, align 1
  %arrayidx3.2 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 2
  %5 = load i8, i8* %arrayidx3.2, align 1
  %xor12.2 = xor i8 %5, %4
  %and13.2 = and i8 %xor12.2, %and13.1
  %arrayidx.3 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 3
  %6 = load i8, i8* %arrayidx.3, align 1
  %arrayidx3.3 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 3
  %7 = load i8, i8* %arrayidx3.3, align 1
  %xor12.3 = xor i8 %7, %6
  %and13.3 = and i8 %xor12.3, %and13.2
  %arrayidx.4 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 4
  %8 = load i8, i8* %arrayidx.4, align 1
  %arrayidx3.4 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 4
  %9 = load i8, i8* %arrayidx3.4, align 1
  %xor12.4 = xor i8 %9, %8
  %and13.4 = and i8 %xor12.4, %and13.3
  %arrayidx.5 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 5
  %10 = load i8, i8* %arrayidx.5, align 1
  %arrayidx3.5 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 5
  %11 = load i8, i8* %arrayidx3.5, align 1
  %xor12.5 = xor i8 %11, %10
  %and13.5 = and i8 %xor12.5, %and13.4
  %arrayidx.6 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 6
  %12 = load i8, i8* %arrayidx.6, align 1
  %arrayidx3.6 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 6
  %13 = load i8, i8* %arrayidx3.6, align 1
  %xor12.6 = xor i8 %13, %12
  %and13.6 = and i8 %xor12.6, %and13.5
  %arrayidx.7 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 7
  %14 = load i8, i8* %arrayidx.7, align 1
  %arrayidx3.7 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 7
  %15 = load i8, i8* %arrayidx3.7, align 1
  %xor12.7 = xor i8 %15, %14
  %and13.7 = and i8 %xor12.7, %and13.6
  ret i8 %and13.7
}
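
; To regenerate the CHECK lines above after editing the IR (flag names may
; vary by LLVM revision; see the script's --help):
;   llvm/utils/update_test_checks.py --opt-binary=<path-to-opt> <this-file>.ll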