; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v,+xtheadmemidx,+xtheadmempair -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+xtheadmemidx,+xtheadmempair -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix RV64

define void @test(ptr %ref_array, ptr %sad_array) {
; RV32-LABEL: test:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    th.lwd a2, a3, (a0), 0, 3
; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT:    vle8.v v8, (a2)
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT:    vzext.vf4 v12, v8
; RV32-NEXT:    vmv.s.x v8, zero
; RV32-NEXT:    vredsum.vs v9, v12, v8
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    th.swia a0, (a1), 4, 0
; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT:    vle8.v v9, (a3)
; RV32-NEXT:    vmv.v.i v10, 0
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT:    vslideup.vi v9, v10, 4
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT:    vzext.vf4 v12, v9
; RV32-NEXT:    vredsum.vs v8, v12, v8
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vse32.v v8, (a1)
; RV32-NEXT:    ret
;
; RV64-LABEL: test:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    th.ldd a2, a3, (a0), 0, 4
; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT:    vle8.v v8, (a2)
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT:    vzext.vf4 v12, v8
; RV64-NEXT:    vmv.s.x v8, zero
; RV64-NEXT:    vredsum.vs v9, v12, v8
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    th.swia a0, (a1), 4, 0
; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT:    vle8.v v9, (a3)
; RV64-NEXT:    vmv.v.i v10, 0
; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT:    vslideup.vi v9, v10, 4
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT:    vzext.vf4 v12, v9
; RV64-NEXT:    vredsum.vs v8, v12, v8
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT:    vse32.v v8, (a1)
; RV64-NEXT:    ret
entry:
  %0 = load ptr, ptr %ref_array, align 8
  %1 = load <4 x i8>, ptr %0, align 1
  %2 = shufflevector <4 x i8> %1, <4 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
  %3 = zext <16 x i8> %2 to <16 x i32>
  %4 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
  store i32 %4, ptr %sad_array, align 4, !tbaa !0
  %arrayidx.1 = getelementptr ptr, ptr %ref_array, i64 1
  %5 = load ptr, ptr %arrayidx.1, align 8, !tbaa !4
  %6 = load <4 x i8>, ptr %5, align 1
  %7 = shufflevector <4 x i8> %6, <4 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
  %8 = zext <16 x i8> %7 to <16 x i32>
  %9 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %8)
  %arrayidx2.1 = getelementptr i32, ptr %sad_array, i64 1
  store i32 %9, ptr %arrayidx2.1, align 4
  ret void
}

declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)

!0 = !{!1, !1, i64 0}
!1 = !{!"int", !2, i64 0}
!2 = !{!"omnipotent char", !3, i64 0}
!3 = !{!"Simple C/C++ TBAA"}
!4 = !{!5, !5, i64 0}
!5 = !{!"any pointer", !2, i64 0}