; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes='default<O3>' -S | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main-arm-none-eabi"

; After inlining and simplification this should become a single tail-predicated
; 16x vector loop handling llvm.sadd.sat. __SSAT is inlined into arm_add_q7 and
; its now-dead definition is then removed by DCE.
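;
; For reference, a rough C equivalent of the unoptimized input below (a sketch
; in the style of the CMSIS-DSP arm_add_q7 kernel; not necessarily the exact
; source this IR was generated from):
;
;   void arm_add_q7(const int8_t *pSrcA, const int8_t *pSrcB, int8_t *pDst,
;                   uint32_t blockSize) {
;     uint32_t blkCnt = blockSize;
;     while (blkCnt > 0U) {
;       *pDst++ = (int8_t)__SSAT((int16_t)*pSrcA++ + *pSrcB++, 8);
;       blkCnt--;
;     }
;   }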
; Function Attrs: nounwind
define dso_local void @arm_add_q7(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockSize) #0 {
; CHECK-LABEL: @arm_add_q7(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP_NOT3:%.*]] = icmp eq i32 [[BLOCKSIZE:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP_NOT3]], label [[WHILE_END:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i32 [[BLOCKSIZE]], 15
; CHECK-NEXT:    [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -16
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRCA:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    [[NEXT_GEP14:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    [[NEXT_GEP15:%.*]] = getelementptr i8, ptr [[PSRCB:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = tail call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 [[BLOCKSIZE]])
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[NEXT_GEP]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT:    [[WIDE_MASKED_LOAD16:%.*]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[NEXT_GEP15]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[WIDE_MASKED_LOAD]], <16 x i8> [[WIDE_MASKED_LOAD16]])
; CHECK-NEXT:    tail call void @llvm.masked.store.v16i8.p0(<16 x i8> [[TMP2]], ptr [[NEXT_GEP14]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 16
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP4]], label [[WHILE_END]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       while.end:
; CHECK-NEXT:    ret void
;
entry:
  %pSrcA.addr = alloca ptr, align 4
  %pSrcB.addr = alloca ptr, align 4
  %pDst.addr = alloca ptr, align 4
  %blockSize.addr = alloca i32, align 4
  %blkCnt = alloca i32, align 4
  store ptr %pSrcA, ptr %pSrcA.addr, align 4
  store ptr %pSrcB, ptr %pSrcB.addr, align 4
  store ptr %pDst, ptr %pDst.addr, align 4
  store i32 %blockSize, ptr %blockSize.addr, align 4
  call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt)
  %0 = load i32, ptr %blockSize.addr, align 4
  store i32 %0, ptr %blkCnt, align 4
  br label %while.cond

while.cond: ; preds = %while.body, %entry
  %1 = load i32, ptr %blkCnt, align 4
  %cmp = icmp ugt i32 %1, 0
  br i1 %cmp, label %while.body, label %while.end

while.body: ; preds = %while.cond
  %2 = load ptr, ptr %pSrcA.addr, align 4
  %incdec.ptr = getelementptr inbounds i8, ptr %2, i32 1
  store ptr %incdec.ptr, ptr %pSrcA.addr, align 4
  %3 = load i8, ptr %2, align 1
  %conv = sext i8 %3 to i16
  %conv1 = sext i16 %conv to i32
  %4 = load ptr, ptr %pSrcB.addr, align 4
  %incdec.ptr2 = getelementptr inbounds i8, ptr %4, i32 1
  store ptr %incdec.ptr2, ptr %pSrcB.addr, align 4
  %5 = load i8, ptr %4, align 1
  %conv3 = sext i8 %5 to i32
  %add = add nsw i32 %conv1, %conv3
  %call = call i32 @__SSAT(i32 %add, i32 8)
  %conv4 = trunc i32 %call to i8
  %6 = load ptr, ptr %pDst.addr, align 4
  %incdec.ptr5 = getelementptr inbounds i8, ptr %6, i32 1
  store ptr %incdec.ptr5, ptr %pDst.addr, align 4
  store i8 %conv4, ptr %6, align 1
  %7 = load i32, ptr %blkCnt, align 4
  %dec = add i32 %7, -1
  store i32 %dec, ptr %blkCnt, align 4
  br label %while.cond

while.end: ; preds = %while.cond
  call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt)
  ret void
}

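; __SSAT saturates a signed value to the range of a sat-bit signed integer,
; i.e. [-(1 << (sat - 1)), (1 << (sat - 1)) - 1]. Roughly, in C (a sketch of
; the usual CMSIS-style helper; not necessarily the exact source of this IR):
;
;   int32_t __SSAT(int32_t val, uint32_t sat) {
;     if (sat >= 1U && sat <= 32U) {
;       int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
;       int32_t min = -1 - max;
;       if (val > max) return max;
;       if (val < min) return min;
;     }
;     return val;
;   }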
define internal i32 @__SSAT(i32 %val, i32 %sat) #1 {
entry:
  %retval = alloca i32, align 4
  %val.addr = alloca i32, align 4
  %sat.addr = alloca i32, align 4
  %max = alloca i32, align 4
  %min = alloca i32, align 4
  %cleanup.dest.slot = alloca i32, align 4
  store i32 %val, ptr %val.addr, align 4
  store i32 %sat, ptr %sat.addr, align 4
  %0 = load i32, ptr %sat.addr, align 4
  %cmp = icmp uge i32 %0, 1
  br i1 %cmp, label %land.lhs.true, label %if.end10

land.lhs.true: ; preds = %entry
  %1 = load i32, ptr %sat.addr, align 4
  %cmp1 = icmp ule i32 %1, 32
  br i1 %cmp1, label %if.then, label %if.end10

if.then: ; preds = %land.lhs.true
  call void @llvm.lifetime.start.p0(i64 4, ptr %max)
  %2 = load i32, ptr %sat.addr, align 4
  %sub = sub i32 %2, 1
  %shl = shl i32 1, %sub
  %sub2 = sub i32 %shl, 1
  store i32 %sub2, ptr %max, align 4
  call void @llvm.lifetime.start.p0(i64 4, ptr %min)
  %3 = load i32, ptr %max, align 4
  %sub3 = sub nsw i32 -1, %3
  store i32 %sub3, ptr %min, align 4
  %4 = load i32, ptr %val.addr, align 4
  %5 = load i32, ptr %max, align 4
  %cmp4 = icmp sgt i32 %4, %5
  br i1 %cmp4, label %if.then5, label %if.else

if.then5: ; preds = %if.then
  %6 = load i32, ptr %max, align 4
  store i32 %6, ptr %retval, align 4
  store i32 1, ptr %cleanup.dest.slot, align 4
  br label %cleanup

if.else: ; preds = %if.then
  %7 = load i32, ptr %val.addr, align 4
  %8 = load i32, ptr %min, align 4
  %cmp6 = icmp slt i32 %7, %8
  br i1 %cmp6, label %if.then7, label %if.end

if.then7: ; preds = %if.else
  %9 = load i32, ptr %min, align 4
  store i32 %9, ptr %retval, align 4
  store i32 1, ptr %cleanup.dest.slot, align 4
  br label %cleanup

if.end: ; preds = %if.else
  br label %if.end8

if.end8: ; preds = %if.end
  store i32 0, ptr %cleanup.dest.slot, align 4
  br label %cleanup

cleanup: ; preds = %if.end8, %if.then7, %if.then5
  call void @llvm.lifetime.end.p0(i64 4, ptr %min)
  call void @llvm.lifetime.end.p0(i64 4, ptr %max)
  %cleanup.dest = load i32, ptr %cleanup.dest.slot, align 4
  switch i32 %cleanup.dest, label %unreachable [
    i32 0, label %cleanup.cont
    i32 1, label %return
  ]

cleanup.cont: ; preds = %cleanup
  br label %if.end10

if.end10: ; preds = %cleanup.cont, %land.lhs.true, %entry
  %10 = load i32, ptr %val.addr, align 4
  store i32 %10, ptr %retval, align 4
  br label %return

return: ; preds = %if.end10, %cleanup
  %11 = load i32, ptr %retval, align 4
  ret i32 %11

unreachable: ; preds = %cleanup
  unreachable
}

declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)

attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
attributes #1 = { alwaysinline nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }