; RUN: opt -S -loop-reduce < %s | FileCheck %s --check-prefix=IR
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefix=ASM
; Note: To update this test, please run utils/update_test_checks.py and utils/update_llc_test_checks.py separately on the opt and llc RUN lines.

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-linux-gnu"

; These tests check that the IR coming out of LSR does not cast the input/output pointers from i16* to i8*,
; and that scaled-index addressing mode is leveraged in the generated assembly, i.e. ld1h { z1.h }, p0/z, [x0, x8, lsl #1].
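;
; As a rough illustration (not checked by this test): in the scaled-index form the
; induction variable x8 holds an element index and the hardware applies the lsl #1,
; i.e. the load reads from x0 + (x8 << 1). Had LSR rewritten the pointers through
; i8*, the offset would be a byte count, which the lsl #1 form cannot take, and the
; backend would need a separate address computation per access, along the lines of:
;   add x10, x0, x8
;   ld1h { z1.h }, p0/z, [x10]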

define void @ld_st_nxv8i16(i16* %in, i16* %out) {
; IR-LABEL: @ld_st_nxv8i16(
; IR-NEXT:  entry:
; IR-NEXT:    br label [[LOOP_PH:%.*]]
; IR:       loop.ph:
; IR-NEXT:    [[P_VEC_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> undef, i16 3, i32 0
; IR-NEXT:    [[P_VEC_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[P_VEC_SPLATINSERT]], <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; IR-NEXT:    [[VSCALE:%.*]] = call i64 @llvm.vscale.i64()
; IR-NEXT:    [[SCALED_VF:%.*]] = shl i64 [[VSCALE]], 3
; IR-NEXT:    br label [[LOOP:%.*]]
; IR:       loop:
; IR-NEXT:    [[INDVAR:%.*]] = phi i64 [ 0, [[LOOP_PH]] ], [ [[INDVAR_NEXT:%.*]], [[LOOP]] ]
; IR-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i16, i16* [[IN:%.*]], i64 [[INDVAR]]
; IR-NEXT:    [[SCEVGEP23:%.*]] = bitcast i16* [[SCEVGEP2]] to <vscale x 8 x i16>*
; IR-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[OUT:%.*]], i64 [[INDVAR]]
; IR-NEXT:    [[SCEVGEP1:%.*]] = bitcast i16* [[SCEVGEP]] to <vscale x 8 x i16>*
; IR-NEXT:    [[VAL:%.*]] = load <vscale x 8 x i16>, <vscale x 8 x i16>* [[SCEVGEP23]], align 16
; IR-NEXT:    [[ADDP_VEC:%.*]] = add <vscale x 8 x i16> [[VAL]], [[P_VEC_SPLAT]]
; IR-NEXT:    store <vscale x 8 x i16> [[ADDP_VEC]], <vscale x 8 x i16>* [[SCEVGEP1]], align 16
; IR-NEXT:    [[INDVAR_NEXT]] = add nsw i64 [[INDVAR]], [[SCALED_VF]]
; IR-NEXT:    [[EXIT_COND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 1024
; IR-NEXT:    br i1 [[EXIT_COND]], label [[LOOP_EXIT:%.*]], label [[LOOP]]
; IR:       loop.exit:
; IR-NEXT:    br label [[EXIT:%.*]]
; IR:       exit:
; IR-NEXT:    ret void
;
; ASM-LABEL: ld_st_nxv8i16:
; ASM:       // %bb.0: // %entry
; ASM-NEXT:    mov x8, xzr
; ASM-NEXT:    mov z0.h, #3 // =0x3
; ASM-NEXT:    cnth x9
; ASM-NEXT:    ptrue p0.h
; ASM-NEXT:  .LBB0_1: // %loop
; ASM-NEXT:    // =>This Inner Loop Header: Depth=1
; ASM-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
; ASM-NEXT:    add z1.h, z1.h, z0.h
; ASM-NEXT:    st1h { z1.h }, p0, [x1, x8, lsl #1]
; ASM-NEXT:    add x8, x8, x9
; ASM-NEXT:    cmp x8, #1024
; ASM-NEXT:    b.ne .LBB0_1
; ASM-NEXT:  // %bb.2: // %exit
; ASM-NEXT:    ret
entry:
  br label %loop.ph

loop.ph:                                          ; preds = %entry
  %p_vec.splatinsert = insertelement <vscale x 8 x i16> undef, i16 3, i32 0
  %p_vec.splat = shufflevector <vscale x 8 x i16> %p_vec.splatinsert, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %vscale = call i64 @llvm.vscale.i64()
  %scaled_vf = shl i64 %vscale, 3
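  ; %scaled_vf = vscale * 8 is the element count of one <vscale x 8 x i16>
  ; register, so the induction variable steps over a whole vector per iteration
  ; (the quantity SVE's cnth instruction returns).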
  br label %loop

loop:                                             ; preds = %loop, %loop.ph
  %indvar = phi i64 [ 0, %loop.ph ], [ %indvar.next, %loop ]
  %ptr.in = getelementptr inbounds i16, i16* %in, i64 %indvar
  %ptr.out = getelementptr inbounds i16, i16* %out, i64 %indvar
  %in.ptrcast = bitcast i16* %ptr.in to <vscale x 8 x i16>*
  %out.ptrcast = bitcast i16* %ptr.out to <vscale x 8 x i16>*
  %val = load <vscale x 8 x i16>, <vscale x 8 x i16>* %in.ptrcast, align 16
  %addp_vec = add <vscale x 8 x i16> %val, %p_vec.splat
  store <vscale x 8 x i16> %addp_vec, <vscale x 8 x i16>* %out.ptrcast, align 16
  %indvar.next = add nsw i64 %indvar, %scaled_vf
  %exit.cond = icmp eq i64 %indvar.next, 1024
  br i1 %exit.cond, label %loop.exit, label %loop

loop.exit:                                        ; preds = %loop
  br label %exit

exit:                                             ; preds = %loop.exit
  ret void
}

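; Same pattern as above, but exercised through the masked load/store intrinsics
; with an all-true predicate and a runtime trip count %n.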
define void @masked_ld_st_nxv8i16(i16* %in, i16* %out, i64 %n) {
; IR-LABEL: @masked_ld_st_nxv8i16(
; IR-NEXT:  entry:
; IR-NEXT:    br label [[LOOP_PH:%.*]]
; IR:       loop.ph:
; IR-NEXT:    [[P_VEC_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> undef, i16 3, i32 0
; IR-NEXT:    [[P_VEC_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[P_VEC_SPLATINSERT]], <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; IR-NEXT:    [[PTRUE_VEC_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
; IR-NEXT:    [[PTRUE_VEC_SPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[PTRUE_VEC_SPLATINSERT]], <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
; IR-NEXT:    [[VSCALE:%.*]] = call i64 @llvm.vscale.i64()
; IR-NEXT:    [[SCALED_VF:%.*]] = shl i64 [[VSCALE]], 3
; IR-NEXT:    br label [[LOOP:%.*]]
; IR:       loop:
; IR-NEXT:    [[INDVAR:%.*]] = phi i64 [ 0, [[LOOP_PH]] ], [ [[INDVAR_NEXT:%.*]], [[LOOP]] ]
; IR-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i16, i16* [[IN:%.*]], i64 [[INDVAR]]
; IR-NEXT:    [[SCEVGEP23:%.*]] = bitcast i16* [[SCEVGEP2]] to <vscale x 8 x i16>*
; IR-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[OUT:%.*]], i64 [[INDVAR]]
; IR-NEXT:    [[SCEVGEP1:%.*]] = bitcast i16* [[SCEVGEP]] to <vscale x 8 x i16>*
; IR-NEXT:    [[VAL:%.*]] = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[SCEVGEP23]], i32 4, <vscale x 8 x i1> [[PTRUE_VEC_SPLAT]], <vscale x 8 x i16> undef)
; IR-NEXT:    [[ADDP_VEC:%.*]] = add <vscale x 8 x i16> [[VAL]], [[P_VEC_SPLAT]]
; IR-NEXT:    call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> [[ADDP_VEC]], <vscale x 8 x i16>* [[SCEVGEP1]], i32 4, <vscale x 8 x i1> [[PTRUE_VEC_SPLAT]])
; IR-NEXT:    [[INDVAR_NEXT]] = add nsw i64 [[INDVAR]], [[SCALED_VF]]
; IR-NEXT:    [[EXIT_COND:%.*]] = icmp eq i64 [[N:%.*]], [[INDVAR_NEXT]]
; IR-NEXT:    br i1 [[EXIT_COND]], label [[LOOP_EXIT:%.*]], label [[LOOP]]
; IR:       loop.exit:
; IR-NEXT:    br label [[EXIT:%.*]]
; IR:       exit:
; IR-NEXT:    ret void
;
; ASM-LABEL: masked_ld_st_nxv8i16:
; ASM:       // %bb.0: // %entry
; ASM-NEXT:    mov x8, xzr
; ASM-NEXT:    mov z0.h, #3 // =0x3
; ASM-NEXT:    ptrue p0.h
; ASM-NEXT:    cnth x9
; ASM-NEXT:  .LBB1_1: // %loop
; ASM-NEXT:    // =>This Inner Loop Header: Depth=1
; ASM-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
; ASM-NEXT:    add z1.h, z1.h, z0.h
; ASM-NEXT:    st1h { z1.h }, p0, [x1, x8, lsl #1]
; ASM-NEXT:    add x8, x8, x9
; ASM-NEXT:    cmp x2, x8
; ASM-NEXT:    b.ne .LBB1_1
; ASM-NEXT:  // %bb.2: // %exit
; ASM-NEXT:    ret
entry:
  br label %loop.ph

loop.ph:                                          ; preds = %entry
  %p_vec.splatinsert = insertelement <vscale x 8 x i16> undef, i16 3, i32 0
  %p_vec.splat = shufflevector <vscale x 8 x i16> %p_vec.splatinsert, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %ptrue_vec.splatinsert = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
  %ptrue_vec.splat = shufflevector <vscale x 8 x i1> %ptrue_vec.splatinsert, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %vscale = call i64 @llvm.vscale.i64()
  %scaled_vf = shl i64 %vscale, 3
  br label %loop

loop:                                             ; preds = %loop, %loop.ph
  %indvar = phi i64 [ 0, %loop.ph ], [ %indvar.next, %loop ]
  %ptr.in = getelementptr inbounds i16, i16* %in, i64 %indvar
  %ptr.out = getelementptr inbounds i16, i16* %out, i64 %indvar
  %in.ptrcast = bitcast i16* %ptr.in to <vscale x 8 x i16>*
  %out.ptrcast = bitcast i16* %ptr.out to <vscale x 8 x i16>*
  %val = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %in.ptrcast, i32 4, <vscale x 8 x i1> %ptrue_vec.splat, <vscale x 8 x i16> undef)
  %addp_vec = add <vscale x 8 x i16> %val, %p_vec.splat
  call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %addp_vec, <vscale x 8 x i16>* %out.ptrcast, i32 4, <vscale x 8 x i1> %ptrue_vec.splat)
  %indvar.next = add nsw i64 %indvar, %scaled_vf
  %exit.cond = icmp eq i64 %indvar.next, %n
  br i1 %exit.cond, label %loop.exit, label %loop

loop.exit:                                        ; preds = %loop
  br label %exit

exit:                                             ; preds = %loop.exit
  ret void
}

declare i64 @llvm.vscale.i64()

declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)

declare void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>)