1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=0 < %s | FileCheck %s
3 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=1 < %s | FileCheck %s
5 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
6 ; unscaled unpacked 32-bit offsets
7 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; i16 gather, i32 offsets zext'd to i64: expects a single zero-extending ld1h
; with scaled uxtw #1 addressing; the i16->i64 zext folds into the load.
9 define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
10 ; CHECK-LABEL: masked_gather_nxv2i16:
12 ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
14 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
15 %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets.zext
16 %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
17 %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
18 ret <vscale x 2 x i64> %vals.zext
; i32 gather, offsets zext'd to i64: expects zero-extending ld1w with scaled
; uxtw #2 addressing; the i32->i64 zext of the result folds into the load.
21 define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
22 ; CHECK-LABEL: masked_gather_nxv2i32:
24 ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
26 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
27 %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets.zext
28 %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
29 %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
30 ret <vscale x 2 x i64> %vals.zext
; Full-width i64 gather: no extension needed, expects ld1d with scaled
; uxtw #3 addressing from the zero-extended 32-bit offsets.
33 define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
34 ; CHECK-LABEL: masked_gather_nxv2i64:
36 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
38 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
39 %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets.zext
40 %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
41 ret <vscale x 2 x i64> %vals
; half gather: same ld1h / uxtw #1 addressing as the i16 case, but the
; loaded halves are used directly as FP values (no extension).
44 define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
45 ; CHECK-LABEL: masked_gather_nxv2f16:
47 ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
49 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
50 %ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %offsets.zext
51 %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
52 ret <vscale x 2 x half> %vals
; bfloat gather (needs +bf16, see attributes #0): expects the same ld1h /
; uxtw #1 form as the half-precision case.
55 define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
56 ; CHECK-LABEL: masked_gather_nxv2bf16:
58 ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
60 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
61 %ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i64> %offsets.zext
62 %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
63 ret <vscale x 2 x bfloat> %vals
; float gather: expects ld1w with scaled uxtw #2 addressing.
66 define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
67 ; CHECK-LABEL: masked_gather_nxv2f32:
69 ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
71 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
72 %ptrs = getelementptr float, ptr %base, <vscale x 2 x i64> %offsets.zext
73 %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
74 ret <vscale x 2 x float> %vals
; double gather: expects ld1d with scaled uxtw #3 addressing.
77 define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
78 ; CHECK-LABEL: masked_gather_nxv2f64:
80 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
82 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
83 %ptrs = getelementptr double, ptr %base, <vscale x 2 x i64> %offsets.zext
84 %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
85 ret <vscale x 2 x double> %vals
; Signed variant: the i16->i64 sext of the result should fold into a
; sign-extending ld1sh (rather than a zero-extending ld1h).
88 define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
89 ; CHECK-LABEL: masked_sgather_nxv2i16:
91 ; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]
93 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
94 %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets.zext
95 %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
96 %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
97 ret <vscale x 2 x i64> %vals.sext
; Signed variant: the i32->i64 sext of the result should fold into a
; sign-extending ld1sw.
100 define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
101 ; CHECK-LABEL: masked_sgather_nxv2i32:
103 ; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2]
105 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
106 %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets.zext
107 %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
108 %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
109 ret <vscale x 2 x i64> %vals.sext
112 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
113 ; unscaled packed 32-bit offsets
114 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Packed (nxv4, .s registers) i16 gather: offsets zext'd to i64 in IR, but
; codegen should still use 32-bit .s vectors with ld1h / uxtw #1.
116 define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
117 ; CHECK-LABEL: masked_gather_nxv4i16:
119 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
121 %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
122 %ptrs = getelementptr i16, ptr %base, <vscale x 4 x i64> %offsets.zext
123 %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
124 %vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
125 ret <vscale x 4 x i32> %vals.zext
; Packed full-width i32 gather: no extension, expects ld1w / uxtw #2 on .s.
128 define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
129 ; CHECK-LABEL: masked_gather_nxv4i32:
131 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
133 %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
134 %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.zext
135 %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
136 ret <vscale x 4 x i32> %vals
; Packed half gather: expects ld1h / uxtw #1 on .s registers.
139 define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
140 ; CHECK-LABEL: masked_gather_nxv4f16:
142 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
144 %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
145 %ptrs = getelementptr half, ptr %base, <vscale x 4 x i64> %offsets.zext
146 %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
147 ret <vscale x 4 x half> %vals
; Packed bfloat gather (needs +bf16, see attributes #0): same ld1h / uxtw #1
; form as the packed half case.
150 define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
151 ; CHECK-LABEL: masked_gather_nxv4bf16:
153 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
155 %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
156 %ptrs = getelementptr bfloat, ptr %base, <vscale x 4 x i64> %offsets.zext
157 %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
158 ret <vscale x 4 x bfloat> %vals
; Packed float gather: expects ld1w / uxtw #2 on .s registers.
161 define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
162 ; CHECK-LABEL: masked_gather_nxv4f32:
164 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
166 %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
167 %ptrs = getelementptr float, ptr %base, <vscale x 4 x i64> %offsets.zext
168 %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
169 ret <vscale x 4 x float> %vals
; Packed signed variant: the i16->i32 sext of the result should fold into a
; sign-extending ld1sh on .s registers.
172 define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
173 ; CHECK-LABEL: masked_sgather_nxv4i16:
175 ; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw #1]
177 %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
178 %ptrs = getelementptr i16, ptr %base, <vscale x 4 x i64> %offsets.zext
179 %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
180 %vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
181 ret <vscale x 4 x i32> %vals.sext
184 declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
185 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
186 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
187 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
188 declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
189 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
190 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
192 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
193 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
194 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
195 declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
196 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
197 attributes #0 = { "target-features"="+sve,+bf16" }