; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=0 < %s | FileCheck %s
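
; Masked gathers of 64-bit element vectors from a scalar base and a vector of
; 64-bit offsets should be lowered to SVE gather loads using the scaled-offset
; addressing mode [x<n>, z<m>.d, lsl #shift]. Sub-64-bit integer results that
; are zero-extended to i64 are expected to be widened with an 'and'.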
define <vscale x 2 x i64> @masked_gather_nxv2i16(i16* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_gather_nxv2i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
; CHECK-NEXT:    and z0.d, z0.d, #0xffff
; CHECK-NEXT:    ret
  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %vals.zext
}

define <vscale x 2 x i64> @masked_gather_nxv2i32(i32* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_gather_nxv2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
; CHECK-NEXT:    ret
  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %vals.zext
}

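; A gather of native 64-bit elements needs no extension, so only the ld1d is expected.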
define <vscale x 2 x i64> @masked_gather_nxv2i64(i64* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_gather_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
; CHECK-NEXT:    ret
  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %vals
}

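; Floating-point gathers use the loaded lanes directly, so no extension code is expected.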
define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_gather_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
; CHECK-NEXT:    ret
  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
  ret <vscale x 2 x half> %vals
}

define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_gather_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
; CHECK-NEXT:    ret
  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
  ret <vscale x 2 x float> %vals
}

define <vscale x 2 x double> @masked_gather_nxv2f64(double* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_gather_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
; CHECK-NEXT:    ret
  %ptrs = getelementptr double, double* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
  ret <vscale x 2 x double> %vals
}

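; Sign-extending gathers: the sub-64-bit lanes loaded by the gather are expected
; to be sign-extended with sxth/sxtw under an all-true predicate.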
define <vscale x 2 x i64> @masked_sgather_nxv2i16(i16* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sgather_nxv2i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %vals.sext
}

define <vscale x 2 x i64> @masked_sgather_nxv2i32(i32* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sgather_nxv2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %vals.sext
}

declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)