; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
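
; UNPREDICATED

; The tests below check how loads of scalable vector types that are illegal
; for SVE get legalized: either by promoting to a wider element container
; (e.g. an nxv4i16 load becomes an unpacked ld1h into .s elements) or by
; splitting the value across multiple Z registers.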
define <vscale x 4 x i16> @load_promote_4i16(<vscale x 4 x i16>* %a) {
; CHECK-LABEL: load_promote_4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
  ret <vscale x 4 x i16> %load
}
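
; A <vscale x 16 x i16> load needs two Z registers, so it is split into two
; ld1h's, with the second addressed at a #1, mul vl offset from the base.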
define <vscale x 16 x i16> @load_split_i16(<vscale x 16 x i16>* %a) {
; CHECK-LABEL: load_split_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i16>, <vscale x 16 x i16>* %a
  ret <vscale x 16 x i16> %load
}
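
; Non-power-of-two element counts split too: <vscale x 24 x i16> becomes
; three register-sized ld1h's.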
define <vscale x 24 x i16> @load_split_24i16(<vscale x 24 x i16>* %a) {
; CHECK-LABEL: load_split_24i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 24 x i16>, <vscale x 24 x i16>* %a
  ret <vscale x 24 x i16> %load
}
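
; <vscale x 32 x i16> splits four ways, into z0-z3.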
define <vscale x 32 x i16> @load_split_32i16(<vscale x 32 x i16>* %a) {
; CHECK-LABEL: load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 32 x i16>, <vscale x 32 x i16>* %a
  ret <vscale x 32 x i16> %load
}
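
; The largest case here, <vscale x 16 x i64>, splits eight ways and fills
; the entire z0-z7 return-register block.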
define <vscale x 16 x i64> @load_split_16i64(<vscale x 16 x i64>* %a) {
; CHECK-LABEL: load_split_16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0, #4, mul vl]
; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x0, #5, mul vl]
; CHECK-NEXT:    ld1d { z6.d }, p0/z, [x0, #6, mul vl]
; CHECK-NEXT:    ld1d { z7.d }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i64>, <vscale x 16 x i64>* %a
  ret <vscale x 16 x i64> %load
}
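
; MASKED

; Masked loads take their governing predicate from %pg rather than a ptrue.
; When the load is split, the predicate must be split as well, which is done
; with punpklo/punpkhi.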
define <vscale x 2 x i32> @masked_load_promote_2i32(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: masked_load_promote_2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> undef)
  ret <vscale x 2 x i32> %load
}
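
; A <vscale x 32 x i1> mask already arrives in two predicate registers
; (p0 and p1), so the split load can use them directly.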
define <vscale x 32 x i8> @masked_load_split_32i8(<vscale x 32 x i8> *%a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_load_split_32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z1.b }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8> *%a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i8> undef)
  ret <vscale x 32 x i8> %load
}
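
; For .h elements each incoming predicate register covers twice as many
; lanes as one split piece needs, so p0 and p1 are each unpacked with
; punpklo/punpkhi before the four loads.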
define <vscale x 32 x i16> @masked_load_split_32i16(<vscale x 32 x i16> *%a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p1.b
; CHECK-NEXT:    punpkhi p1.h, p1.b
; CHECK-NEXT:    ld1h { z0.h }, p2/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p3/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z3.h }, p1/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16> *%a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i16> undef)
  ret <vscale x 32 x i16> %load
}
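
; A two-way split needs a single punpklo/punpkhi pair to divide the
; <vscale x 8 x i1> mask between the two ld1w's.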
define <vscale x 8 x i32> @masked_load_split_8i32(<vscale x 8 x i32> *%a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_load_split_8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32> *%a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i32> undef)
  ret <vscale x 8 x i32> %load
}
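
; A four-way split unpacks the <vscale x 8 x i1> mask twice: once into
; halves, then each half into quarters for the four ld1d's.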
define <vscale x 8 x i64> @masked_load_split_8i64(<vscale x 8 x i64> *%a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_load_split_8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p2.h, p1.b
; CHECK-NEXT:    punpkhi p1.h, p1.b
; CHECK-NEXT:    punpklo p3.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1d { z0.d }, p2/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p3/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64> *%a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i64> undef)
  ret <vscale x 8 x i64> %load
}

declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>*, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)

declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>*, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)

declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>*, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)

declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>*, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)