; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -force-streaming < %s | FileCheck %s
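
;
; MUL
;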

define <vscale x 2 x i64> @mul_lane_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: mul_lane_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul z0.d, z0.d, z1.d[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.lane.nxv2i64(<vscale x 2 x i64> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    i32 1)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @mul_lane_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: mul_lane_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul z0.s, z0.s, z1.s[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    i32 1)
  ret <vscale x 4 x i32> %out
}

define <vscale x 8 x i16> @mul_lane_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: mul_lane_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul z0.h, z0.h, z1.h[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.lane.nxv8i16(<vscale x 8 x i16> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    i32 1)
  ret <vscale x 8 x i16> %out
}
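
;
; MLA
;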

define <vscale x 2 x i64> @mla_lane_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: mla_lane_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mla z0.d, z1.d, z2.d[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    <vscale x 2 x i64> %c,
                                                                    i32 1)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @mla_lane_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: mla_lane_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mla z0.s, z1.s, z2.s[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mla.lane.nxv4i32(<vscale x 4 x i32> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    <vscale x 4 x i32> %c,
                                                                    i32 1)
  ret <vscale x 4 x i32> %out
}

define <vscale x 8 x i16> @mla_lane_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: mla_lane_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mla z0.h, z1.h, z2.h[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mla.lane.nxv8i16(<vscale x 8 x i16> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    <vscale x 8 x i16> %c,
                                                                    i32 1)
  ret <vscale x 8 x i16> %out
}
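
;
; MLS
;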

define <vscale x 2 x i64> @mls_lane_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: mls_lane_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mls z0.d, z1.d, z2.d[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mls.lane.nxv2i64(<vscale x 2 x i64> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    <vscale x 2 x i64> %c,
                                                                    i32 1)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @mls_lane_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: mls_lane_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mls z0.s, z1.s, z2.s[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    <vscale x 4 x i32> %c,
                                                                    i32 1)
  ret <vscale x 4 x i32> %out
}

define <vscale x 8 x i16> @mls_lane_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: mls_lane_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mls z0.h, z1.h, z2.h[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mls.lane.nxv8i16(<vscale x 8 x i16> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    <vscale x 8 x i16> %c,
                                                                    i32 1)
  ret <vscale x 8 x i16> %out
}

declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.lane.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mla.lane.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mla.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mls.lane.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mls.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)