; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s
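
; Predicated shift-by-immediate intrinsics whose inactive lanes are zeroed
; (via a select against zeroinitializer) should use the zeroing MOVPRFX form.

; ASR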
define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg) {
; CHECK-LABEL: asr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, #8
; CHECK-NEXT:    ret
  %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 8))
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: asr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, #16
; CHECK-NEXT:    ret
  %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 16))
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg) {
; CHECK-LABEL: asr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, #32
; CHECK-NEXT:    ret
  %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 32))
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: asr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    asr z0.d, p0/m, z0.d, #64
; CHECK-NEXT:    ret
  %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 64))
  ret <vscale x 2 x i64> %res
}
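
; LSL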
define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg) {
; CHECK-LABEL: lsl_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, #7
; CHECK-NEXT:    ret
  %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 7))
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: lsl_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, #15
; CHECK-NEXT:    ret
  %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 15))
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg) {
; CHECK-LABEL: lsl_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, #31
; CHECK-NEXT:    ret
  %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 31))
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: lsl_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, #63
; CHECK-NEXT:    ret
  %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 63))
  ret <vscale x 2 x i64> %res
}
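
; LSR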
define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg) {
; CHECK-LABEL: lsr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, #8
; CHECK-NEXT:    ret
  %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 8))
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: lsr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, #16
; CHECK-NEXT:    ret
  %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 16))
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg) {
; CHECK-LABEL: lsr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, #32
; CHECK-NEXT:    ret
  %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 32))
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: lsr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, #64
; CHECK-NEXT:    ret
  %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 64))
  ret <vscale x 2 x i64> %res
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)