; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 < %s | FileCheck %s
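
; Merging forms: the trailing i32 operand is the immediate lane index (1, 3 and 7 below).
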
define <vscale x 8 x i16> @test_pmov_to_vector_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov w0, #1 // =0x1
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i1> %pn, i32 1)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @test_pmov_to_vector_i32(<vscale x 4 x i32> %zn, <vscale x 4 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov w0, #3 // =0x3
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i1> %pn, i32 3)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @test_pmov_to_vector_i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov w0, #7 // =0x7
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn, i32 7)
  ret <vscale x 2 x i64> %res
}

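; Zeroing forms: no lane index or merge operand; only the source predicate is passed.
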
define <vscale x 16 x i8> @test_pmov_to_vector_zero_i8(<vscale x 16 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8(<vscale x 16 x i1> %pn)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @test_pmov_to_vector_zero_i16(<vscale x 8 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16(<vscale x 8 x i1> %pn)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @test_pmov_to_vector_zero_i32(<vscale x 4 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32(<vscale x 4 x i1> %pn)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @test_pmov_to_vector_zero_i64(<vscale x 2 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64(<vscale x 2 x i1> %pn)
  ret <vscale x 2 x i64> %res
}

declare <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8(<vscale x 16 x i1>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16(<vscale x 8 x i1>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32(<vscale x 4 x i1>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64(<vscale x 2 x i1>)