; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
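
; Each intrinsic takes the governing predicate, a vector of base addresses, a
; scalar byte offset, and a prefetch operation selector (i32 1 = pldl1strm).
; For the immediate addressing form used below, the offset must be a multiple
; of the access size (1 for prfb, 2 for prfh, 4 for prfw, 8 for prfd) and no
; larger than 31 times that size.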

; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [z0.s, #7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
  ret void
}

; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [z0.d, #7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1)
  ret void
}

; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [z0.s, #6]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1)
  ret void
}

; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [z0.d, #6]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [z0.s, #12]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [z0.d, #12]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [z0.s, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [z0.d, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
  ret void
}
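
; Declarations for the gather-prefetch intrinsics exercised above.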
declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)