; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
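
; The tests below exercise combining an SVE2 rounding shift right
; (urshr/srshr with an all-true predicate) followed by an add into a
; single URSRA/SRSRA instruction.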

; URSRA

define <vscale x 16 x i8> @ursra_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: ursra_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ursra z0.b, z1.b, #1
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.urshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, i32 1)
  %add = add <vscale x 16 x i8> %a, %shift
  ret <vscale x 16 x i8> %add
}

define <vscale x 8 x i16> @ursra_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: ursra_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ursra z0.h, z1.h, #2
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.urshr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, i32 2)
  %add = add <vscale x 8 x i16> %a, %shift
  ret <vscale x 8 x i16> %add
}

define <vscale x 4 x i32> @ursra_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: ursra_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ursra z0.s, z1.s, #3
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.urshr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, i32 3)
  %add = add <vscale x 4 x i32> %a, %shift
  ret <vscale x 4 x i32> %add
}

define <vscale x 2 x i64> @ursra_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: ursra_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ursra z0.d, z1.d, #4
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.urshr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, i32 4)
  %add = add <vscale x 2 x i64> %a, %shift
  ret <vscale x 2 x i64> %add
}

; SRSRA

define <vscale x 16 x i8> @srsra_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: srsra_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    srsra z0.b, z1.b, #1
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %shift = call <vscale x 16 x i8> @llvm.aarch64.sve.srshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, i32 1)
  %add = add <vscale x 16 x i8> %a, %shift
  ret <vscale x 16 x i8> %add
}

define <vscale x 8 x i16> @srsra_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: srsra_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    srsra z0.h, z1.h, #2
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %shift = call <vscale x 8 x i16> @llvm.aarch64.sve.srshr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, i32 2)
  %add = add <vscale x 8 x i16> %a, %shift
  ret <vscale x 8 x i16> %add
}

define <vscale x 4 x i32> @srsra_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: srsra_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    srsra z0.s, z1.s, #3
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %shift = call <vscale x 4 x i32> @llvm.aarch64.sve.srshr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, i32 3)
  %add = add <vscale x 4 x i32> %a, %shift
  ret <vscale x 4 x i32> %add
}

define <vscale x 2 x i64> @srsra_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: srsra_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    srsra z0.d, z1.d, #4
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %shift = call <vscale x 2 x i64> @llvm.aarch64.sve.srshr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, i32 4)
  %add = add <vscale x 2 x i64> %a, %shift
  ret <vscale x 2 x i64> %add
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 immarg)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 immarg)

declare <vscale x 16 x i8> @llvm.aarch64.sve.urshr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.urshr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.urshr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.urshr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.srshr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.srshr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.srshr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.srshr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i32)

attributes #0 = { "target-features"="+sve,+sve2" }