; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
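
; Check that a vp.trunc of a vp.ashr whose shift-amount operand is a
; vp.sext of a narrower vector is selected as a single narrowing shift,
; vnsra.wv, as the assertions below show.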

declare <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vsra_vv_nxv1i16(<vscale x 1 x i32> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %vr
}

define <vscale x 1 x i16> @vsra_vv_nxv1i16_unmasked(<vscale x 1 x i32> %a, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9
; CHECK-NEXT:    ret
  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  ret <vscale x 1 x i16> %vr
}

declare <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vsra_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %vr
}

define <vscale x 1 x i32> @vsra_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9
; CHECK-NEXT:    ret
  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  ret <vscale x 1 x i32> %vr
}