; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -asm-verbose=0 < %s | FileCheck %s
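
; Masked truncating stores: check that a trunc feeding llvm.masked.store is
; lowered to a single truncating predicated store (st1b/st1h/st1w) keyed on
; the wider source element type.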

define void @masked_trunc_store_nxv2i8(<vscale x 2 x i64> *%a, <vscale x 2 x i64> %val, <vscale x 2 x i8> *%b, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv2i8:
; CHECK-NEXT: st1b { z0.d }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i8> *%b, i32 8, <vscale x 2 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv2i16(<vscale x 2 x i64> *%a, <vscale x 2 x i64> %val, <vscale x 2 x i16> *%b, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv2i16:
; CHECK-NEXT: st1h { z0.d }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
  call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i16> *%b, i32 8, <vscale x 2 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv2i32(<vscale x 2 x i64> *%a, <vscale x 2 x i64> %val, <vscale x 2 x i32> *%b, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv2i32:
; CHECK-NEXT: st1w { z0.d }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
  call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i32> *%b, i32 8, <vscale x 2 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv4i8(<vscale x 4 x i32> *%a, <vscale x 4 x i32> %val, <vscale x 4 x i8> *%b, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv4i8:
; CHECK-NEXT: st1b { z0.s }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i8>
  call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i8> *%b, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv4i16(<vscale x 4 x i32> *%a, <vscale x 4 x i32> %val, <vscale x 4 x i16> *%b, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv4i16:
; CHECK-NEXT: st1h { z0.s }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i16>
  call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i16> *%b, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv8i8(<vscale x 8 x i16> *%a, <vscale x 8 x i16> %val, <vscale x 8 x i8> *%b, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv8i8:
; CHECK-NEXT: st1b { z0.h }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
  call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i8> *%b, i32 2, <vscale x 8 x i1> %mask)
  ret void
}

declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
declare void @llvm.masked.store.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i32, <vscale x 2 x i1>)
declare void @llvm.masked.store.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32, <vscale x 2 x i1>)
declare void @llvm.masked.store.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>*, i32, <vscale x 4 x i1>)
declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32, <vscale x 4 x i1>)
declare void @llvm.masked.store.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)