; RUN: opt -S -instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
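
; The dup is predicated by ptrue with pattern 1 (VL1), i.e. only the first
; lane is active, so it is expected to fold to an insertelement at index 0.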
define <vscale x 16 x i8> @dup_insertelement_0(<vscale x 16 x i8> %v, i8 %s) #0 {
; CHECK-LABEL: @dup_insertelement_0(
; CHECK: %insert = insertelement <vscale x 16 x i8> %v, i8 %s, i64 0
; CHECK-NEXT: ret <vscale x 16 x i8> %insert
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 1)
  %insert = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %v, <vscale x 16 x i1> %pg, i8 %s)
  ret <vscale x 16 x i8> %insert
}
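
; The ptrue pattern here is 2 (VL2), so more than one lane may be active and
; the dup must be left unchanged.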
define <vscale x 16 x i8> @dup_insertelement_1(<vscale x 16 x i8> %v, i8 %s) #0 {
; CHECK-LABEL: @dup_insertelement_1(
; CHECK: %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 2)
; CHECK-NEXT: %insert = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %v, <vscale x 16 x i1> %pg, i8 %s)
; CHECK-NEXT: ret <vscale x 16 x i8> %insert
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 2)
  %insert = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %v, <vscale x 16 x i1> %pg, i8 %s)
  ret <vscale x 16 x i8> %insert
}
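
; The predicate is arbitrary rather than a known ptrue, so no fold is expected.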
define <vscale x 16 x i8> @dup_insertelement_x(<vscale x 16 x i8> %v, i8 %s, <vscale x 16 x i1> %pg) #0 {
; CHECK-LABEL: @dup_insertelement_x(
; CHECK: %insert = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %v, <vscale x 16 x i1> %pg, i8 %s)
; CHECK-NEXT: ret <vscale x 16 x i8> %insert
  %insert = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %v, <vscale x 16 x i1> %pg, i8 %s)
  ret <vscale x 16 x i8> %insert
}
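
; Same as @dup_insertelement_0, but the VL1 predicate is round-tripped through
; svbool conversions; the fold to insertelement is still expected.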
define <vscale x 8 x i16> @dup_insertelement_0_convert(<vscale x 8 x i16> %v, i16 %s) #0 {
; CHECK-LABEL: @dup_insertelement_0_convert(
; CHECK: %insert = insertelement <vscale x 8 x i16> %v, i16 %s, i64 0
; CHECK-NEXT: ret <vscale x 8 x i16> %insert
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 1)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %pg)
  %2 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %1)
  %insert = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %v, <vscale x 8 x i1> %2, i16 %s)
  ret <vscale x 8 x i16> %insert
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)

declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1>)

attributes #0 = { "target-features"="+sve" }