; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v | FileCheck %s

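; Check the RISC-V cost model (riscv64 with the V extension) for the saturating
; arithmetic intrinsics llvm.{sadd,uadd,usub,ssub,ushl,sshl}.sat across scalar,
; fixed-length vector, and scalable vector integer types.
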
define void @sadd.sat() {
; CHECK-LABEL: 'sadd.sat'
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %10 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %19 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %28 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @uadd.sat() {
; CHECK-LABEL: 'uadd.sat'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %1 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %10 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %19 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %28 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @usub.sat() {
; CHECK-LABEL: 'usub.sat'
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %1 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %10 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %19 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %28 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @ssub.sat() {
; CHECK-LABEL: 'ssub.sat'
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %10 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %19 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %28 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @ushl.sat() {
; CHECK-LABEL: 'ushl.sat'
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.ushl.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.ushl.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.ushl.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.ushl.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.ushl.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.ushl.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.ushl.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.ushl.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.ushl.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.ushl.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %14 = call <16 x i16> @llvm.ushl.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.ushl.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.ushl.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.ushl.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.ushl.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.ushl.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %22 = call <8 x i32> @llvm.ushl.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %23 = call <16 x i32> @llvm.ushl.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.ushl.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.ushl.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.ushl.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.ushl.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.ushl.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %30 = call <4 x i64> @llvm.ushl.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %31 = call <8 x i64> @llvm.ushl.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %32 = call <16 x i64> @llvm.ushl.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.ushl.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.ushl.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.ushl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.ushl.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.ushl.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.ushl.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.ushl.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.ushl.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.ushl.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.ushl.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.ushl.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.ushl.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.ushl.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.ushl.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.ushl.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.ushl.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.ushl.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.ushl.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.ushl.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.ushl.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.ushl.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.ushl.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.ushl.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.ushl.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.ushl.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.ushl.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.ushl.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.ushl.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.ushl.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.ushl.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.ushl.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.ushl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @sshl.sat() {
; CHECK-LABEL: 'sshl.sat'
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.sshl.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.sshl.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.sshl.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.sshl.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.sshl.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.sshl.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.sshl.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.sshl.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.sshl.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %14 = call <16 x i16> @llvm.sshl.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.sshl.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.sshl.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.sshl.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.sshl.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %22 = call <8 x i32> @llvm.sshl.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %23 = call <16 x i32> @llvm.sshl.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.sshl.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.sshl.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.sshl.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.sshl.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %30 = call <4 x i64> @llvm.sshl.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %31 = call <8 x i64> @llvm.sshl.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %32 = call <16 x i64> @llvm.sshl.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.sshl.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.sshl.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.sshl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.sshl.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.sshl.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.sshl.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.sshl.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.sshl.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.sshl.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.sshl.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.sshl.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.sshl.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.sshl.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.sshl.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.sshl.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.sshl.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.sshl.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.sshl.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.sshl.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.sshl.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.sshl.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.sshl.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.sshl.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.sshl.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.sshl.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.sshl.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.sshl.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.sshl.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.sshl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

declare i8 @llvm.sadd.sat.i8(i8, i8)
declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.sadd.sat.i16(i16, i16)
declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.sadd.sat.i32(i32, i32)
declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.sadd.sat.i64(i64, i64)
declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.uadd.sat.i8(i8, i8)
declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.uadd.sat.i16(i16, i16)
declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.uadd.sat.i32(i32, i32)
declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.uadd.sat.i64(i64, i64)
declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.usub.sat.i8(i8, i8)
declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.usub.sat.i16(i16, i16)
declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.usub.sat.i32(i32, i32)
declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.usub.sat.i64(i64, i64)
declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.usub.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.ssub.sat.i8(i8, i8)
declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.ssub.sat.i16(i16, i16)
declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.ssub.sat.i32(i32, i32)
declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.ssub.sat.i64(i64, i64)
declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.ushl.sat.i8(i8, i8)
declare <2 x i8> @llvm.ushl.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.ushl.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.ushl.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.ushl.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.ushl.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.ushl.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.ushl.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.ushl.sat.i16(i16, i16)
declare <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.ushl.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.ushl.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.ushl.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.ushl.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.ushl.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.ushl.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.ushl.sat.i32(i32, i32)
declare <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.ushl.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.ushl.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.ushl.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.ushl.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.ushl.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.ushl.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.ushl.sat.i64(i64, i64)
declare <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.ushl.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.ushl.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.ushl.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.ushl.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.ushl.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.ushl.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.sshl.sat.i8(i8, i8)
declare <2 x i8> @llvm.sshl.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.sshl.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.sshl.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.sshl.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.sshl.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.sshl.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.sshl.sat.i16(i16, i16)
declare <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.sshl.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.sshl.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.sshl.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.sshl.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.sshl.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.sshl.sat.i32(i32, i32)
declare <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.sshl.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.sshl.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.sshl.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.sshl.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.sshl.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.sshl.sat.i64(i64, i64)
declare <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.sshl.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.sshl.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.sshl.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.sshl.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.sshl.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.sshl.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)