// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +xcvalu -emit-llvm %s -o - \
// RUN:     | FileCheck %s

#include <stdint.h>
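
// These tests exercise the Clang builtins for the CORE-V ALU extension
// (XCValu) on riscv32 and check the LLVM IR they lower to at -O0.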

// CHECK-LABEL: @test_abs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[TMP0]], i1 true)
// CHECK-NEXT:    ret i32 [[TMP1]]
//
int test_abs(int a) {
  return __builtin_abs(a);
}
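
// slet/sletu ("set if less than or equal", signed/unsigned) have no
// dedicated intrinsic: as the CHECK lines below show, they are emitted as
// a plain icmp sle/ule followed by a zext of the i1 result to i32.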

// CHECK-LABEL: @test_alu_slet(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = icmp sle i32 [[TMP0]], [[TMP1]]
// CHECK-NEXT:    [[SLE:%.*]] = zext i1 [[TMP2]] to i32
// CHECK-NEXT:    ret i32 [[SLE]]
//
int test_alu_slet(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_slet(a, b);
}

// CHECK-LABEL: @test_alu_sletu(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i32 [[TMP0]], [[TMP1]]
// CHECK-NEXT:    [[SLEU:%.*]] = zext i1 [[TMP2]] to i32
// CHECK-NEXT:    ret i32 [[SLEU]]
//
int test_alu_sletu(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_sletu(a, b);
}
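
// exths/exthz and extbs/extbz sign- or zero-extend the low halfword or
// byte; as the CHECK lines below show, they are emitted as ordinary
// sext/zext instructions rather than a target intrinsic.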

// CHECK-LABEL: @test_alu_exths(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    store i16 [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[CONV:%.*]] = sext i16 [[TMP0]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[CONV]] to i16
// CHECK-NEXT:    [[EXTHS:%.*]] = sext i16 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTHS]]
//
int test_alu_exths(int16_t a) {
  return __builtin_riscv_cv_alu_exths(a);
}

// CHECK-LABEL: @test_alu_exthz(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    store i16 [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[CONV:%.*]] = zext i16 [[TMP0]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[CONV]] to i16
// CHECK-NEXT:    [[EXTHZ:%.*]] = zext i16 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTHZ]]
//
int test_alu_exthz(uint16_t a) {
  return __builtin_riscv_cv_alu_exthz(a);
}

// CHECK-LABEL: @test_alu_extbs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT:    store i8 [[A:%.*]], ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[CONV]] to i8
// CHECK-NEXT:    [[EXTBS:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTBS]]
//
int test_alu_extbs(int8_t a) {
  return __builtin_riscv_cv_alu_extbs(a);
}

// CHECK-LABEL: @test_alu_extbz(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT:    store i8 [[A:%.*]], ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP0]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[CONV]] to i8
// CHECK-NEXT:    [[EXTBZ:%.*]] = zext i8 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTBZ]]
//
int test_alu_extbz(uint8_t a) {
  return __builtin_riscv_cv_alu_extbz(a);
}
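
// clip/clipu lower to the @llvm.riscv.cv.alu.clip(u) intrinsics; the
// constant second argument (15 here) is passed through unchanged.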

// CHECK-LABEL: @test_alu_clip(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.cv.alu.clip(i32 [[TMP0]], i32 15)
// CHECK-NEXT:    ret i32 [[TMP1]]
//
int test_alu_clip(int32_t a) {
  return __builtin_riscv_cv_alu_clip(a, 15);
}

// CHECK-LABEL: @test_alu_clipu(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.cv.alu.clipu(i32 [[TMP0]], i32 15)
// CHECK-NEXT:    ret i32 [[TMP1]]
//
int test_alu_clipu(uint32_t a) {
  return __builtin_riscv_cv_alu_clipu(a, 15);
}
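
// The add/sub-with-shift family (addN, adduN, addRN, adduRN and the sub
// variants) lowers to the matching @llvm.riscv.cv.alu.* intrinsics, with
// the constant shift amount (0 in these tests) as the third operand.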

// CHECK-LABEL: @test_alu_addN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.addN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_addN(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_addN(a, b, 0);
}

// CHECK-LABEL: @test_alu_adduN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.adduN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_adduN(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_adduN(a, b, 0);
}

// CHECK-LABEL: @test_alu_addRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.addRN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_addRN(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_addRN(a, b, 0);
}

// CHECK-LABEL: @test_alu_adduRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.adduRN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_adduRN(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_adduRN(a, b, 0);
}

// CHECK-LABEL: @test_alu_subN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_subN(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_subN(a, b, 0);
}

// CHECK-LABEL: @test_alu_subuN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subuN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_subuN(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_subuN(a, b, 0);
}

// CHECK-LABEL: @test_alu_subRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subRN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_subRN(int32_t a, int32_t b) {
  return __builtin_riscv_cv_alu_subRN(a, b, 0);
}

// CHECK-LABEL: @test_alu_subuRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subuRN(i32 [[TMP0]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    ret i32 [[TMP2]]
//
int test_alu_subuRN(uint32_t a, uint32_t b) {
  return __builtin_riscv_cv_alu_subuRN(a, b, 0);
}