// [OptTable] Fix typo VALUE => VALUES (NFCI) (#121523)
// [llvm-project.git] / clang / test / CodeGen / RISCV / riscv-xcvalu-c-api.c
// blob b4690a5f1c1ca5d180444d34241f7cb86113fc8c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +xcvalu -emit-llvm %s -o - \
// RUN:     | FileCheck %s

#include <stdint.h>
#include <riscv_corev_alu.h>
// CHECK-LABEL: @test_alu_slet(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK-NEXT:    [[SLE_I:%.*]] = zext i1 [[TMP4]] to i32
// CHECK-NEXT:    ret i32 [[SLE_I]]
//
int test_alu_slet(int32_t a, int32_t b) {
  return __riscv_cv_alu_slet(a, b);
}
// CHECK-LABEL: @test_alu_sletu(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i32 [[TMP2]], [[TMP3]]
// CHECK-NEXT:    [[SLEU_I:%.*]] = zext i1 [[TMP4]] to i32
// CHECK-NEXT:    ret i32 [[SLEU_I]]
//
int test_alu_sletu(uint32_t a, uint32_t b) {
  return __riscv_cv_alu_sletu(a, b);
}
// CHECK-LABEL: @test_alu_min(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[ELT_MIN_I:%.*]] = call i32 @llvm.smin.i32(i32 [[TMP2]], i32 [[TMP3]])
// CHECK-NEXT:    ret i32 [[ELT_MIN_I]]
//
int test_alu_min(int32_t a, int32_t b) {
  return __riscv_cv_alu_min(a, b);
}
// CHECK-LABEL: @test_alu_minu(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[ELT_MIN_I:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP2]], i32 [[TMP3]])
// CHECK-NEXT:    ret i32 [[ELT_MIN_I]]
//
int test_alu_minu(uint32_t a, uint32_t b) {
  return __riscv_cv_alu_minu(a, b);
}
// CHECK-LABEL: @test_alu_max(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[ELT_MAX_I:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP2]], i32 [[TMP3]])
// CHECK-NEXT:    ret i32 [[ELT_MAX_I]]
//
int test_alu_max(int32_t a, int32_t b) {
  return __riscv_cv_alu_max(a, b);
}
// CHECK-LABEL: @test_alu_maxu(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[ELT_MAX_I:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP2]], i32 [[TMP3]])
// CHECK-NEXT:    ret i32 [[ELT_MAX_I]]
//
int test_alu_maxu(uint32_t a, uint32_t b) {
  return __riscv_cv_alu_maxu(a, b);
}
// CHECK-LABEL: @test_alu_exths(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i16, align 2
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    store i16 [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2
// CHECK-NEXT:    store i16 [[TMP0]], ptr [[A_ADDR_I]], align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[A_ADDR_I]], align 2
// CHECK-NEXT:    [[CONV_I:%.*]] = sext i16 [[TMP1]] to i32
// CHECK-NEXT:    [[EXTHS_I:%.*]] = sext i16 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTHS_I]]
//
int test_alu_exths(int16_t a) {
  return __riscv_cv_alu_exths(a);
}
// CHECK-LABEL: @test_alu_exthz(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i16, align 2
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    store i16 [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2
// CHECK-NEXT:    store i16 [[TMP0]], ptr [[A_ADDR_I]], align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[A_ADDR_I]], align 2
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i16 [[TMP1]] to i32
// CHECK-NEXT:    [[EXTHZ_I:%.*]] = zext i16 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTHZ_I]]
//
int test_alu_exthz(uint16_t a) {
  return __riscv_cv_alu_exthz(a);
}
// CHECK-LABEL: @test_alu_extbs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT:    store i8 [[A:%.*]], ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1
// CHECK-NEXT:    store i8 [[TMP0]], ptr [[A_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[A_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[EXTBS_I:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTBS_I]]
//
int test_alu_extbs(int8_t a) {
  return __riscv_cv_alu_extbs(a);
}
// CHECK-LABEL: @test_alu_extbz(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT:    store i8 [[A:%.*]], ptr [[A_ADDR]], align 1
// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1
// CHECK-NEXT:    store i8 [[TMP0]], ptr [[A_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[A_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[EXTBZ_I:%.*]] = zext i8 [[TMP1]] to i32
// CHECK-NEXT:    ret i32 [[EXTBZ_I]]
//
int test_alu_extbz(uint8_t a) {
  return __riscv_cv_alu_extbz(a);
}
// CHECK-LABEL: @test_alu_clip(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 0, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.riscv.cv.alu.clip(i32 [[TMP1]], i32 [[TMP2]])
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_alu_clip(int32_t a) {
  return __riscv_cv_alu_clip(a, 0);
}
// CHECK-LABEL: @test_alu_clipu(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 0, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.riscv.cv.alu.clipu(i32 [[TMP1]], i32 [[TMP2]])
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_alu_clipu(uint32_t a) {
  return __riscv_cv_alu_clipu(a, 0);
}
// CHECK-LABEL: @test_alu_addN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[SHFT_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    store i8 0, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.addN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]])
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_alu_addN(int32_t a, int32_t b) {
  return __riscv_cv_alu_addN(a, b, 0);
}
// CHECK-LABEL: @test_alu_adduN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[SHFT_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    store i8 0, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.adduN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]])
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_alu_adduN(uint32_t a, uint32_t b) {
  return __riscv_cv_alu_adduN(a, b, 0);
}
// CHECK-LABEL: @test_alu_addRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[SHFT_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    store i8 0, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.addRN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]])
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_alu_addRN(int32_t a, int32_t b) {
  return __riscv_cv_alu_addRN(a, b, 0);
}
// CHECK-LABEL: @test_alu_adduRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[SHFT_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    store i8 0, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.adduRN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]])
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_alu_adduRN(uint32_t a, uint32_t b) {
  return __riscv_cv_alu_adduRN(a, b, 0);
}
// CHECK-LABEL: @test_alu_subN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[SHFT_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    store i8 0, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.subN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]])
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_alu_subN(int32_t a, int32_t b) {
  return __riscv_cv_alu_subN(a, b, 0);
}
// CHECK-LABEL: @test_alu_subuN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[SHFT_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    store i8 0, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.subuN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]])
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_alu_subuN(uint32_t a, uint32_t b) {
  return __riscv_cv_alu_subuN(a, b, 0);
}
// CHECK-LABEL: @test_alu_subRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[SHFT_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    store i8 0, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.subRN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]])
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_alu_subRN(int32_t a, int32_t b) {
  return __riscv_cv_alu_subRN(a, b, 0);
}
// CHECK-LABEL: @test_alu_subuRN(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR_I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[SHFT_ADDR_I:%.*]] = alloca i8, align 1
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    store i8 0, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32
// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.subuRN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]])
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_alu_subuRN(uint32_t a, uint32_t b) {
  return __riscv_cv_alu_subuRN(a, b, 0);
}