// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple loongarch64 -target-feature +f -O2 -emit-llvm %s -o - | FileCheck %s

#include <larchintrin.h>
// CHECK-LABEL: @dbar(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.dbar(i32 0)
// CHECK-NEXT:    tail call void @llvm.loongarch.dbar(i32 0)
// CHECK-NEXT:    ret void
//
void dbar() {
  __dbar(0);
  __builtin_loongarch_dbar(0);
}
// CHECK-LABEL: @ibar(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.ibar(i32 0)
// CHECK-NEXT:    tail call void @llvm.loongarch.ibar(i32 0)
// CHECK-NEXT:    ret void
//
void ibar() {
  __ibar(0);
  __builtin_loongarch_ibar(0);
}
// CHECK-LABEL: @loongarch_break(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.break(i32 1)
// CHECK-NEXT:    tail call void @llvm.loongarch.break(i32 1)
// CHECK-NEXT:    ret void
//
void loongarch_break() {
  __break(1);
  __builtin_loongarch_break(1);
}
// CHECK-LABEL: @syscall(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.syscall(i32 1)
// CHECK-NEXT:    tail call void @llvm.loongarch.syscall(i32 1)
// CHECK-NEXT:    ret void
//
void syscall() {
  __syscall(1);
  __builtin_loongarch_syscall(1);
}
// CHECK-LABEL: @csrrd_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.csrrd.w(i32 1)
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.csrrd.w(i32 1)
// CHECK-NEXT:    ret i32 0
//
unsigned int csrrd_w() {
  unsigned int a = __csrrd_w(1);
  unsigned int b = __builtin_loongarch_csrrd_w(1);
  return 0;
}
// CHECK-LABEL: @csrwr_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.csrwr.w(i32 [[A:%.*]], i32 1)
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.csrwr.w(i32 [[A]], i32 1)
// CHECK-NEXT:    ret i32 0
//
unsigned int csrwr_w(unsigned int a) {
  unsigned int b = __csrwr_w(a, 1);
  unsigned int c = __builtin_loongarch_csrwr_w(a, 1);
  return 0;
}
// CHECK-LABEL: @csrxchg_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.csrxchg.w(i32 [[A:%.*]], i32 [[B:%.*]], i32 1)
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.csrxchg.w(i32 [[A]], i32 [[B]], i32 1)
// CHECK-NEXT:    ret i32 0
//
unsigned int csrxchg_w(unsigned int a, unsigned int b) {
  unsigned int c = __csrxchg_w(a, b, 1);
  unsigned int d = __builtin_loongarch_csrxchg_w(a, b, 1);
  return 0;
}
// CHECK-LABEL: @crc_w_b_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = shl i32 [[A:%.*]], 24
// CHECK-NEXT:    [[CONV_I:%.*]] = ashr exact i32 [[TMP0]], 24
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.crc.w.b.w(i32 [[CONV_I]], i32 [[B:%.*]])
// CHECK-NEXT:    [[TMP2:%.*]] = tail call i32 @llvm.loongarch.crc.w.b.w(i32 [[A]], i32 [[B]])
// CHECK-NEXT:    ret i32 0
//
int crc_w_b_w(int a, int b) {
  int c = __crc_w_b_w(a, b);
  int d = __builtin_loongarch_crc_w_b_w(a, b);
  return 0;
}
// CHECK-LABEL: @crc_w_h_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = shl i32 [[A:%.*]], 16
// CHECK-NEXT:    [[CONV_I:%.*]] = ashr exact i32 [[TMP0]], 16
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.crc.w.h.w(i32 [[CONV_I]], i32 [[B:%.*]])
// CHECK-NEXT:    [[TMP2:%.*]] = tail call i32 @llvm.loongarch.crc.w.h.w(i32 [[A]], i32 [[B]])
// CHECK-NEXT:    ret i32 0
//
int crc_w_h_w(int a, int b) {
  int c = __crc_w_h_w(a, b);
  int d = __builtin_loongarch_crc_w_h_w(a, b);
  return 0;
}
// CHECK-LABEL: @crc_w_w_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.crc.w.w.w(i32 [[A:%.*]], i32 [[B:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.crc.w.w.w(i32 [[A]], i32 [[B]])
// CHECK-NEXT:    ret i32 0
//
int crc_w_w_w(int a, int b) {
  int c = __crc_w_w_w(a, b);
  int d = __builtin_loongarch_crc_w_w_w(a, b);
  return 0;
}
// CHECK-LABEL: @cacop_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.cacop.d(i64 1, i64 [[A:%.*]], i64 1024)
// CHECK-NEXT:    tail call void @llvm.loongarch.cacop.d(i64 1, i64 [[A]], i64 1024)
// CHECK-NEXT:    ret void
//
void cacop_d(unsigned long int a) {
  __cacop_d(1, a, 1024);
  __builtin_loongarch_cacop_d(1, a, 1024);
}
// CHECK-LABEL: @crc_w_d_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.crc.w.d.w(i64 [[A:%.*]], i32 [[B:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.crc.w.d.w(i64 [[A]], i32 [[B]])
// CHECK-NEXT:    ret i32 0
//
int crc_w_d_w(long int a, int b) {
  int c = __crc_w_d_w(a, b);
  int d = __builtin_loongarch_crc_w_d_w(a, b);
  return 0;
}
// CHECK-LABEL: @crcc_w_b_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = shl i32 [[A:%.*]], 24
// CHECK-NEXT:    [[CONV_I:%.*]] = ashr exact i32 [[TMP0]], 24
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.crcc.w.b.w(i32 [[CONV_I]], i32 [[B:%.*]])
// CHECK-NEXT:    [[TMP2:%.*]] = tail call i32 @llvm.loongarch.crcc.w.b.w(i32 [[A]], i32 [[B]])
// CHECK-NEXT:    ret i32 0
//
int crcc_w_b_w(int a, int b) {
  int c = __crcc_w_b_w(a, b);
  int d = __builtin_loongarch_crcc_w_b_w(a, b);
  return 0;
}
// CHECK-LABEL: @crcc_w_h_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = shl i32 [[A:%.*]], 16
// CHECK-NEXT:    [[CONV_I:%.*]] = ashr exact i32 [[TMP0]], 16
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.crcc.w.h.w(i32 [[CONV_I]], i32 [[B:%.*]])
// CHECK-NEXT:    [[TMP2:%.*]] = tail call i32 @llvm.loongarch.crcc.w.h.w(i32 [[A]], i32 [[B]])
// CHECK-NEXT:    ret i32 0
//
int crcc_w_h_w(int a, int b) {
  int c = __crcc_w_h_w(a, b);
  int d = __builtin_loongarch_crcc_w_h_w(a, b);
  return 0;
}
// CHECK-LABEL: @crcc_w_w_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.crcc.w.w.w(i32 [[A:%.*]], i32 [[B:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.crcc.w.w.w(i32 [[A]], i32 [[B]])
// CHECK-NEXT:    ret i32 0
//
int crcc_w_w_w(int a, int b) {
  int c = __crcc_w_w_w(a, b);
  int d = __builtin_loongarch_crcc_w_w_w(a, b);
  return 0;
}
// CHECK-LABEL: @crcc_w_d_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.crcc.w.d.w(i64 [[A:%.*]], i32 [[B:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.crcc.w.d.w(i64 [[A]], i32 [[B]])
// CHECK-NEXT:    ret i32 0
//
int crcc_w_d_w(long int a, int b) {
  int c = __crcc_w_d_w(a, b);
  int d = __builtin_loongarch_crcc_w_d_w(a, b);
  return 0;
}
// CHECK-LABEL: @csrrd_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.loongarch.csrrd.d(i32 1)
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.loongarch.csrrd.d(i32 1)
// CHECK-NEXT:    ret i64 0
//
unsigned long int csrrd_d() {
  unsigned long int a = __csrrd_d(1);
  unsigned long int b = __builtin_loongarch_csrrd_d(1);
  return 0;
}
// CHECK-LABEL: @csrwr_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.loongarch.csrwr.d(i64 [[A:%.*]], i32 1)
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.loongarch.csrwr.d(i64 [[A]], i32 1)
// CHECK-NEXT:    ret i64 0
//
unsigned long int csrwr_d(unsigned long int a) {
  unsigned long int b = __csrwr_d(a, 1);
  unsigned long int c = __builtin_loongarch_csrwr_d(a, 1);
  return 0;
}
// CHECK-LABEL: @csrxchg_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.loongarch.csrxchg.d(i64 [[A:%.*]], i64 [[B:%.*]], i32 1)
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.loongarch.csrxchg.d(i64 [[A]], i64 [[B]], i32 1)
// CHECK-NEXT:    ret i64 0
//
unsigned long int csrxchg_d(unsigned long int a, unsigned long int b) {
  unsigned long int c = __csrxchg_d(a, b, 1);
  unsigned long int d = __builtin_loongarch_csrxchg_d(a, b, 1);
  return 0;
}
// CHECK-LABEL: @iocsrrd_b(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.b(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.b(i32 [[A]])
// CHECK-NEXT:    ret i8 0
//
unsigned char iocsrrd_b(unsigned int a) {
  unsigned char b = __iocsrrd_b(a);
  unsigned char c = __builtin_loongarch_iocsrrd_b(a);
  return 0;
}
// CHECK-LABEL: @iocsrrd_h(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.h(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.h(i32 [[A]])
// CHECK-NEXT:    ret i16 0
//
unsigned short iocsrrd_h(unsigned int a) {
  unsigned short b = __iocsrrd_h(a);
  unsigned short c = __builtin_loongarch_iocsrrd_h(a);
  return 0;
}
// CHECK-LABEL: @iocsrrd_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.w(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.w(i32 [[A]])
// CHECK-NEXT:    ret i32 0
//
unsigned int iocsrrd_w(unsigned int a) {
  unsigned int b = __iocsrrd_w(a);
  unsigned int c = __builtin_loongarch_iocsrrd_w(a);
  return 0;
}
// CHECK-LABEL: @iocsrwr_b(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT:    tail call void @llvm.loongarch.iocsrwr.b(i32 [[CONV_I]], i32 [[B:%.*]])
// CHECK-NEXT:    tail call void @llvm.loongarch.iocsrwr.b(i32 [[CONV_I]], i32 [[B]])
// CHECK-NEXT:    ret void
//
void iocsrwr_b(unsigned char a, unsigned int b) {
  __iocsrwr_b(a, b);
  __builtin_loongarch_iocsrwr_b(a, b);
}
// CHECK-LABEL: @iocsrwr_h(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[CONV_I:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT:    tail call void @llvm.loongarch.iocsrwr.h(i32 [[CONV_I]], i32 [[B:%.*]])
// CHECK-NEXT:    tail call void @llvm.loongarch.iocsrwr.h(i32 [[CONV_I]], i32 [[B]])
// CHECK-NEXT:    ret void
//
void iocsrwr_h(unsigned short a, unsigned int b) {
  __iocsrwr_h(a, b);
  __builtin_loongarch_iocsrwr_h(a, b);
}
// CHECK-LABEL: @iocsrwr_w(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.iocsrwr.w(i32 [[A:%.*]], i32 [[B:%.*]])
// CHECK-NEXT:    tail call void @llvm.loongarch.iocsrwr.w(i32 [[A]], i32 [[B]])
// CHECK-NEXT:    ret void
//
void iocsrwr_w(unsigned int a, unsigned int b) {
  __iocsrwr_w(a, b);
  __builtin_loongarch_iocsrwr_w(a, b);
}
// CHECK-LABEL: @iocsrrd_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.loongarch.iocsrrd.d(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.loongarch.iocsrrd.d(i32 [[A]])
// CHECK-NEXT:    ret i64 0
//
unsigned long int iocsrrd_d(unsigned int a) {
  unsigned long int b = __iocsrrd_d(a);
  unsigned long int c = __builtin_loongarch_iocsrrd_d(a);
  return 0;
}
// CHECK-LABEL: @iocsrwr_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.iocsrwr.d(i64 [[A:%.*]], i32 [[B:%.*]])
// CHECK-NEXT:    tail call void @llvm.loongarch.iocsrwr.d(i64 [[A]], i32 [[B]])
// CHECK-NEXT:    ret void
//
void iocsrwr_d(unsigned long int a, unsigned int b) {
  __iocsrwr_d(a, b);
  __builtin_loongarch_iocsrwr_d(a, b);
}
// CHECK-LABEL: @asrtle_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.asrtle.d(i64 [[A:%.*]], i64 [[B:%.*]])
// CHECK-NEXT:    tail call void @llvm.loongarch.asrtle.d(i64 [[A]], i64 [[B]])
// CHECK-NEXT:    ret void
//
void asrtle_d(long int a, long int b) {
  __asrtle_d(a, b);
  __builtin_loongarch_asrtle_d(a, b);
}
// CHECK-LABEL: @asrtgt_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.asrtgt.d(i64 [[A:%.*]], i64 [[B:%.*]])
// CHECK-NEXT:    tail call void @llvm.loongarch.asrtgt.d(i64 [[A]], i64 [[B]])
// CHECK-NEXT:    ret void
//
void asrtgt_d(long int a, long int b) {
  __asrtgt_d(a, b);
  __builtin_loongarch_asrtgt_d(a, b);
}
// CHECK-LABEL: @lddir_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.loongarch.lddir.d(i64 [[A:%.*]], i64 1)
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.loongarch.lddir.d(i64 [[A]], i64 1)
// CHECK-NEXT:    ret i64 0
//
long int lddir_d(long int a) {
  long int b = __lddir_d(a, 1);
  long int c = __builtin_loongarch_lddir_d(a, 1);
  return 0;
}
// CHECK-LABEL: @ldpte_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.ldpte.d(i64 [[A:%.*]], i64 1)
// CHECK-NEXT:    tail call void @llvm.loongarch.ldpte.d(i64 [[A]], i64 1)
// CHECK-NEXT:    ret void
//
void ldpte_d(long int a) {
  __ldpte_d(a, 1);
  __builtin_loongarch_ldpte_d(a, 1);
}
// CHECK-LABEL: @cpucfg(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.cpucfg(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.cpucfg(i32 [[A]])
// CHECK-NEXT:    ret i32 0
//
unsigned int cpucfg(unsigned int a) {
  unsigned int b = __cpucfg(a);
  unsigned int c = __builtin_loongarch_cpucfg(a);
  return 0;
}
// CHECK-LABEL: @rdtime_d(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call { i64, i64 } asm sideeffect "rdtime.d $0, $1\0A\09", "=&r,=&r"() #[[ATTR1:[0-9]+]], !srcloc !2
// CHECK-NEXT:    ret void
//
void rdtime_d() {
  __rdtime_d();
}
// CHECK-LABEL: @rdtime(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call { i32, i32 } asm sideeffect "rdtimeh.w $0, $1\0A\09", "=&r,=&r"() #[[ATTR1]], !srcloc !3
// CHECK-NEXT:    [[TMP1:%.*]] = tail call { i32, i32 } asm sideeffect "rdtimel.w $0, $1\0A\09", "=&r,=&r"() #[[ATTR1]], !srcloc !4
// CHECK-NEXT:    ret void
//
void rdtime() {
  __rdtimeh_w();
  __rdtimel_w();
}
// CHECK-LABEL: @loongarch_movfcsr2gr(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.movfcsr2gr(i32 1)
// CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.loongarch.movfcsr2gr(i32 1)
// CHECK-NEXT:    ret i32 0
//
int loongarch_movfcsr2gr() {
  int a = __movfcsr2gr(1);
  int b = __builtin_loongarch_movfcsr2gr(1);
  return 0;
}
// CHECK-LABEL: @loongarch_movgr2fcsr(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    tail call void @llvm.loongarch.movgr2fcsr(i32 1, i32 [[A:%.*]])
// CHECK-NEXT:    tail call void @llvm.loongarch.movgr2fcsr(i32 1, i32 [[A]])
// CHECK-NEXT:    ret void
//
void loongarch_movgr2fcsr(int a) {
  __movgr2fcsr(1, a);
  __builtin_loongarch_movgr2fcsr(1, a);
}