// Source: llvm-project — clang/test/CodeGenOpenCL/builtins-amdgcn.cl
// (blob 0bc9a54682d3e31ee3a8fb0dd1cecca2eacdfd42; page title: "Run DCE after a
//  LoopFlatten test to reduce spurious output [nfc]")
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-unknown-unknown -target-cpu tahiti -S -emit-llvm -o - %s | FileCheck -enable-var-scope %s

#pragma OPENCL EXTENSION cl_khr_fp64 : enable

// Convenience scalar/vector typedefs used throughout the tests below.
typedef unsigned long ulong;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef half __attribute__((ext_vector_type(2))) half2;
typedef short __attribute__((ext_vector_type(2))) short2;
typedef ushort __attribute__((ext_vector_type(2))) ushort2;
typedef uint __attribute__((ext_vector_type(4))) uint4;
// Tests for __builtin_amdgcn_div_scale{,f}: check the struct-returning
// intrinsic call and the extraction/zext/store of the bool flag out-param.

// CHECK-LABEL: @test_div_scale_f64
// CHECK: call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true)
// CHECK-DAG: [[FLAG:%.+]] = extractvalue { double, i1 } %{{.+}}, 1
// CHECK-DAG: [[VAL:%.+]] = extractvalue { double, i1 } %{{.+}}, 0
// CHECK: [[FLAGEXT:%.+]] = zext i1 [[FLAG]] to i32
// CHECK: store i32 [[FLAGEXT]]
void test_div_scale_f64(global double* out, global int* flagout, double a, double b)
{
  bool flag;
  *out = __builtin_amdgcn_div_scale(a, b, true, &flag);
  *flagout = flag;
}

// CHECK-LABEL: @test_div_scale_f32(
// CHECK: call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
// CHECK-DAG: [[FLAG:%.+]] = extractvalue { float, i1 } %{{.+}}, 1
// CHECK-DAG: [[VAL:%.+]] = extractvalue { float, i1 } %{{.+}}, 0
// CHECK: [[FLAGEXT:%.+]] = zext i1 [[FLAG]] to i8
// CHECK: store i8 [[FLAGEXT]]
void test_div_scale_f32(global float* out, global bool* flagout, float a, float b)
{
  bool flag;
  *out = __builtin_amdgcn_div_scalef(a, b, true, &flag);
  *flagout = flag;
}

// CHECK-LABEL: @test_div_scale_f32_global_ptr(
// CHECK: call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
// CHECK-DAG: [[FLAG:%.+]] = extractvalue { float, i1 } %{{.+}}, 1
// CHECK-DAG: [[VAL:%.+]] = extractvalue { float, i1 } %{{.+}}, 0
// CHECK: [[FLAGEXT:%.+]] = zext i1 [[FLAG]] to i8
// CHECK: store i8 [[FLAGEXT]]
void test_div_scale_f32_global_ptr(global float* out, global int* flagout, float a, float b, global bool* flag)
{
  *out = __builtin_amdgcn_div_scalef(a, b, true, flag);
}

// CHECK-LABEL: @test_div_scale_f32_generic_ptr(
// CHECK: call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
// CHECK-DAG: [[FLAG:%.+]] = extractvalue { float, i1 } %{{.+}}, 1
// CHECK-DAG: [[VAL:%.+]] = extractvalue { float, i1 } %{{.+}}, 0
// CHECK: [[FLAGEXT:%.+]] = zext i1 [[FLAG]] to i8
// CHECK: store i8 [[FLAGEXT]]
void test_div_scale_f32_generic_ptr(global float* out, global int* flagout, float a, float b, global bool* flag_arg)
{
  generic bool* flag = flag_arg;
  *out = __builtin_amdgcn_div_scalef(a, b, true, flag);
}
// CHECK-LABEL: @test_div_fmas_f32
// CHECK: call float @llvm.amdgcn.div.fmas.f32
void test_div_fmas_f32(global float* out, float a, float b, float c, int d)
{
  *out = __builtin_amdgcn_div_fmasf(a, b, c, d);
}

// CHECK-LABEL: @test_div_fmas_f64
// CHECK: call double @llvm.amdgcn.div.fmas.f64
void test_div_fmas_f64(global double* out, double a, double b, double c, int d)
{
  *out = __builtin_amdgcn_div_fmas(a, b, c, d);
}

// CHECK-LABEL: @test_div_fixup_f32
// CHECK: call float @llvm.amdgcn.div.fixup.f32
void test_div_fixup_f32(global float* out, float a, float b, float c)
{
  *out = __builtin_amdgcn_div_fixupf(a, b, c);
}

// CHECK-LABEL: @test_div_fixup_f64
// CHECK: call double @llvm.amdgcn.div.fixup.f64
void test_div_fixup_f64(global double* out, double a, double b, double c)
{
  *out = __builtin_amdgcn_div_fixup(a, b, c);
}

// CHECK-LABEL: @test_trig_preop_f32
// CHECK: call float @llvm.amdgcn.trig.preop.f32
void test_trig_preop_f32(global float* out, float a, int b)
{
  *out = __builtin_amdgcn_trig_preopf(a, b);
}

// CHECK-LABEL: @test_trig_preop_f64
// CHECK: call double @llvm.amdgcn.trig.preop.f64
void test_trig_preop_f64(global double* out, double a, int b)
{
  *out = __builtin_amdgcn_trig_preop(a, b);
}
// CHECK-LABEL: @test_rcp_f32
// CHECK: call float @llvm.amdgcn.rcp.f32
void test_rcp_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_rcpf(a);
}

// CHECK-LABEL: @test_rcp_f64
// CHECK: call double @llvm.amdgcn.rcp.f64
void test_rcp_f64(global double* out, double a)
{
  *out = __builtin_amdgcn_rcp(a);
}

// CHECK-LABEL: @test_sqrt_f32
// CHECK: call float @llvm.amdgcn.sqrt.f32
void test_sqrt_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_sqrtf(a);
}

// CHECK-LABEL: @test_sqrt_f64
// CHECK: call double @llvm.amdgcn.sqrt.f64
void test_sqrt_f64(global double* out, double a)
{
  *out = __builtin_amdgcn_sqrt(a);
}

// CHECK-LABEL: @test_rsq_f32
// CHECK: call float @llvm.amdgcn.rsq.f32
void test_rsq_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_rsqf(a);
}

// CHECK-LABEL: @test_rsq_f64
// CHECK: call double @llvm.amdgcn.rsq.f64
void test_rsq_f64(global double* out, double a)
{
  *out = __builtin_amdgcn_rsq(a);
}

// CHECK-LABEL: @test_rsq_clamp_f32
// CHECK: call float @llvm.amdgcn.rsq.clamp.f32
void test_rsq_clamp_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_rsq_clampf(a);
}

// CHECK-LABEL: @test_rsq_clamp_f64
// CHECK: call double @llvm.amdgcn.rsq.clamp.f64
void test_rsq_clamp_f64(global double* out, double a)
{
  *out = __builtin_amdgcn_rsq_clamp(a);
}
// CHECK-LABEL: @test_sin_f32
// CHECK: call float @llvm.amdgcn.sin.f32
void test_sin_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_sinf(a);
}

// CHECK-LABEL: @test_cos_f32
// CHECK: call float @llvm.amdgcn.cos.f32
void test_cos_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_cosf(a);
}

// CHECK-LABEL: @test_log_f32
// CHECK: call float @llvm.amdgcn.log.f32
void test_log_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_logf(a);
}

// CHECK-LABEL: @test_exp2_f32
// CHECK: call float @llvm.amdgcn.exp2.f32
void test_exp2_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_exp2f(a);
}

// CHECK-LABEL: @test_log_clamp_f32
// CHECK: call float @llvm.amdgcn.log.clamp.f32
void test_log_clamp_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_log_clampf(a);
}

// NOTE: the ldexp builtins lower to the generic llvm.ldexp intrinsic,
// not an amdgcn-prefixed one.
// CHECK-LABEL: @test_ldexp_f32
// CHECK: call float @llvm.ldexp.f32.i32
void test_ldexp_f32(global float* out, float a, int b)
{
  *out = __builtin_amdgcn_ldexpf(a, b);
}

// CHECK-LABEL: @test_ldexp_f64
// CHECK: call double @llvm.ldexp.f64.i32
void test_ldexp_f64(global double* out, double a, int b)
{
  *out = __builtin_amdgcn_ldexp(a, b);
}
// CHECK-LABEL: @test_frexp_mant_f32
// CHECK: call float @llvm.amdgcn.frexp.mant.f32
void test_frexp_mant_f32(global float* out, float a)
{
  *out = __builtin_amdgcn_frexp_mantf(a);
}

// CHECK-LABEL: @test_frexp_mant_f64
// CHECK: call double @llvm.amdgcn.frexp.mant.f64
void test_frexp_mant_f64(global double* out, double a)
{
  *out = __builtin_amdgcn_frexp_mant(a);
}

// CHECK-LABEL: @test_frexp_exp_f32
// CHECK: call i32 @llvm.amdgcn.frexp.exp.i32.f32
void test_frexp_exp_f32(global int* out, float a)
{
  *out = __builtin_amdgcn_frexp_expf(a);
}

// CHECK-LABEL: @test_frexp_exp_f64
// CHECK: call i32 @llvm.amdgcn.frexp.exp.i32.f64
void test_frexp_exp_f64(global int* out, double a)
{
  *out = __builtin_amdgcn_frexp_exp(a);
}

// CHECK-LABEL: @test_fract_f32
// CHECK: call float @llvm.amdgcn.fract.f32
void test_fract_f32(global int* out, float a)
{
  *out = __builtin_amdgcn_fractf(a);
}

// CHECK-LABEL: @test_fract_f64
// CHECK: call double @llvm.amdgcn.fract.f64
void test_fract_f64(global int* out, double a)
{
  *out = __builtin_amdgcn_fract(a);
}

// CHECK-LABEL: @test_lerp
// CHECK: call i32 @llvm.amdgcn.lerp
void test_lerp(global int* out, int a, int b, int c)
{
  *out = __builtin_amdgcn_lerp(a, b, c);
}
// icmp builtins: the last operand is the comparison predicate and must be an
// integer constant expression (note constant folding of 39-1 and 30+5 below).

// CHECK-LABEL: @test_sicmp_i32
// CHECK: call i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 32)
void test_sicmp_i32(global ulong* out, int a, int b)
{
  *out = __builtin_amdgcn_sicmp(a, b, 32);
}

// CHECK-LABEL: @test_uicmp_i32
// CHECK: call i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 32)
void test_uicmp_i32(global ulong* out, uint a, uint b)
{
  *out = __builtin_amdgcn_uicmp(a, b, 32);
}

// CHECK-LABEL: @test_sicmp_i64
// CHECK: call i64 @llvm.amdgcn.icmp.i64.i64(i64 %a, i64 %b, i32 38)
void test_sicmp_i64(global ulong* out, long a, long b)
{
  *out = __builtin_amdgcn_sicmpl(a, b, 39-1);
}

// CHECK-LABEL: @test_uicmp_i64
// CHECK: call i64 @llvm.amdgcn.icmp.i64.i64(i64 %a, i64 %b, i32 35)
void test_uicmp_i64(global ulong* out, ulong a, ulong b)
{
  *out = __builtin_amdgcn_uicmpl(a, b, 30+5);
}

// CHECK-LABEL: @test_ds_swizzle
// CHECK: call i32 @llvm.amdgcn.ds.swizzle(i32 %a, i32 32)
void test_ds_swizzle(global int* out, int a)
{
  *out = __builtin_amdgcn_ds_swizzle(a, 32);
}

// CHECK-LABEL: @test_ds_permute
// CHECK: call i32 @llvm.amdgcn.ds.permute(i32 %a, i32 %b)
void test_ds_permute(global int* out, int a, int b)
{
  out[0] = __builtin_amdgcn_ds_permute(a, b);
}

// CHECK-LABEL: @test_ds_bpermute
// CHECK: call i32 @llvm.amdgcn.ds.bpermute(i32 %a, i32 %b)
void test_ds_bpermute(global int* out, int a, int b)
{
  *out = __builtin_amdgcn_ds_bpermute(a, b);
}

// CHECK-LABEL: @test_readfirstlane
// CHECK: call i32 @llvm.amdgcn.readfirstlane(i32 %a)
void test_readfirstlane(global int* out, int a)
{
  *out = __builtin_amdgcn_readfirstlane(a);
}

// CHECK-LABEL: @test_readlane
// CHECK: call i32 @llvm.amdgcn.readlane(i32 %a, i32 %b)
void test_readlane(global int* out, int a, int b)
{
  *out = __builtin_amdgcn_readlane(a, b);
}
// CHECK-LABEL: @test_fcmp_f32
// CHECK: call i64 @llvm.amdgcn.fcmp.i64.f32(float %a, float %b, i32 5)
void test_fcmp_f32(global ulong* out, float a, float b)
{
  *out = __builtin_amdgcn_fcmpf(a, b, 5);
}

// CHECK-LABEL: @test_fcmp_f64
// CHECK: call i64 @llvm.amdgcn.fcmp.i64.f64(double %a, double %b, i32 6)
void test_fcmp_f64(global ulong* out, double a, double b)
{
  *out = __builtin_amdgcn_fcmp(a, b, 3+3);
}

// CHECK-LABEL: @test_class_f32
// CHECK: call i1 @llvm.amdgcn.class.f32
void test_class_f32(global float* out, float a, int b)
{
  *out = __builtin_amdgcn_classf(a, b);
}

// CHECK-LABEL: @test_class_f64
// CHECK: call i1 @llvm.amdgcn.class.f64
void test_class_f64(global double* out, double a, int b)
{
  *out = __builtin_amdgcn_class(a, b);
}

// CHECK-LABEL: @test_buffer_wbinvl1
// CHECK: call void @llvm.amdgcn.buffer.wbinvl1(
void test_buffer_wbinvl1()
{
  __builtin_amdgcn_buffer_wbinvl1();
}

// CHECK-LABEL: @test_s_dcache_inv
// CHECK: call void @llvm.amdgcn.s.dcache.inv(
void test_s_dcache_inv()
{
  __builtin_amdgcn_s_dcache_inv();
}

// CHECK-LABEL: @test_s_waitcnt
// CHECK: call void @llvm.amdgcn.s.waitcnt(
void test_s_waitcnt()
{
  __builtin_amdgcn_s_waitcnt(0);
}

// CHECK-LABEL: @test_s_sendmsg
// CHECK: call void @llvm.amdgcn.s.sendmsg(
void test_s_sendmsg()
{
  __builtin_amdgcn_s_sendmsg(1, 0);
}
// CHECK-LABEL: @test_s_sendmsg_var
// CHECK: call void @llvm.amdgcn.s.sendmsg(
void test_s_sendmsg_var(int in)
{
  __builtin_amdgcn_s_sendmsg(1, in);
}

// CHECK-LABEL: @test_s_sendmsghalt
// CHECK: call void @llvm.amdgcn.s.sendmsghalt(
void test_s_sendmsghalt()
{
  __builtin_amdgcn_s_sendmsghalt(1, 0);
}

// CHECK-LABEL: @test_s_sendmsghalt
// CHECK: call void @llvm.amdgcn.s.sendmsghalt(
void test_s_sendmsghalt_var(int in)
{
  __builtin_amdgcn_s_sendmsghalt(1, in);
}

// CHECK-LABEL: @test_s_barrier
// CHECK: call void @llvm.amdgcn.s.barrier(
void test_s_barrier()
{
  __builtin_amdgcn_s_barrier();
}

// CHECK-LABEL: @test_wave_barrier
// CHECK: call void @llvm.amdgcn.wave.barrier(
void test_wave_barrier()
{
  __builtin_amdgcn_wave_barrier();
}

// CHECK-LABEL: @test_sched_barrier
// CHECK: call void @llvm.amdgcn.sched.barrier(i32 0)
// CHECK: call void @llvm.amdgcn.sched.barrier(i32 1)
// CHECK: call void @llvm.amdgcn.sched.barrier(i32 4)
// CHECK: call void @llvm.amdgcn.sched.barrier(i32 15)
void test_sched_barrier()
{
  __builtin_amdgcn_sched_barrier(0);
  __builtin_amdgcn_sched_barrier(1);
  __builtin_amdgcn_sched_barrier(4);
  __builtin_amdgcn_sched_barrier(15);
}
// CHECK-LABEL: @test_sched_group_barrier
// CHECK: call void @llvm.amdgcn.sched.group.barrier(i32 0, i32 1, i32 2)
// CHECK: call void @llvm.amdgcn.sched.group.barrier(i32 1, i32 2, i32 4)
// CHECK: call void @llvm.amdgcn.sched.group.barrier(i32 4, i32 8, i32 16)
// CHECK: call void @llvm.amdgcn.sched.group.barrier(i32 15, i32 10000, i32 -1)
void test_sched_group_barrier()
{
  __builtin_amdgcn_sched_group_barrier(0, 1, 2);
  __builtin_amdgcn_sched_group_barrier(1, 2, 4);
  __builtin_amdgcn_sched_group_barrier(4, 8, 16);
  __builtin_amdgcn_sched_group_barrier(15, 10000, -1);
}

// CHECK-LABEL: @test_iglp_opt
// CHECK: call void @llvm.amdgcn.iglp.opt(i32 0)
// CHECK: call void @llvm.amdgcn.iglp.opt(i32 1)
// CHECK: call void @llvm.amdgcn.iglp.opt(i32 4)
// CHECK: call void @llvm.amdgcn.iglp.opt(i32 15)
void test_iglp_opt()
{
  __builtin_amdgcn_iglp_opt(0);
  __builtin_amdgcn_iglp_opt(1);
  __builtin_amdgcn_iglp_opt(4);
  __builtin_amdgcn_iglp_opt(15);
}

// CHECK-LABEL: @test_s_sleep
// CHECK: call void @llvm.amdgcn.s.sleep(i32 1)
// CHECK: call void @llvm.amdgcn.s.sleep(i32 15)
void test_s_sleep()
{
  __builtin_amdgcn_s_sleep(1);
  __builtin_amdgcn_s_sleep(15);
}

// CHECK-LABEL: @test_s_incperflevel
// CHECK: call void @llvm.amdgcn.s.incperflevel(i32 1)
// CHECK: call void @llvm.amdgcn.s.incperflevel(i32 15)
void test_s_incperflevel()
{
  __builtin_amdgcn_s_incperflevel(1);
  __builtin_amdgcn_s_incperflevel(15);
}

// CHECK-LABEL: @test_s_decperflevel
// CHECK: call void @llvm.amdgcn.s.decperflevel(i32 1)
// CHECK: call void @llvm.amdgcn.s.decperflevel(i32 15)
void test_s_decperflevel()
{
  __builtin_amdgcn_s_decperflevel(1);
  __builtin_amdgcn_s_decperflevel(15);
}

// CHECK-LABEL: @test_s_setprio
// CHECK: call void @llvm.amdgcn.s.setprio(i16 0)
// CHECK: call void @llvm.amdgcn.s.setprio(i16 3)
void test_s_setprio()
{
  __builtin_amdgcn_s_setprio(0);
  __builtin_amdgcn_s_setprio(3);
}
// CHECK-LABEL: @test_cubeid(
// CHECK: call float @llvm.amdgcn.cubeid(float %a, float %b, float %c)
void test_cubeid(global float* out, float a, float b, float c) {
  *out = __builtin_amdgcn_cubeid(a, b, c);
}

// CHECK-LABEL: @test_cubesc(
// CHECK: call float @llvm.amdgcn.cubesc(float %a, float %b, float %c)
void test_cubesc(global float* out, float a, float b, float c) {
  *out = __builtin_amdgcn_cubesc(a, b, c);
}

// CHECK-LABEL: @test_cubetc(
// CHECK: call float @llvm.amdgcn.cubetc(float %a, float %b, float %c)
void test_cubetc(global float* out, float a, float b, float c) {
  *out = __builtin_amdgcn_cubetc(a, b, c);
}

// CHECK-LABEL: @test_cubema(
// CHECK: call float @llvm.amdgcn.cubema(float %a, float %b, float %c)
void test_cubema(global float* out, float a, float b, float c) {
  *out = __builtin_amdgcn_cubema(a, b, c);
}

// read_exec lowers to a ballot(true); read_exec_hi additionally shifts and
// truncates to get the high 32 bits.

// CHECK-LABEL: @test_read_exec(
// CHECK: call i64 @llvm.amdgcn.ballot.i64(i1 true)
void test_read_exec(global ulong* out) {
  *out = __builtin_amdgcn_read_exec();
}

// CHECK: declare i64 @llvm.amdgcn.ballot.i64(i1) #[[$NOUNWIND_READONLY:[0-9]+]]

// CHECK-LABEL: @test_read_exec_lo(
// CHECK: call i32 @llvm.amdgcn.ballot.i32(i1 true)
void test_read_exec_lo(global uint* out) {
  *out = __builtin_amdgcn_read_exec_lo();
}

// CHECK: declare i32 @llvm.amdgcn.ballot.i32(i1) #[[$NOUNWIND_READONLY:[0-9]+]]

// CHECK-LABEL: @test_read_exec_hi(
// CHECK: call i64 @llvm.amdgcn.ballot.i64(i1 true)
// CHECK: lshr i64 [[A:%.*]], 32
// CHECK: trunc i64 [[B:%.*]] to i32
void test_read_exec_hi(global uint* out) {
  *out = __builtin_amdgcn_read_exec_hi();
}
// CHECK-LABEL: @test_dispatch_ptr
// CHECK: call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
void test_dispatch_ptr(__constant unsigned char ** out)
{
  *out = __builtin_amdgcn_dispatch_ptr();
}

// CHECK-LABEL: @test_queue_ptr
// CHECK: call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
void test_queue_ptr(__constant unsigned char ** out)
{
  *out = __builtin_amdgcn_queue_ptr();
}

// CHECK-LABEL: @test_kernarg_segment_ptr
// CHECK: call ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
void test_kernarg_segment_ptr(__constant unsigned char ** out)
{
  *out = __builtin_amdgcn_kernarg_segment_ptr();
}

// CHECK-LABEL: @test_implicitarg_ptr
// CHECK: call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
void test_implicitarg_ptr(__constant unsigned char ** out)
{
  *out = __builtin_amdgcn_implicitarg_ptr();
}

// CHECK-LABEL: @test_get_group_id(
// CHECK: tail call i32 @llvm.amdgcn.workgroup.id.x()
// CHECK: tail call i32 @llvm.amdgcn.workgroup.id.y()
// CHECK: tail call i32 @llvm.amdgcn.workgroup.id.z()
void test_get_group_id(int d, global int *out)
{
  switch (d) {
  case 0: *out = __builtin_amdgcn_workgroup_id_x(); break;
  case 1: *out = __builtin_amdgcn_workgroup_id_y(); break;
  case 2: *out = __builtin_amdgcn_workgroup_id_z(); break;
  default: *out = 0;
  }
}
// CHECK-LABEL: @test_s_getreg(
// CHECK: tail call i32 @llvm.amdgcn.s.getreg(i32 0)
// CHECK: tail call i32 @llvm.amdgcn.s.getreg(i32 1)
// CHECK: tail call i32 @llvm.amdgcn.s.getreg(i32 65535)
void test_s_getreg(volatile global uint *out)
{
  *out = __builtin_amdgcn_s_getreg(0);
  *out = __builtin_amdgcn_s_getreg(1);
  *out = __builtin_amdgcn_s_getreg(65535);
}

// CHECK-LABEL: @test_get_local_id(
// CHECK: tail call i32 @llvm.amdgcn.workitem.id.x(), !range [[$WI_RANGE:![0-9]*]], !noundef
// CHECK: tail call i32 @llvm.amdgcn.workitem.id.y(), !range [[$WI_RANGE]], !noundef
// CHECK: tail call i32 @llvm.amdgcn.workitem.id.z(), !range [[$WI_RANGE]], !noundef
void test_get_local_id(int d, global int *out)
{
  switch (d) {
  case 0: *out = __builtin_amdgcn_workitem_id_x(); break;
  case 1: *out = __builtin_amdgcn_workitem_id_y(); break;
  case 2: *out = __builtin_amdgcn_workitem_id_z(); break;
  default: *out = 0;
  }
}

// Workgroup sizes are loaded as i16 fields from the dispatch packet at byte
// offsets 4/6/8, with !range and !invariant.load metadata.
// CHECK-LABEL: @test_get_workgroup_size(
// CHECK: call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 4
// CHECK: load i16, ptr addrspace(4) %{{.*}}, align 4, !range [[$WS_RANGE:![0-9]*]], !invariant.load{{.*}}, !noundef
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 6
// CHECK: load i16, ptr addrspace(4) %{{.*}}, align 2, !range [[$WS_RANGE:![0-9]*]], !invariant.load{{.*}}, !noundef
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 8
// CHECK: load i16, ptr addrspace(4) %{{.*}}, align 4, !range [[$WS_RANGE:![0-9]*]], !invariant.load{{.*}}, !noundef
void test_get_workgroup_size(int d, global int *out)
{
  switch (d) {
  case 0: *out = __builtin_amdgcn_workgroup_size_x() + 1; break;
  case 1: *out = __builtin_amdgcn_workgroup_size_y(); break;
  case 2: *out = __builtin_amdgcn_workgroup_size_z(); break;
  default: *out = 0;
  }
}

// Grid sizes are i32 fields of the dispatch packet at byte offsets 12/16/20.
// CHECK-LABEL: @test_get_grid_size(
// CHECK: call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 12
// CHECK: load i32, ptr addrspace(4) %{{.*}}, align 4, !invariant.load
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 16
// CHECK: load i32, ptr addrspace(4) %{{.*}}, align 4, !invariant.load
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 20
// CHECK: load i32, ptr addrspace(4) %{{.*}}, align 4, !invariant.load
void test_get_grid_size(int d, global int *out)
{
  switch (d) {
  case 0: *out = __builtin_amdgcn_grid_size_x(); break;
  case 1: *out = __builtin_amdgcn_grid_size_y(); break;
  case 2: *out = __builtin_amdgcn_grid_size_z(); break;
  default: *out = 0;
  }
}
// CHECK-LABEL: @test_fmed3_f32
// CHECK: call float @llvm.amdgcn.fmed3.f32(
void test_fmed3_f32(global float* out, float a, float b, float c)
{
  *out = __builtin_amdgcn_fmed3f(a, b, c);
}

// CHECK-LABEL: @test_s_getpc
// CHECK: call i64 @llvm.amdgcn.s.getpc()
void test_s_getpc(global ulong* out)
{
  *out = __builtin_amdgcn_s_getpc();
}

// CHECK-LABEL: @test_ds_append_lds(
// CHECK: call i32 @llvm.amdgcn.ds.append.p3(ptr addrspace(3) %ptr, i1 false)
kernel void test_ds_append_lds(global int* out, local int* ptr) {
  *out = __builtin_amdgcn_ds_append(ptr);
}

// CHECK-LABEL: @test_ds_consume_lds(
// CHECK: call i32 @llvm.amdgcn.ds.consume.p3(ptr addrspace(3) %ptr, i1 false)
kernel void test_ds_consume_lds(global int* out, local int* ptr) {
  *out = __builtin_amdgcn_ds_consume(ptr);
}

// CHECK-LABEL: @test_gws_init(
// CHECK: call void @llvm.amdgcn.ds.gws.init(i32 %value, i32 %id)
kernel void test_gws_init(uint value, uint id) {
  __builtin_amdgcn_ds_gws_init(value, id);
}

// CHECK-LABEL: @test_gws_barrier(
// CHECK: call void @llvm.amdgcn.ds.gws.barrier(i32 %value, i32 %id)
kernel void test_gws_barrier(uint value, uint id) {
  __builtin_amdgcn_ds_gws_barrier(value, id);
}

// CHECK-LABEL: @test_gws_sema_v(
// CHECK: call void @llvm.amdgcn.ds.gws.sema.v(i32 %id)
kernel void test_gws_sema_v(uint id) {
  __builtin_amdgcn_ds_gws_sema_v(id);
}

// CHECK-LABEL: @test_gws_sema_br(
// CHECK: call void @llvm.amdgcn.ds.gws.sema.br(i32 %value, i32 %id)
kernel void test_gws_sema_br(uint value, uint id) {
  __builtin_amdgcn_ds_gws_sema_br(value, id);
}

// CHECK-LABEL: @test_gws_sema_p(
// CHECK: call void @llvm.amdgcn.ds.gws.sema.p(i32 %id)
kernel void test_gws_sema_p(uint id) {
  __builtin_amdgcn_ds_gws_sema_p(id);
}
// CHECK-LABEL: @test_mbcnt_lo(
// CHECK: call i32 @llvm.amdgcn.mbcnt.lo(i32 %src0, i32 %src1)
kernel void test_mbcnt_lo(global uint* out, uint src0, uint src1) {
  *out = __builtin_amdgcn_mbcnt_lo(src0, src1);
}

// CHECK-LABEL: @test_mbcnt_hi(
// CHECK: call i32 @llvm.amdgcn.mbcnt.hi(i32 %src0, i32 %src1)
kernel void test_mbcnt_hi(global uint* out, uint src0, uint src1) {
  *out = __builtin_amdgcn_mbcnt_hi(src0, src1);
}

// alignbit lowers to the generic funnel-shift-right intrinsic.
// CHECK-LABEL: @test_alignbit(
// CHECK: tail call i32 @llvm.fshr.i32(i32 %src0, i32 %src1, i32 %src2)
kernel void test_alignbit(global uint* out, uint src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_alignbit(src0, src1, src2);
}

// CHECK-LABEL: @test_alignbyte(
// CHECK: tail call i32 @llvm.amdgcn.alignbyte(i32 %src0, i32 %src1, i32 %src2)
kernel void test_alignbyte(global uint* out, uint src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_alignbyte(src0, src1, src2);
}

// CHECK-LABEL: @test_ubfe(
// CHECK: tail call i32 @llvm.amdgcn.ubfe.i32(i32 %src0, i32 %src1, i32 %src2)
kernel void test_ubfe(global uint* out, uint src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_ubfe(src0, src1, src2);
}

// CHECK-LABEL: @test_sbfe(
// CHECK: tail call i32 @llvm.amdgcn.sbfe.i32(i32 %src0, i32 %src1, i32 %src2)
kernel void test_sbfe(global uint* out, uint src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_sbfe(src0, src1, src2);
}

// CHECK-LABEL: @test_cvt_pkrtz(
// CHECK: tail call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %src0, float %src1)
kernel void test_cvt_pkrtz(global half2* out, float src0, float src1) {
  *out = __builtin_amdgcn_cvt_pkrtz(src0, src1);
}

// CHECK-LABEL: @test_cvt_pknorm_i16(
// CHECK: tail call <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %src0, float %src1)
kernel void test_cvt_pknorm_i16(global short2* out, float src0, float src1) {
  *out = __builtin_amdgcn_cvt_pknorm_i16(src0, src1);
}

// CHECK-LABEL: @test_cvt_pknorm_u16(
// CHECK: tail call <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float %src0, float %src1)
kernel void test_cvt_pknorm_u16(global ushort2* out, float src0, float src1) {
  *out = __builtin_amdgcn_cvt_pknorm_u16(src0, src1);
}

// CHECK-LABEL: @test_cvt_pk_i16(
// CHECK: tail call <2 x i16> @llvm.amdgcn.cvt.pk.i16(i32 %src0, i32 %src1)
kernel void test_cvt_pk_i16(global short2* out, int src0, int src1) {
  *out = __builtin_amdgcn_cvt_pk_i16(src0, src1);
}

// CHECK-LABEL: @test_cvt_pk_u16(
// CHECK: tail call <2 x i16> @llvm.amdgcn.cvt.pk.u16(i32 %src0, i32 %src1)
kernel void test_cvt_pk_u16(global ushort2* out, uint src0, uint src1) {
  *out = __builtin_amdgcn_cvt_pk_u16(src0, src1);
}

// CHECK-LABEL: @test_cvt_pk_u8_f32
// CHECK: tail call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src0, i32 %src1, i32 %src2)
kernel void test_cvt_pk_u8_f32(global uint* out, float src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_cvt_pk_u8_f32(src0, src1, src2);
}
// CHECK-LABEL: @test_sad_u8(
// CHECK: tail call i32 @llvm.amdgcn.sad.u8(i32 %src0, i32 %src1, i32 %src2)
kernel void test_sad_u8(global uint* out, uint src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_sad_u8(src0, src1, src2);
}

// CHECK-LABEL: test_msad_u8(
// CHECK: call i32 @llvm.amdgcn.msad.u8(i32 %src0, i32 %src1, i32 %src2)
kernel void test_msad_u8(global uint* out, uint src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_msad_u8(src0, src1, src2);
}

// CHECK-LABEL: test_sad_hi_u8(
// CHECK: call i32 @llvm.amdgcn.sad.hi.u8(i32 %src0, i32 %src1, i32 %src2)
kernel void test_sad_hi_u8(global uint* out, uint src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_sad_hi_u8(src0, src1, src2);
}

// CHECK-LABEL: @test_sad_u16(
// CHECK: call i32 @llvm.amdgcn.sad.u16(i32 %src0, i32 %src1, i32 %src2)
kernel void test_sad_u16(global uint* out, uint src0, uint src1, uint src2) {
  *out = __builtin_amdgcn_sad_u16(src0, src1, src2);
}

// CHECK-LABEL: @test_qsad_pk_u16_u8(
// CHECK: call i64 @llvm.amdgcn.qsad.pk.u16.u8(i64 %src0, i32 %src1, i64 %src2)
kernel void test_qsad_pk_u16_u8(global ulong* out, ulong src0, uint src1, ulong src2) {
  *out = __builtin_amdgcn_qsad_pk_u16_u8(src0, src1, src2);
}

// CHECK-LABEL: @test_mqsad_pk_u16_u8(
// CHECK: call i64 @llvm.amdgcn.mqsad.pk.u16.u8(i64 %src0, i32 %src1, i64 %src2)
kernel void test_mqsad_pk_u16_u8(global ulong* out, ulong src0, uint src1, ulong src2) {
  *out = __builtin_amdgcn_mqsad_pk_u16_u8(src0, src1, src2);
}

// CHECK-LABEL: test_mqsad_u32_u8(
// CHECK: call <4 x i32> @llvm.amdgcn.mqsad.u32.u8(i64 %src0, i32 %src1, <4 x i32> %src2)
kernel void test_mqsad_u32_u8(global uint4* out, ulong src0, uint src1, uint4 src2) {
  *out = __builtin_amdgcn_mqsad_u32_u8(src0, src1, src2);
}

// CHECK-LABEL: test_s_setreg(
// CHECK: call void @llvm.amdgcn.s.setreg(i32 8193, i32 %val)
kernel void test_s_setreg(uint val) {
  __builtin_amdgcn_s_setreg(8193, val);
}
// uinc_wrap/udec_wrap atomics: verify address space, syncscope string
// propagation (empty string = system scope), and volatile handling.
// CHECK-LABEL test_atomic_inc_dec(
void test_atomic_inc_dec(local uint *lptr, global uint *gptr, uint val) {
  uint res;

  // CHECK: atomicrmw uinc_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4
  res = __builtin_amdgcn_atomic_inc32(lptr, val, __ATOMIC_SEQ_CST, "workgroup");

  // CHECK: atomicrmw udec_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4
  res = __builtin_amdgcn_atomic_dec32(lptr, val, __ATOMIC_SEQ_CST, "workgroup");

  // CHECK: atomicrmw uinc_wrap ptr addrspace(1) %gptr, i32 %val syncscope("agent") seq_cst, align 4
  res = __builtin_amdgcn_atomic_inc32(gptr, val, __ATOMIC_SEQ_CST, "agent");

  // CHECK: atomicrmw udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4
  res = __builtin_amdgcn_atomic_dec32(gptr, val, __ATOMIC_SEQ_CST, "");

  // CHECK: atomicrmw volatile udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4
  res = __builtin_amdgcn_atomic_dec32((volatile global uint*)gptr, val, __ATOMIC_SEQ_CST, "");
}
// CHECK-DAG: [[$WI_RANGE]] = !{i32 0, i32 1024}
// CHECK-DAG: [[$WS_RANGE]] = !{i16 1, i16 1025}
// CHECK-DAG: attributes #[[$NOUNWIND_READONLY]] = { convergent mustprogress nocallback nofree nounwind willreturn memory(none) }