// clang/test/CodeGenOpenCL/builtins-amdgcn.cl — OpenCL CodeGen test for AMDGCN builtins.
// (Scrape residue removed: the original header misattributed this file to an
// unrelated flang/CUDA commit, followed by a git path and blob hash.)
1 // REQUIRES: amdgpu-registered-target
2 // RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-unknown-unknown -target-cpu tahiti -emit-llvm -o - %s | FileCheck -enable-var-scope --check-prefixes=CHECK-AMDGCN,CHECK %s
3 // RUN: %clang_cc1 -cl-std=CL2.0 -triple spirv64-amd-amdhsa -emit-llvm -o - %s | FileCheck -enable-var-scope --check-prefix=CHECK %s
6 #pragma OPENCL EXTENSION cl_khr_fp64 : enable
8 typedef unsigned long ulong;
9 typedef unsigned int uint;
10 typedef unsigned short ushort;
11 typedef half __attribute__((ext_vector_type(2))) half2;
12 typedef short __attribute__((ext_vector_type(2))) short2;
13 typedef ushort __attribute__((ext_vector_type(2))) ushort2;
14 typedef uint __attribute__((ext_vector_type(4))) uint4;
16 // CHECK-LABEL: @test_div_scale_f64
17 // CHECK: {{.*}}call{{.*}} { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true)
18 // CHECK-DAG: [[FLAG:%.+]] = extractvalue { double, i1 } %{{.+}}, 1
19 // CHECK-DAG: [[VAL:%.+]] = extractvalue { double, i1 } %{{.+}}, 0
20 // CHECK: [[FLAGEXT:%.+]] = zext i1 [[FLAG]] to i32
21 // CHECK: store i32 [[FLAGEXT]]
22 void test_div_scale_f64(global double* out, global int* flagout, double a, double b)
24 bool flag;
25 *out = __builtin_amdgcn_div_scale(a, b, true, &flag);
26 *flagout = flag;
29 // CHECK-LABEL: @test_div_scale_f32(
30 // CHECK: {{.*}}call{{.*}} { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
31 // CHECK-DAG: [[FLAG:%.+]] = extractvalue { float, i1 } %{{.+}}, 1
32 // CHECK-DAG: [[VAL:%.+]] = extractvalue { float, i1 } %{{.+}}, 0
33 // CHECK: [[FLAGEXT:%.+]] = zext i1 [[FLAG]] to i8
34 // CHECK: store i8 [[FLAGEXT]]
35 void test_div_scale_f32(global float* out, global bool* flagout, float a, float b)
37 bool flag;
38 *out = __builtin_amdgcn_div_scalef(a, b, true, &flag);
39 *flagout = flag;
42 // CHECK-LABEL: @test_div_scale_f32_global_ptr(
43 // CHECK: {{.*}}call{{.*}} { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
44 // CHECK-DAG: [[FLAG:%.+]] = extractvalue { float, i1 } %{{.+}}, 1
45 // CHECK-DAG: [[VAL:%.+]] = extractvalue { float, i1 } %{{.+}}, 0
46 // CHECK: [[FLAGEXT:%.+]] = zext i1 [[FLAG]] to i8
47 // CHECK: store i8 [[FLAGEXT]]
48 void test_div_scale_f32_global_ptr(global float* out, global int* flagout, float a, float b, global bool* flag)
50 *out = __builtin_amdgcn_div_scalef(a, b, true, flag);
53 // CHECK-LABEL: @test_div_scale_f32_generic_ptr(
54 // CHECK: {{.*}}call{{.*}} { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
55 // CHECK-DAG: [[FLAG:%.+]] = extractvalue { float, i1 } %{{.+}}, 1
56 // CHECK-DAG: [[VAL:%.+]] = extractvalue { float, i1 } %{{.+}}, 0
57 // CHECK: [[FLAGEXT:%.+]] = zext i1 [[FLAG]] to i8
58 // CHECK: store i8 [[FLAGEXT]]
59 void test_div_scale_f32_generic_ptr(global float* out, global int* flagout, float a, float b, global bool* flag_arg)
61 generic bool* flag = flag_arg;
62 *out = __builtin_amdgcn_div_scalef(a, b, true, flag);
65 // CHECK-LABEL: @test_div_fmas_f32
66 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.div.fmas.f32
67 void test_div_fmas_f32(global float* out, float a, float b, float c, int d)
69 *out = __builtin_amdgcn_div_fmasf(a, b, c, d);
72 // CHECK-LABEL: @test_div_fmas_f64
73 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.div.fmas.f64
74 void test_div_fmas_f64(global double* out, double a, double b, double c, int d)
76 *out = __builtin_amdgcn_div_fmas(a, b, c, d);
79 // CHECK-LABEL: @test_div_fixup_f32
80 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.div.fixup.f32
81 void test_div_fixup_f32(global float* out, float a, float b, float c)
83 *out = __builtin_amdgcn_div_fixupf(a, b, c);
86 // CHECK-LABEL: @test_div_fixup_f64
87 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.div.fixup.f64
88 void test_div_fixup_f64(global double* out, double a, double b, double c)
90 *out = __builtin_amdgcn_div_fixup(a, b, c);
93 // CHECK-LABEL: @test_trig_preop_f32
94 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.trig.preop.f32
95 void test_trig_preop_f32(global float* out, float a, int b)
97 *out = __builtin_amdgcn_trig_preopf(a, b);
100 // CHECK-LABEL: @test_trig_preop_f64
101 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.trig.preop.f64
102 void test_trig_preop_f64(global double* out, double a, int b)
104 *out = __builtin_amdgcn_trig_preop(a, b);
107 // CHECK-LABEL: @test_rcp_f32
108 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.rcp.f32
109 void test_rcp_f32(global float* out, float a)
111 *out = __builtin_amdgcn_rcpf(a);
114 // CHECK-LABEL: @test_rcp_f64
115 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.rcp.f64
116 void test_rcp_f64(global double* out, double a)
118 *out = __builtin_amdgcn_rcp(a);
121 // CHECK-LABEL: @test_sqrt_f32
122 // CHECK: {{.*}}call{{.*}} float @llvm.{{((amdgcn.){0,1})}}sqrt.f32
123 void test_sqrt_f32(global float* out, float a)
125 *out = __builtin_amdgcn_sqrtf(a);
128 // CHECK-LABEL: @test_sqrt_f64
129 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.sqrt.f64
130 void test_sqrt_f64(global double* out, double a)
132 *out = __builtin_amdgcn_sqrt(a);
135 // CHECK-LABEL: @test_rsq_f32
136 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.rsq.f32
137 void test_rsq_f32(global float* out, float a)
139 *out = __builtin_amdgcn_rsqf(a);
142 // CHECK-LABEL: @test_rsq_f64
143 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.rsq.f64
144 void test_rsq_f64(global double* out, double a)
146 *out = __builtin_amdgcn_rsq(a);
149 // CHECK-LABEL: @test_rsq_clamp_f32
150 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.rsq.clamp.f32
151 void test_rsq_clamp_f32(global float* out, float a)
153 *out = __builtin_amdgcn_rsq_clampf(a);
156 // CHECK-LABEL: @test_rsq_clamp_f64
157 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.rsq.clamp.f64
158 void test_rsq_clamp_f64(global double* out, double a)
160 *out = __builtin_amdgcn_rsq_clamp(a);
163 // CHECK-LABEL: @test_sin_f32
164 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.sin.f32
165 void test_sin_f32(global float* out, float a)
167 *out = __builtin_amdgcn_sinf(a);
170 // CHECK-LABEL: @test_cos_f32
171 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.cos.f32
172 void test_cos_f32(global float* out, float a)
174 *out = __builtin_amdgcn_cosf(a);
177 // CHECK-LABEL: @test_log_f32
178 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.log.f32
179 void test_log_f32(global float* out, float a)
181 *out = __builtin_amdgcn_logf(a);
184 // CHECK-LABEL: @test_exp2_f32
185 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.exp2.f32
186 void test_exp2_f32(global float* out, float a)
188 *out = __builtin_amdgcn_exp2f(a);
191 // CHECK-LABEL: @test_log_clamp_f32
192 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.log.clamp.f32
193 void test_log_clamp_f32(global float* out, float a)
195 *out = __builtin_amdgcn_log_clampf(a);
198 // CHECK-LABEL: @test_ldexp_f32
199 // CHECK: {{.*}}call{{.*}} float @llvm.ldexp.f32.i32
200 void test_ldexp_f32(global float* out, float a, int b)
202 *out = __builtin_amdgcn_ldexpf(a, b);
205 // CHECK-LABEL: @test_ldexp_f64
206 // CHECK: {{.*}}call{{.*}} double @llvm.ldexp.f64.i32
207 void test_ldexp_f64(global double* out, double a, int b)
209 *out = __builtin_amdgcn_ldexp(a, b);
212 // CHECK-LABEL: @test_frexp_mant_f32
213 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.frexp.mant.f32
214 void test_frexp_mant_f32(global float* out, float a)
216 *out = __builtin_amdgcn_frexp_mantf(a);
219 // CHECK-LABEL: @test_frexp_mant_f64
220 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.frexp.mant.f64
221 void test_frexp_mant_f64(global double* out, double a)
223 *out = __builtin_amdgcn_frexp_mant(a);
226 // CHECK-LABEL: @test_frexp_exp_f32
227 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.frexp.exp.i32.f32
228 void test_frexp_exp_f32(global int* out, float a)
230 *out = __builtin_amdgcn_frexp_expf(a);
233 // CHECK-LABEL: @test_frexp_exp_f64
234 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.frexp.exp.i32.f64
235 void test_frexp_exp_f64(global int* out, double a)
237 *out = __builtin_amdgcn_frexp_exp(a);
240 // CHECK-LABEL: @test_fract_f32
241 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.fract.f32
242 void test_fract_f32(global int* out, float a)
244 *out = __builtin_amdgcn_fractf(a);
247 // CHECK-LABEL: @test_fract_f64
248 // CHECK: {{.*}}call{{.*}} double @llvm.amdgcn.fract.f64
249 void test_fract_f64(global int* out, double a)
251 *out = __builtin_amdgcn_fract(a);
254 // CHECK-LABEL: @test_lerp
255 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.lerp
256 void test_lerp(global int* out, int a, int b, int c)
258 *out = __builtin_amdgcn_lerp(a, b, c);
261 // CHECK-LABEL: @test_sicmp_i32
262 // CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 32)
263 void test_sicmp_i32(global ulong* out, int a, int b)
265 *out = __builtin_amdgcn_sicmp(a, b, 32);
268 // CHECK-LABEL: @test_uicmp_i32
269 // CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 32)
270 void test_uicmp_i32(global ulong* out, uint a, uint b)
272 *out = __builtin_amdgcn_uicmp(a, b, 32);
// CHECK-LABEL: @test_sicmp_i64
// CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.icmp.i64.i64(i64 %a, i64 %b, i32 38)
void test_sicmp_i64(global ulong* out, long a, long b)
{
  // Pass the condition code as the literal 38, matching the CHECK line above
  // and the style of the other icmp tests (e.g. test_sicmp_i32 passes 32),
  // instead of the equivalent but obfuscated constant expression 39-1.
  *out = __builtin_amdgcn_sicmpl(a, b, 38);
}
// CHECK-LABEL: @test_uicmp_i64
// CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.icmp.i64.i64(i64 %a, i64 %b, i32 35)
void test_uicmp_i64(global ulong* out, ulong a, ulong b)
{
  // Pass the condition code as the literal 35, matching the CHECK line above
  // and the style of the other icmp tests (e.g. test_uicmp_i32 passes 32),
  // instead of the equivalent but obfuscated constant expression 30+5.
  *out = __builtin_amdgcn_uicmpl(a, b, 35);
}
289 // CHECK-LABEL: @test_ds_swizzle
290 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.ds.swizzle(i32 %a, i32 32)
291 void test_ds_swizzle(global int* out, int a)
293 *out = __builtin_amdgcn_ds_swizzle(a, 32);
296 // CHECK-LABEL: @test_ds_permute
297 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.ds.permute(i32 %a, i32 %b)
298 void test_ds_permute(global int* out, int a, int b)
300 out[0] = __builtin_amdgcn_ds_permute(a, b);
303 // CHECK-LABEL: @test_ds_bpermute
304 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.ds.bpermute(i32 %a, i32 %b)
305 void test_ds_bpermute(global int* out, int a, int b)
307 *out = __builtin_amdgcn_ds_bpermute(a, b);
310 // CHECK-LABEL: @test_readfirstlane
311 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.readfirstlane.i32(i32 %a)
312 void test_readfirstlane(global int* out, int a)
314 *out = __builtin_amdgcn_readfirstlane(a);
317 // CHECK-LABEL: @test_readlane
318 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.readlane.i32(i32 %a, i32 %b)
319 void test_readlane(global int* out, int a, int b)
321 *out = __builtin_amdgcn_readlane(a, b);
324 // CHECK-LABEL: @test_fcmp_f32
325 // CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.fcmp.i64.f32(float %a, float %b, i32 5)
326 void test_fcmp_f32(global ulong* out, float a, float b)
328 *out = __builtin_amdgcn_fcmpf(a, b, 5);
// CHECK-LABEL: @test_fcmp_f64
// CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.fcmp.i64.f64(double %a, double %b, i32 6)
void test_fcmp_f64(global ulong* out, double a, double b)
{
  // Pass the condition code as the literal 6, matching the CHECK line above
  // and the sibling test_fcmp_f32 (which passes the literal 5), instead of
  // the equivalent but obfuscated constant expression 3+3.
  *out = __builtin_amdgcn_fcmp(a, b, 6);
}
338 // CHECK-LABEL: @test_class_f32
339 // CHECK: {{.*}}call{{.*}} i1 @llvm.amdgcn.class.f32
340 void test_class_f32(global float* out, float a, int b)
342 *out = __builtin_amdgcn_classf(a, b);
345 // CHECK-LABEL: @test_class_f64
346 // CHECK: {{.*}}call{{.*}} i1 @llvm.amdgcn.class.f64
347 void test_class_f64(global double* out, double a, int b)
349 *out = __builtin_amdgcn_class(a, b);
352 // CHECK-LABEL: @test_buffer_wbinvl1
353 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.buffer.wbinvl1(
354 void test_buffer_wbinvl1()
356 __builtin_amdgcn_buffer_wbinvl1();
359 // CHECK-LABEL: @test_s_dcache_inv
360 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.dcache.inv(
361 void test_s_dcache_inv()
363 __builtin_amdgcn_s_dcache_inv();
366 // CHECK-LABEL: @test_s_waitcnt
367 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.waitcnt(
368 void test_s_waitcnt()
370 __builtin_amdgcn_s_waitcnt(0);
373 // CHECK-LABEL: @test_s_sendmsg
374 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.sendmsg(
375 void test_s_sendmsg()
377 __builtin_amdgcn_s_sendmsg(1, 0);
380 // CHECK-LABEL: @test_s_sendmsg_var
381 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.sendmsg(
382 void test_s_sendmsg_var(int in)
384 __builtin_amdgcn_s_sendmsg(1, in);
387 // CHECK-LABEL: @test_s_sendmsghalt
388 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.sendmsghalt(
389 void test_s_sendmsghalt()
391 __builtin_amdgcn_s_sendmsghalt(1, 0);
394 // CHECK-LABEL: @test_s_sendmsghalt
395 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.sendmsghalt(
396 void test_s_sendmsghalt_var(int in)
398 __builtin_amdgcn_s_sendmsghalt(1, in);
401 // CHECK-LABEL: @test_s_barrier
402 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.barrier(
403 void test_s_barrier()
405 __builtin_amdgcn_s_barrier();
408 // CHECK-LABEL: @test_wave_barrier
409 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.wave.barrier(
410 void test_wave_barrier()
412 __builtin_amdgcn_wave_barrier();
415 // CHECK-LABEL: @test_sched_barrier
416 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.sched.barrier(i32 0)
417 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.sched.barrier(i32 1)
418 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.sched.barrier(i32 4)
419 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.sched.barrier(i32 15)
420 void test_sched_barrier()
422 __builtin_amdgcn_sched_barrier(0);
423 __builtin_amdgcn_sched_barrier(1);
424 __builtin_amdgcn_sched_barrier(4);
425 __builtin_amdgcn_sched_barrier(15);
428 // CHECK-LABEL: @test_sched_group_barrier
429 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.sched.group.barrier(i32 0, i32 1, i32 2)
430 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.sched.group.barrier(i32 1, i32 2, i32 4)
431 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.sched.group.barrier(i32 4, i32 8, i32 16)
432 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.sched.group.barrier(i32 15, i32 10000, i32 -1)
433 void test_sched_group_barrier()
435 __builtin_amdgcn_sched_group_barrier(0, 1, 2);
436 __builtin_amdgcn_sched_group_barrier(1, 2, 4);
437 __builtin_amdgcn_sched_group_barrier(4, 8, 16);
438 __builtin_amdgcn_sched_group_barrier(15, 10000, -1);
441 // CHECK-LABEL: @test_iglp_opt
442 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.iglp.opt(i32 0)
443 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.iglp.opt(i32 1)
444 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.iglp.opt(i32 4)
445 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.iglp.opt(i32 15)
446 void test_iglp_opt()
448 __builtin_amdgcn_iglp_opt(0);
449 __builtin_amdgcn_iglp_opt(1);
450 __builtin_amdgcn_iglp_opt(4);
451 __builtin_amdgcn_iglp_opt(15);
454 // CHECK-LABEL: @test_s_sleep
455 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.sleep(i32 1)
456 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.sleep(i32 15)
457 void test_s_sleep()
459 __builtin_amdgcn_s_sleep(1);
460 __builtin_amdgcn_s_sleep(15);
463 // CHECK-LABEL: @test_s_incperflevel
464 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.incperflevel(i32 1)
465 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.incperflevel(i32 15)
466 void test_s_incperflevel()
468 __builtin_amdgcn_s_incperflevel(1);
469 __builtin_amdgcn_s_incperflevel(15);
472 // CHECK-LABEL: @test_s_decperflevel
473 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.decperflevel(i32 1)
474 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.decperflevel(i32 15)
475 void test_s_decperflevel()
477 __builtin_amdgcn_s_decperflevel(1);
478 __builtin_amdgcn_s_decperflevel(15);
481 // CHECK-LABEL: @test_s_setprio
482 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.setprio(i16 0)
483 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.setprio(i16 3)
484 void test_s_setprio()
486 __builtin_amdgcn_s_setprio(0);
487 __builtin_amdgcn_s_setprio(3);
490 // CHECK-LABEL: @test_cubeid(
491 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.cubeid(float %a, float %b, float %c)
492 void test_cubeid(global float* out, float a, float b, float c) {
493 *out = __builtin_amdgcn_cubeid(a, b, c);
496 // CHECK-LABEL: @test_cubesc(
497 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.cubesc(float %a, float %b, float %c)
498 void test_cubesc(global float* out, float a, float b, float c) {
499 *out = __builtin_amdgcn_cubesc(a, b, c);
502 // CHECK-LABEL: @test_cubetc(
503 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.cubetc(float %a, float %b, float %c)
504 void test_cubetc(global float* out, float a, float b, float c) {
505 *out = __builtin_amdgcn_cubetc(a, b, c);
508 // CHECK-LABEL: @test_cubema(
509 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.cubema(float %a, float %b, float %c)
510 void test_cubema(global float* out, float a, float b, float c) {
511 *out = __builtin_amdgcn_cubema(a, b, c);
514 // CHECK-LABEL: @test_read_exec(
515 // CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.ballot.i64(i1 true)
516 void test_read_exec(global ulong* out) {
517 *out = __builtin_amdgcn_read_exec();
520 // CHECK: declare i64 @llvm.amdgcn.ballot.i64(i1){{.*}} #[[$NOUNWIND_READONLY:[0-9]+]]
522 // CHECK-LABEL: @test_read_exec_lo(
523 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.ballot.i32(i1 true)
524 void test_read_exec_lo(global uint* out) {
525 *out = __builtin_amdgcn_read_exec_lo();
528 // CHECK: declare i32 @llvm.amdgcn.ballot.i32(i1){{.*}} #[[$NOUNWIND_READONLY:[0-9]+]]
530 // CHECK-LABEL: @test_read_exec_hi(
531 // CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.ballot.i64(i1 true)
532 // CHECK: lshr i64 [[A:%.*]], 32
533 // CHECK: trunc nuw i64 [[B:%.*]] to i32
534 void test_read_exec_hi(global uint* out) {
535 *out = __builtin_amdgcn_read_exec_hi();
538 // CHECK-LABEL: @test_dispatch_ptr
539 // CHECK: {{.*}}call align 4 dereferenceable(64){{.*}} ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
540 #if !defined(__SPIRV__)
541 void test_dispatch_ptr(__constant unsigned char ** out)
542 #else
543 void test_dispatch_ptr(__attribute__((address_space(4))) unsigned char ** out)
544 #endif
546 *out = __builtin_amdgcn_dispatch_ptr();
549 // CHECK-LABEL: @test_queue_ptr
550 // CHECK: {{.*}}call{{.*}} ptr addrspace(4) @llvm.amdgcn.queue.ptr()
551 #if !defined(__SPIRV__)
552 void test_queue_ptr(__constant unsigned char ** out)
553 #else
554 void test_queue_ptr(__attribute__((address_space(4))) unsigned char ** out)
555 #endif
557 *out = __builtin_amdgcn_queue_ptr();
560 // CHECK-LABEL: @test_kernarg_segment_ptr
561 // CHECK: {{.*}}call{{.*}} ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
562 #if !defined(__SPIRV__)
563 void test_kernarg_segment_ptr(__constant unsigned char ** out)
564 #else
565 void test_kernarg_segment_ptr(__attribute__((address_space(4))) unsigned char ** out)
566 #endif
568 *out = __builtin_amdgcn_kernarg_segment_ptr();
571 // CHECK-LABEL: @test_implicitarg_ptr
572 // CHECK: {{.*}}call{{.*}} ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
573 #if !defined(__SPIRV__)
574 void test_implicitarg_ptr(__constant unsigned char ** out)
575 #else
576 void test_implicitarg_ptr(__attribute__((address_space(4))) unsigned char ** out)
577 #endif
579 *out = __builtin_amdgcn_implicitarg_ptr();
582 // CHECK-LABEL: @test_get_group_id(
583 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.workgroup.id.x()
584 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.workgroup.id.y()
585 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.workgroup.id.z()
586 void test_get_group_id(int d, global int *out)
588 switch (d) {
589 case 0: *out = __builtin_amdgcn_workgroup_id_x(); break;
590 case 1: *out = __builtin_amdgcn_workgroup_id_y(); break;
591 case 2: *out = __builtin_amdgcn_workgroup_id_z(); break;
592 default: *out = 0;
596 // CHECK-LABEL: @test_s_getreg(
597 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.s.getreg(i32 0)
598 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.s.getreg(i32 1)
599 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.s.getreg(i32 65535)
600 void test_s_getreg(volatile global uint *out)
602 *out = __builtin_amdgcn_s_getreg(0);
603 *out = __builtin_amdgcn_s_getreg(1);
604 *out = __builtin_amdgcn_s_getreg(65535);
607 // CHECK-LABEL: @test_get_local_id(
608 // CHECK: tail call noundef range(i32 0, 1024){{.*}} i32 @llvm.amdgcn.workitem.id.x()
609 // CHECK: tail call noundef range(i32 0, 1024){{.*}} i32 @llvm.amdgcn.workitem.id.y()
610 // CHECK: tail call noundef range(i32 0, 1024){{.*}} i32 @llvm.amdgcn.workitem.id.z()
611 void test_get_local_id(int d, global int *out)
613 switch (d) {
614 case 0: *out = __builtin_amdgcn_workitem_id_x(); break;
615 case 1: *out = __builtin_amdgcn_workitem_id_y(); break;
616 case 2: *out = __builtin_amdgcn_workitem_id_z(); break;
617 default: *out = 0;
// CHECK-LABEL: @test_get_workgroup_size(
// CHECK: {{.*}}call align 8 dereferenceable(256){{.*}} ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 12
// CHECK: load i16, ptr addrspace(4) %{{.*}}, align 4, !range [[$WS_RANGE:![0-9]*]], !invariant.load{{.*}}, !noundef
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 14
// CHECK: load i16, ptr addrspace(4) %{{.*}}, align 2, !range [[$WS_RANGE:![0-9]*]], !invariant.load{{.*}}, !noundef
// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 16
// CHECK: load i16, ptr addrspace(4) %{{.*}}, align 8, !range [[$WS_RANGE:![0-9]*]], !invariant.load{{.*}}, !noundef
void test_get_workgroup_size(int d, global int *out)
{
  switch (d) {
  // Store the raw builtin result for all three axes; the x case previously
  // carried a stray "+ 1" that was inconsistent with the y/z cases and with
  // the parallel test_get_local_id / test_get_grid_size tests.
  // NOTE(review): the CHECK lines only constrain the implicitarg loads, not
  // the stored value, so confirm the "+ 1" was not intentional upstream.
  case 0: *out = __builtin_amdgcn_workgroup_size_x(); break;
  case 1: *out = __builtin_amdgcn_workgroup_size_y(); break;
  case 2: *out = __builtin_amdgcn_workgroup_size_z(); break;
  default: *out = 0;
  }
}
639 // CHECK-LABEL: @test_get_grid_size(
640 // CHECK: {{.*}}call align 4 dereferenceable(64){{.*}} ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
641 // CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 %{{.+}}
642 // CHECK: load i32, ptr addrspace(4) %{{.*}}, align 4, !range [[$GRID_RANGE:![0-9]+]], !invariant.load
643 void test_get_grid_size(int d, global int *out)
645 switch (d) {
646 case 0: *out = __builtin_amdgcn_grid_size_x(); break;
647 case 1: *out = __builtin_amdgcn_grid_size_y(); break;
648 case 2: *out = __builtin_amdgcn_grid_size_z(); break;
649 default: *out = 0;
653 // CHECK-LABEL: @test_fmed3_f32
654 // CHECK: {{.*}}call{{.*}} float @llvm.amdgcn.fmed3.f32(
655 void test_fmed3_f32(global float* out, float a, float b, float c)
657 *out = __builtin_amdgcn_fmed3f(a, b, c);
660 // CHECK-LABEL: @test_s_getpc
661 // CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.s.getpc()
662 void test_s_getpc(global ulong* out)
664 *out = __builtin_amdgcn_s_getpc();
667 // CHECK-LABEL: @test_ds_append_lds(
668 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.ds.append.p3(ptr addrspace(3) %{{.+}}, i1 false)
669 #if !defined(__SPIRV__)
670 kernel void test_ds_append_lds(global int* out, local int* ptr) {
671 #else
672 kernel void test_ds_append_lds(__attribute__((address_space(1))) int* out, __attribute__((address_space(3))) int* ptr) {
673 #endif
674 *out = __builtin_amdgcn_ds_append(ptr);
677 // CHECK-LABEL: @test_ds_consume_lds(
678 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.ds.consume.p3(ptr addrspace(3) %{{.+}}, i1 false)
680 #if !defined(__SPIRV__)
681 kernel void test_ds_consume_lds(global int* out, local int* ptr) {
682 #else
683 kernel void test_ds_consume_lds(__attribute__((address_space(1))) int* out, __attribute__((address_space(3))) int* ptr) {
684 #endif
685 *out = __builtin_amdgcn_ds_consume(ptr);
688 // CHECK-LABEL: @test_gws_init(
689 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.ds.gws.init(i32 %value, i32 %id)
690 kernel void test_gws_init(uint value, uint id) {
691 __builtin_amdgcn_ds_gws_init(value, id);
694 // CHECK-LABEL: @test_gws_barrier(
695 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.ds.gws.barrier(i32 %value, i32 %id)
696 kernel void test_gws_barrier(uint value, uint id) {
697 __builtin_amdgcn_ds_gws_barrier(value, id);
700 // CHECK-LABEL: @test_gws_sema_v(
701 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.ds.gws.sema.v(i32 %id)
702 kernel void test_gws_sema_v(uint id) {
703 __builtin_amdgcn_ds_gws_sema_v(id);
706 // CHECK-LABEL: @test_gws_sema_br(
707 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.ds.gws.sema.br(i32 %value, i32 %id)
708 kernel void test_gws_sema_br(uint value, uint id) {
709 __builtin_amdgcn_ds_gws_sema_br(value, id);
712 // CHECK-LABEL: @test_gws_sema_p(
713 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.ds.gws.sema.p(i32 %id)
714 kernel void test_gws_sema_p(uint id) {
715 __builtin_amdgcn_ds_gws_sema_p(id);
718 // CHECK-LABEL: @test_mbcnt_lo(
719 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.mbcnt.lo(i32 %src0, i32 %src1)
720 kernel void test_mbcnt_lo(global uint* out, uint src0, uint src1) {
721 *out = __builtin_amdgcn_mbcnt_lo(src0, src1);
724 // CHECK-LABEL: @test_mbcnt_hi(
725 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.mbcnt.hi(i32 %src0, i32 %src1)
726 kernel void test_mbcnt_hi(global uint* out, uint src0, uint src1) {
727 *out = __builtin_amdgcn_mbcnt_hi(src0, src1);
730 // CHECK-LABEL: @test_alignbit(
731 // CHECK: tail call{{.*}} i32 @llvm.fshr.i32(i32 %src0, i32 %src1, i32 %src2)
732 kernel void test_alignbit(global uint* out, uint src0, uint src1, uint src2) {
733 *out = __builtin_amdgcn_alignbit(src0, src1, src2);
736 // CHECK-LABEL: @test_alignbyte(
737 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.alignbyte(i32 %src0, i32 %src1, i32 %src2)
738 kernel void test_alignbyte(global uint* out, uint src0, uint src1, uint src2) {
739 *out = __builtin_amdgcn_alignbyte(src0, src1, src2);
742 // CHECK-LABEL: @test_ubfe(
743 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.ubfe.i32(i32 %src0, i32 %src1, i32 %src2)
744 kernel void test_ubfe(global uint* out, uint src0, uint src1, uint src2) {
745 *out = __builtin_amdgcn_ubfe(src0, src1, src2);
748 // CHECK-LABEL: @test_sbfe(
749 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.sbfe.i32(i32 %src0, i32 %src1, i32 %src2)
750 kernel void test_sbfe(global uint* out, uint src0, uint src1, uint src2) {
751 *out = __builtin_amdgcn_sbfe(src0, src1, src2);
754 // CHECK-LABEL: @test_cvt_pkrtz(
755 // CHECK: tail call{{.*}} <2 x half> @llvm.amdgcn.cvt.pkrtz(float %src0, float %src1)
756 kernel void test_cvt_pkrtz(global half2* out, float src0, float src1) {
757 *out = __builtin_amdgcn_cvt_pkrtz(src0, src1);
760 // CHECK-LABEL: @test_cvt_pknorm_i16(
761 // CHECK: tail call{{.*}} <2 x i16> @llvm.amdgcn.cvt.pknorm.i16(float %src0, float %src1)
762 kernel void test_cvt_pknorm_i16(global short2* out, float src0, float src1) {
763 *out = __builtin_amdgcn_cvt_pknorm_i16(src0, src1);
766 // CHECK-LABEL: @test_cvt_pknorm_u16(
767 // CHECK: tail call{{.*}} <2 x i16> @llvm.amdgcn.cvt.pknorm.u16(float %src0, float %src1)
768 kernel void test_cvt_pknorm_u16(global ushort2* out, float src0, float src1) {
769 *out = __builtin_amdgcn_cvt_pknorm_u16(src0, src1);
772 // CHECK-LABEL: @test_cvt_pk_i16(
773 // CHECK: tail call{{.*}} <2 x i16> @llvm.amdgcn.cvt.pk.i16(i32 %src0, i32 %src1)
774 kernel void test_cvt_pk_i16(global short2* out, int src0, int src1) {
775 *out = __builtin_amdgcn_cvt_pk_i16(src0, src1);
778 // CHECK-LABEL: @test_cvt_pk_u16(
779 // CHECK: tail call{{.*}} <2 x i16> @llvm.amdgcn.cvt.pk.u16(i32 %src0, i32 %src1)
780 kernel void test_cvt_pk_u16(global ushort2* out, uint src0, uint src1) {
781 *out = __builtin_amdgcn_cvt_pk_u16(src0, src1);
784 // CHECK-LABEL: @test_cvt_pk_u8_f32
785 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src0, i32 %src1, i32 %src2)
786 kernel void test_cvt_pk_u8_f32(global uint* out, float src0, uint src1, uint src2) {
787 *out = __builtin_amdgcn_cvt_pk_u8_f32(src0, src1, src2);
790 // CHECK-LABEL: @test_sad_u8(
791 // CHECK: tail call{{.*}} i32 @llvm.amdgcn.sad.u8(i32 %src0, i32 %src1, i32 %src2)
792 kernel void test_sad_u8(global uint* out, uint src0, uint src1, uint src2) {
793 *out = __builtin_amdgcn_sad_u8(src0, src1, src2);
796 // CHECK-LABEL: test_msad_u8(
797 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.msad.u8(i32 %src0, i32 %src1, i32 %src2)
798 kernel void test_msad_u8(global uint* out, uint src0, uint src1, uint src2) {
799 *out = __builtin_amdgcn_msad_u8(src0, src1, src2);
802 // CHECK-LABEL: test_sad_hi_u8(
803 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.sad.hi.u8(i32 %src0, i32 %src1, i32 %src2)
804 kernel void test_sad_hi_u8(global uint* out, uint src0, uint src1, uint src2) {
805 *out = __builtin_amdgcn_sad_hi_u8(src0, src1, src2);
808 // CHECK-LABEL: @test_sad_u16(
809 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.sad.u16(i32 %src0, i32 %src1, i32 %src2)
810 kernel void test_sad_u16(global uint* out, uint src0, uint src1, uint src2) {
811 *out = __builtin_amdgcn_sad_u16(src0, src1, src2);
814 // CHECK-LABEL: @test_qsad_pk_u16_u8(
815 // CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.qsad.pk.u16.u8(i64 %src0, i32 %src1, i64 %src2)
816 kernel void test_qsad_pk_u16_u8(global ulong* out, ulong src0, uint src1, ulong src2) {
817 *out = __builtin_amdgcn_qsad_pk_u16_u8(src0, src1, src2);
820 // CHECK-LABEL: @test_mqsad_pk_u16_u8(
821 // CHECK: {{.*}}call{{.*}} i64 @llvm.amdgcn.mqsad.pk.u16.u8(i64 %src0, i32 %src1, i64 %src2)
822 kernel void test_mqsad_pk_u16_u8(global ulong* out, ulong src0, uint src1, ulong src2) {
823 *out = __builtin_amdgcn_mqsad_pk_u16_u8(src0, src1, src2);
826 // CHECK-LABEL: test_mqsad_u32_u8(
827 // CHECK: {{.*}}call{{.*}} <4 x i32> @llvm.amdgcn.mqsad.u32.u8(i64 %src0, i32 %src1, <4 x i32> %src2)
828 kernel void test_mqsad_u32_u8(global uint4* out, ulong src0, uint src1, uint4 src2) {
829 *out = __builtin_amdgcn_mqsad_u32_u8(src0, src1, src2);
832 // CHECK-LABEL: test_s_setreg(
833 // CHECK: {{.*}}call{{.*}} void @llvm.amdgcn.s.setreg(i32 8193, i32 %val)
834 kernel void test_s_setreg(uint val) {
835 __builtin_amdgcn_s_setreg(8193, val);
838 // CHECK-LABEL test_atomic_inc_dec(
839 #if !defined(__SPIRV__)
840 void test_atomic_inc_dec(local uint *lptr, global uint *gptr, uint val) {
841 #else
842 void test_atomic_inc_dec(__attribute__((address_space(3))) uint *lptr, __attribute__((address_space(1))) uint *gptr, uint val) {
843 #endif
844 uint res;
846 // CHECK: atomicrmw uinc_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4
847 res = __builtin_amdgcn_atomic_inc32(lptr, val, __ATOMIC_SEQ_CST, "workgroup");
849 // CHECK: atomicrmw udec_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4
850 res = __builtin_amdgcn_atomic_dec32(lptr, val, __ATOMIC_SEQ_CST, "workgroup");
852 // CHECK: atomicrmw uinc_wrap ptr addrspace(1) %gptr, i32 %val syncscope("agent") seq_cst, align 4
853 res = __builtin_amdgcn_atomic_inc32(gptr, val, __ATOMIC_SEQ_CST, "agent");
855 // CHECK: atomicrmw udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4
856 res = __builtin_amdgcn_atomic_dec32(gptr, val, __ATOMIC_SEQ_CST, "");
858 // CHECK: atomicrmw volatile udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4
859 #if !defined(__SPIRV__)
860 res = __builtin_amdgcn_atomic_dec32((volatile global uint*)gptr, val, __ATOMIC_SEQ_CST, "");
861 #else
862 res = __builtin_amdgcn_atomic_dec32((volatile __attribute__((address_space(1))) uint*)gptr, val, __ATOMIC_SEQ_CST, "");
863 #endif
866 // CHECK-LABEL test_wavefrontsize(
867 unsigned test_wavefrontsize() {
869 // CHECK: {{.*}}call{{.*}} i32 @llvm.amdgcn.wavefrontsize()
870 return __builtin_amdgcn_wavefrontsize();
873 // CHECK-LABEL test_flt_rounds(
874 unsigned test_flt_rounds() {
876 // CHECK: {{.*}}call{{.*}} i32 @llvm.get.rounding()
877 unsigned mode = __builtin_flt_rounds();
879 #if !defined(__SPIRV__)
880 // CHECK-AMDGCN: call void @llvm.set.rounding(i32 %0)
881 __builtin_set_flt_rounds(mode);
882 #endif
884 return mode;
887 // CHECK-LABEL test_get_fpenv(
888 unsigned long test_get_fpenv() {
889 // CHECK: {{.*}}call{{.*}} i64 @llvm.get.fpenv.i64()
890 return __builtin_amdgcn_get_fpenv();
893 // CHECK-LABEL test_set_fpenv(
894 void test_set_fpenv(unsigned long env) {
895 // CHECK: {{.*}}call{{.*}} void @llvm.set.fpenv.i64(i64 %[[ENV:.+]])
896 __builtin_amdgcn_set_fpenv(env);
899 // CHECK-DAG: [[$GRID_RANGE]] = !{i32 1, i32 0}
900 // CHECK-DAG: [[$WS_RANGE]] = !{i16 1, i16 1025}
901 // CHECK-DAG: attributes #[[$NOUNWIND_READONLY]] = { convergent mustprogress nocallback nofree nounwind willreturn memory(none) }