// RUN: %clang_cc1 -x hip %s --hipstdpar -triple amdgcn-amd-amdhsa --std=c++17 \
// RUN:   -fcuda-is-device -emit-llvm -o /dev/null -verify

// Note: These would happen implicitly, within the implementation of the
// accelerator specific algorithm library, and not from user code.

// Calls from the accelerator side to implicitly host (i.e. unannotated)
// functions are fine.

// expected-no-diagnostics
// Minimal stand-ins for the HIP execution-space attribute macros, so this
// test compiles without pulling in the HIP runtime headers.
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
// Unannotated (implicitly host) function; device-side callers below must be
// accepted without diagnostics under --hipstdpar device compilation.
extern "C" void host_fn() {}
// Device-annotated function that calls an unannotated host function.
// NOTE(review): in the full file this appears to be a struct member
// (out-of-view enclosing scope) — confirm against the complete source.
__device__ void hd() { host_fn(); }
// Declared here; defined out of line as T::hd3 further down in the file.
__device__ void hd3();
// Unannotated (implicitly host) member operator with an empty body.
void operator-(const T &) {}
// Unannotated user-defined conversion; returns a default-constructed Dummy.
operator Dummy() { return Dummy(); }
// Out-of-line definition of the device member declared in T; calls an
// unannotated host function, which hipstdpar must permit.
__device__ void T::hd3() { host_fn(); }
// Device function template whose body calls an unannotated host function;
// the host call is only emitted when the template is instantiated.
template <typename T> __device__ void hd2() { host_fn(); }
// Kernel entry point; instantiating hd2<int> pulls the host call into
// device code from a __global__ context.
__global__ void kernel() { hd2<int>(); }
// Free device function calling an unannotated host function directly.
__device__ void hd() { host_fn(); }
// Free device function template calling an unannotated host function.
template <typename T> __device__ void hd3() { host_fn(); }
// Device function that instantiates hd3<int>, exercising the
// device -> template -> host-function call chain.
__device__ void device_fn() { hd3<int>(); }
50 __device__
void local_var() {
54 __device__
void explicit_destructor(S
*s
) {
58 __device__
void hd_member_fn() {
64 __device__
void h_member_fn() {
69 __device__
void unaryOp() {
74 __device__
void binaryOp() {
79 __device__
void implicitConversion() {
// Primary definition of TmplStruct's device member function template
// (empty body; the interesting behavior lives in the specialization below).
template <typename U> __device__ void fn() {}
// Explicit specialization body for TmplStruct<int>::fn<int>; calls an
// unannotated host function. The required `template <>` headers precede
// this declaration in the full file (outside this chunk).
__device__ void TmplStruct<int>::fn<int>() { host_fn(); }
// Invokes the doubly-specialized member (class and member template both
// specialized on int) from device code.
__device__ void double_specialization() { TmplStruct<int>().fn<int>(); }