// REQUIRES: amdgpu-registered-target

// RUN: %clang_cc1 -fopenmp -x c++ -w -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -fopenmp -x c++ -w -std=c++11 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -target-cpu gfx906 -o - | FileCheck %s
// expected-no-diagnostics

/*===-----------------------------------------------------------------------===

   Inspired from SOLLVE tests:
    - 5.0/metadirective/test_metadirective_arch_is_nvidia.c

===------------------------------------------------------------------------===*/
#define N 1024

// Codegen test body: offloads a vector multiply to the device with
// "#pragma omp target" and uses a metadirective so that when the device
// architecture is amdgcn the loop is emitted as
// "teams distribute parallel for" (verified by the FileCheck patterns
// below).  The runtime result is irrelevant; the interesting output of
// this test is the generated LLVM IR.  Returns 0 unconditionally.
int metadirective1() {
  int v1[N], v2[N], v3[N];
  // default_device is read by the device() clause below, so give it a
  // defined value (device 0) rather than leaving it indeterminate.
  int target_device_num, host_device_num, default_device = 0;

#pragma omp target map(to:v1,v2) map(from:v3, target_device_num) device(default_device)
#pragma omp metadirective \
    when(device={arch("amdgcn")}: teams distribute parallel for) \
    default(parallel for)
  for (int i = 0; i < N; i++) {
    // atomic write keeps the stores well-defined under the parallel variants.
#pragma omp atomic write
    v3[i] = v1[i] * v2[i];
  }

  return 0;
}
// CHECK: define weak_odr protected amdgpu_kernel void @[[METADIRECTIVE:.+metadirective1[a-z0-9_]+]]

// CHECK: %{{[0-9]}} = call i32 @__kmpc_target_init
// CHECK: user_code.entry:
// CHECK: call void @[[METADIRECTIVE]]_omp_outlined
// CHECK-NOT: call void @__kmpc_parallel_51

// CHECK: define internal void @[[METADIRECTIVE]]_omp_outlined
// CHECK: call void @__kmpc_distribute_static_init
// CHECK: omp.loop.exit:
// CHECK: call void @__kmpc_distribute_static_fini

// CHECK: define internal void @[[METADIRECTIVE]]_omp_outlined_omp_outlined
// CHECK: call void @__kmpc_for_static_init_4
// CHECK: omp.inner.for.body:
// CHECK: store atomic {{.*}} monotonic
// CHECK: omp.loop.exit:
// CHECK-NEXT: call void @__kmpc_for_static_fini
// CHECK-NEXT: ret void