Commit: Run DCE after a LoopFlatten test to reduce spurious output [nfc]
Source: llvm-project — openmp/libomptarget/test/offloading/default_thread_limit.c
blob: d32e7df418cbbd041504d579ecbd595c97bee060
// clang-format off
// RUN: %libomptarget-compile-generic
// RUN: env LIBOMPTARGET_INFO=16 \
// RUN: %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefix=DEFAULT
// UNSUPPORTED: nvptx64-nvidia-cuda
// UNSUPPORTED: nvptx64-nvidia-cuda-LTO
// UNSUPPORTED: aarch64-unknown-linux-gnu
// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
// UNSUPPORTED: x86_64-pc-linux-gnu
// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
// Always returns 1. Marked optnone so the compiler cannot inline or
// constant-fold calls; the test uses it to keep trip counts and clause
// arguments (thread_limit/num_threads) as runtime values.
__attribute__((optnone)) int optnone() { return 1; }
15 int main() {
16 int N = optnone() * 4098 * 32;
18 // DEFAULT: [[NT:(128|256)]] (MaxFlatWorkGroupSize: [[NT]]
19 #pragma omp target teams distribute parallel for simd
20 for (int i = 0; i < N; ++i) {
21 optnone();
23 // DEFAULT: [[NT:(128|256)]] (MaxFlatWorkGroupSize: [[NT]]
24 #pragma omp target teams distribute parallel for simd
25 for (int i = 0; i < N; ++i) {
26 optnone();
28 // DEFAULT: [[NT:(128|256)]] (MaxFlatWorkGroupSize: [[NT]]
29 #pragma omp target teams distribute parallel for simd
30 for (int i = 0; i < N; ++i) {
31 optnone();
33 // DEFAULT: [[NT:(128|256)]] (MaxFlatWorkGroupSize: [[NT]]
34 #pragma omp target
35 #pragma omp teams distribute parallel for
36 for (int i = 0; i < N; ++i) {
37 optnone();
39 // DEFAULT: 42 (MaxFlatWorkGroupSize: 1024
40 #pragma omp target thread_limit(optnone() * 42)
41 #pragma omp teams distribute parallel for
42 for (int i = 0; i < N; ++i) {
43 optnone();
45 // DEFAULT: 42 (MaxFlatWorkGroupSize: 42
46 #pragma omp target thread_limit(optnone() * 42) ompx_attribute(__attribute__((amdgpu_flat_work_group_size(42, 42))))
47 #pragma omp teams distribute parallel for
48 for (int i = 0; i < N; ++i) {
49 optnone();
51 // DEFAULT: 42 (MaxFlatWorkGroupSize: 42
52 #pragma omp target ompx_attribute(__attribute__((amdgpu_flat_work_group_size(42, 42))))
53 #pragma omp teams distribute parallel for
54 for (int i = 0; i < N; ++i) {
55 optnone();
57 // DEFAULT: MaxFlatWorkGroupSize: 1024
58 #pragma omp target
59 #pragma omp teams distribute parallel for num_threads(optnone() * 42)
60 for (int i = 0; i < N; ++i) {
61 optnone();
63 // DEFAULT: MaxFlatWorkGroupSize: 1024
64 #pragma omp target teams distribute parallel for thread_limit(optnone() * 42)
65 for (int i = 0; i < N; ++i) {
66 optnone();
68 // DEFAULT: MaxFlatWorkGroupSize: 1024
69 #pragma omp target teams distribute parallel for num_threads(optnone() * 42)
70 for (int i = 0; i < N; ++i) {
71 optnone();
73 // DEFAULT: 9 (MaxFlatWorkGroupSize: 9
74 #pragma omp target
75 #pragma omp teams distribute parallel for num_threads(9)
76 for (int i = 0; i < N; ++i) {
77 optnone();
79 // DEFAULT: 4 (MaxFlatWorkGroupSize: 4
80 #pragma omp target thread_limit(4)
81 #pragma omp teams distribute parallel for
82 for (int i = 0; i < N; ++i) {
83 optnone();
85 // DEFAULT: 4 (MaxFlatWorkGroupSize: 4
86 #pragma omp target
87 #pragma omp teams distribute parallel for thread_limit(4)
88 for (int i = 0; i < N; ++i) {
89 optnone();
91 // DEFAULT: 9 (MaxFlatWorkGroupSize: 9
92 #pragma omp target teams distribute parallel for num_threads(9)
93 for (int i = 0; i < N; ++i) {
94 optnone();
96 // DEFAULT: 4 (MaxFlatWorkGroupSize: 4
97 #pragma omp target teams distribute parallel for simd thread_limit(4)
98 for (int i = 0; i < N; ++i) {
99 optnone();