// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -emit-llvm -o - -fopenmp \
// RUN: -triple i386-unknown-unknown %s | \
// RUN: FileCheck %s --check-prefix=CHECK-32
// RUN: %clang_cc1 -emit-llvm -o - -fopenmp \
// RUN: -triple x86_64-unknown-linux-gnu %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp \
// RUN: -triple x86_64-unknown-linux-gnu \
// RUN: -emit-pch %s -o %t
// RUN: %clang_cc1 -fopenmp \
// RUN: -triple x86_64-unknown-linux-gnu \
// RUN: -include-pch %t -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// Predefined OpenMP memory allocator handles (mirrors omp.h). The
// KMP_ALLOCATOR_MAX_HANDLE sentinel is set to __UINTPTR_MAX__ so the
// enum's underlying type is wide enough to hold a pointer-sized handle.
typedef enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
} omp_allocator_handle_t;
40 omp_allocator_handle_t MyAlloc
= omp_large_cap_mem_alloc
;
// Exercise the align clause with every predefined allocator, in both
// clause orders. Directives with no allocator clause (foo0, foo10,
// bar4-bar6) fall back to the default allocator (a null handle in the
// generated __kmpc_aligned_alloc calls).
#pragma omp allocate(foo0) align(1)
#pragma omp allocate(foo1) allocator(omp_pteam_mem_alloc) align(2)
#pragma omp allocate(foo2) align(4) allocator(omp_cgroup_mem_alloc)
#pragma omp allocate(foo3) align(8) allocator(omp_low_lat_mem_alloc)
#pragma omp allocate(foo4) align(16) allocator(omp_high_bw_mem_alloc)
#pragma omp allocate(foo5) align(32) allocator(omp_const_mem_alloc)
#pragma omp allocate(foo6) align(64) allocator(omp_large_cap_mem_alloc)
#pragma omp allocate(foo7) align(32) allocator(omp_thread_mem_alloc)
#pragma omp allocate(foo8) align(16) allocator(omp_null_allocator)
#pragma omp allocate(foo9) align(8) allocator(omp_thread_mem_alloc)
#pragma omp allocate(foo10) align(128)
#pragma omp allocate(bar1, bar2, bar3) align(2) allocator(MyAlloc)
#pragma omp allocate(bar4, bar5, bar6) align(16)
// Verify align clause in template with non-type template parameter.
70 template <typename T
, unsigned size
, unsigned align
>
73 #pragma omp allocate(foo) align(align) allocator(omp_cgroup_mem_alloc)
79 result
= run
<double, 1000, 16>();
// CHECK-32-LABEL: define {{[^@]+}}@main
// CHECK-32-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[MYALLOC:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK-32-NEXT: [[DOTFOO0__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 20, ptr null)
// CHECK-32-NEXT: [[DOTFOO1__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 40, ptr inttoptr (i32 7 to ptr))
// CHECK-32-NEXT: [[DOTFOO2__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 80, ptr inttoptr (i32 6 to ptr))
// CHECK-32-NEXT: [[DOTFOO3__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 8, i32 120, ptr inttoptr (i32 5 to ptr))
// CHECK-32-NEXT: [[DOTFOO4__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 160, ptr inttoptr (i32 4 to ptr))
// CHECK-32-NEXT: [[DOTFOO5__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 32, i32 200, ptr inttoptr (i32 3 to ptr))
// CHECK-32-NEXT: [[DOTFOO6__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 64, i32 240, ptr inttoptr (i32 2 to ptr))
// CHECK-32-NEXT: [[DOTFOO7__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 32, i32 280, ptr inttoptr (i32 8 to ptr))
// CHECK-32-NEXT: [[DOTFOO8__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 320, ptr null)
// CHECK-32-NEXT: store i32 2, ptr [[MYALLOC]], align 4
// CHECK-32-NEXT: [[DOTFOO9__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 8, i32 640, ptr inttoptr (i32 8 to ptr))
// CHECK-32-NEXT: [[DOTFOO10__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 128, i32 720, ptr null)
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO10__VOID_ADDR]], ptr null)
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO9__VOID_ADDR]], ptr inttoptr (i32 8 to ptr))
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, ptr [[MYALLOC]], align 4
// CHECK-32-NEXT: [[CONV:%.*]] = inttoptr i32 [[TMP1]] to ptr
// CHECK-32-NEXT: [[DOTBAR1__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 4, ptr [[CONV]])
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[MYALLOC]], align 4
// CHECK-32-NEXT: [[CONV1:%.*]] = inttoptr i32 [[TMP2]] to ptr
// CHECK-32-NEXT: [[DOTBAR2__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 40, ptr [[CONV1]])
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[MYALLOC]], align 4
// CHECK-32-NEXT: [[CONV2:%.*]] = inttoptr i32 [[TMP3]] to ptr
// CHECK-32-NEXT: [[DOTBAR3__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 80, ptr [[CONV2]])
// CHECK-32-NEXT: [[DOTBAR4__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 4, ptr null)
// CHECK-32-NEXT: [[DOTBAR5__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 4, ptr null)
// CHECK-32-NEXT: [[DOTBAR6__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 240, ptr null)
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR6__VOID_ADDR]], ptr null)
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR5__VOID_ADDR]], ptr null)
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR4__VOID_ADDR]], ptr null)
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[MYALLOC]], align 4
// CHECK-32-NEXT: [[CONV3:%.*]] = inttoptr i32 [[TMP4]] to ptr
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR3__VOID_ADDR]], ptr [[CONV3]])
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[MYALLOC]], align 4
// CHECK-32-NEXT: [[CONV4:%.*]] = inttoptr i32 [[TMP5]] to ptr
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR2__VOID_ADDR]], ptr [[CONV4]])
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[MYALLOC]], align 4
// CHECK-32-NEXT: [[CONV5:%.*]] = inttoptr i32 [[TMP6]] to ptr
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR1__VOID_ADDR]], ptr [[CONV5]])
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO8__VOID_ADDR]], ptr null)
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO7__VOID_ADDR]], ptr inttoptr (i32 8 to ptr))
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO6__VOID_ADDR]], ptr inttoptr (i32 2 to ptr))
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO5__VOID_ADDR]], ptr inttoptr (i32 3 to ptr))
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO4__VOID_ADDR]], ptr inttoptr (i32 4 to ptr))
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO3__VOID_ADDR]], ptr inttoptr (i32 5 to ptr))
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO2__VOID_ADDR]], ptr inttoptr (i32 6 to ptr))
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO1__VOID_ADDR]], ptr inttoptr (i32 7 to ptr))
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO0__VOID_ADDR]], ptr null)
// CHECK-32-NEXT: ret i32 0
// CHECK-32-LABEL: define {{[^@]+}}@_Z13template_testv
// CHECK-32-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[RESULT:%.*]] = alloca double, align 8
// CHECK-32-NEXT: [[CALL:%.*]] = call noundef double @_Z3runIdLj1000ELj16EET_v()
// CHECK-32-NEXT: store double [[CALL]], ptr [[RESULT]], align 8
// CHECK-32-NEXT: ret i32 0
// CHECK-32-LABEL: define {{[^@]+}}@_Z3runIdLj1000ELj16EET_v
// CHECK-32-SAME: () #[[ATTR2]] comdat {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-NEXT: [[DOTFOO__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 8000, ptr inttoptr (i32 6 to ptr))
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x double], ptr [[DOTFOO__VOID_ADDR]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP1:%.*]] = load double, ptr [[ARRAYIDX]], align 8
// CHECK-32-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO__VOID_ADDR]], ptr inttoptr (i32 6 to ptr))
// CHECK-32-NEXT: ret double [[TMP1]]
// CHECK-LABEL: define {{[^@]+}}@main
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MYALLOC:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK-NEXT: [[DOTFOO0__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 32, ptr null)
// CHECK-NEXT: [[DOTFOO1__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 48, ptr inttoptr (i64 7 to ptr))
// CHECK-NEXT: [[DOTFOO2__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 80, ptr inttoptr (i64 6 to ptr))
// CHECK-NEXT: [[DOTFOO3__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 128, ptr inttoptr (i64 5 to ptr))
// CHECK-NEXT: [[DOTFOO4__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 160, ptr inttoptr (i64 4 to ptr))
// CHECK-NEXT: [[DOTFOO5__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 32, i64 208, ptr inttoptr (i64 3 to ptr))
// CHECK-NEXT: [[DOTFOO6__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 64, i64 240, ptr inttoptr (i64 2 to ptr))
// CHECK-NEXT: [[DOTFOO7__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 32, i64 288, ptr inttoptr (i64 8 to ptr))
// CHECK-NEXT: [[DOTFOO8__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 320, ptr null)
// CHECK-NEXT: store i64 2, ptr [[MYALLOC]], align 8
// CHECK-NEXT: [[DOTFOO9__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 640, ptr inttoptr (i64 8 to ptr))
// CHECK-NEXT: [[DOTFOO10__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 128, i64 720, ptr null)
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO10__VOID_ADDR]], ptr null)
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO9__VOID_ADDR]], ptr inttoptr (i64 8 to ptr))
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[MYALLOC]], align 8
// CHECK-NEXT: [[CONV:%.*]] = inttoptr i64 [[TMP1]] to ptr
// CHECK-NEXT: [[DOTBAR1__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 4, ptr [[CONV]])
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[MYALLOC]], align 8
// CHECK-NEXT: [[CONV1:%.*]] = inttoptr i64 [[TMP2]] to ptr
// CHECK-NEXT: [[DOTBAR2__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 48, ptr [[CONV1]])
// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[MYALLOC]], align 8
// CHECK-NEXT: [[CONV2:%.*]] = inttoptr i64 [[TMP3]] to ptr
// CHECK-NEXT: [[DOTBAR3__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 80, ptr [[CONV2]])
// CHECK-NEXT: [[DOTBAR4__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 8, ptr null)
// CHECK-NEXT: [[DOTBAR5__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 4, ptr null)
// CHECK-NEXT: [[DOTBAR6__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 240, ptr null)
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR6__VOID_ADDR]], ptr null)
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR5__VOID_ADDR]], ptr null)
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR4__VOID_ADDR]], ptr null)
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr [[MYALLOC]], align 8
// CHECK-NEXT: [[CONV3:%.*]] = inttoptr i64 [[TMP4]] to ptr
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR3__VOID_ADDR]], ptr [[CONV3]])
// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[MYALLOC]], align 8
// CHECK-NEXT: [[CONV4:%.*]] = inttoptr i64 [[TMP5]] to ptr
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR2__VOID_ADDR]], ptr [[CONV4]])
// CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[MYALLOC]], align 8
// CHECK-NEXT: [[CONV5:%.*]] = inttoptr i64 [[TMP6]] to ptr
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTBAR1__VOID_ADDR]], ptr [[CONV5]])
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO8__VOID_ADDR]], ptr null)
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO7__VOID_ADDR]], ptr inttoptr (i64 8 to ptr))
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO6__VOID_ADDR]], ptr inttoptr (i64 2 to ptr))
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO5__VOID_ADDR]], ptr inttoptr (i64 3 to ptr))
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO4__VOID_ADDR]], ptr inttoptr (i64 4 to ptr))
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO3__VOID_ADDR]], ptr inttoptr (i64 5 to ptr))
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO2__VOID_ADDR]], ptr inttoptr (i64 6 to ptr))
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO1__VOID_ADDR]], ptr inttoptr (i64 7 to ptr))
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO0__VOID_ADDR]], ptr null)
// CHECK-NEXT: ret i32 0
// CHECK-LABEL: define {{[^@]+}}@_Z13template_testv
// CHECK-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RESULT:%.*]] = alloca double, align 8
// CHECK-NEXT: [[CALL:%.*]] = call noundef double @_Z3runIdLj1000ELj16EET_v()
// CHECK-NEXT: store double [[CALL]], ptr [[RESULT]], align 8
// CHECK-NEXT: ret i32 0
// CHECK-LABEL: define {{[^@]+}}@_Z3runIdLj1000ELj16EET_v
// CHECK-SAME: () #[[ATTR2]] comdat {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-NEXT: [[DOTFOO__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 8000, ptr inttoptr (i64 6 to ptr))
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x double], ptr [[DOTFOO__VOID_ADDR]], i64 0, i64 0
// CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[ARRAYIDX]], align 16
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTFOO__VOID_ADDR]], ptr inttoptr (i64 6 to ptr))
// CHECK-NEXT: ret double [[TMP1]]