/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */
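/* The routines below maintain a per-thread "construct stack" used for runtime
   consistency checking of OpenMP construct nesting. Three cursors index the
   most recent entry of each kind: p_top (parallel), w_top (worksharing), and
   s_top (synchronization). Each entry records its construct type, its source
   ident, and a `prev` link to the previous entry of the same kind, so popping
   restores the corresponding cursor in O(1). */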
#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing", /* this is not called "for" because of lowering of
                       "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\"",
    "\"masked\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);
/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */
static void __kmp_check_null_func(void) { /* nothing to do */
}
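// Grow the construct stack geometrically (2x + 100) and copy the live
// entries; the old allocation is intentionally not freed (see NOTE below).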
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}
// NOTE: Function returns allocated memory, caller must free it!
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma
/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */
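// Raise a fatal error describing the given construct at the given location.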
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}
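// Raise a fatal error describing two conflicting constructs, e.g. an illegal
// nesting of the first within the second (already on the stack).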
void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}
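// Allocate and initialize a construct stack for the given thread; entry 0 is
// a ct_none sentinel so the p/w/s cursors can use index 0 as "empty".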
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}
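// Release a construct stack allocated by __kmp_allocate_cons_stack.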
void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}
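// Debug-only helper: print the whole construct stack for one thread via
// __kmp_debug_printf (invoked through the KE_DUMP macro).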
#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif // KMP_DEBUG
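// Record entry into a "parallel" region on the construct stack.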
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
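// Verify that a worksharing construct may legally start here: it must not be
// nested inside another worksharing or synchronization construct of the
// current parallel region.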
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
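// Check and then record entry into a worksharing construct.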
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
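// Validate nesting for synchronization constructs (ordered, critical, master,
// masked, reduce) against the current construct stack. With dynamic locks a
// lock sequence number accompanies the lock pointer.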
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo) {
    if (p->w_top <= p->p_top) {
/* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                               &p->stack_data[p->w_top]);
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo) &&
           /* C doesn't allow named ordered; ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found match on the stack (may not always because of interleaved
         * critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_masked || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}
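// Check nesting and then record entry into a synchronization construct,
// remembering the lock so same-name critical nesting can be detected.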
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */
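// Pop a "parallel" entry, diagnosing a missing or mismatched construct end.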
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below is the exception to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}
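// Pop a synchronization entry, diagnosing a missing or mismatched end.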
void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */
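// A barrier is legal only directly inside the parallel region, i.e. not
// within any worksharing or synchronization construct on the stack.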
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}