/*
 * kmp_threadprivate.cpp -- OpenMP threadprivate support library
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

struct shared_table __kmp_threadprivate_d_table;
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
    static struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr)

{
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
    static struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}
// Create a template for the data initialized storage. Either the template is
// NULL indicating zero fill, or the template is a copy of the original data.
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  /*
      d->data = 0;  // AC: commented out because __kmp_allocate zeroes the
      d->next = 0;  // memory
  */
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}
// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;

  for (size_t offset = 0; d != 0; d = d->next) {
    for (int i = d->more; i > 0; --i) {
      if (d->data == 0) {
        memset(&addr[offset], '\0', d->size);
      } else {
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      }
      offset += d->size;
    }
  }
}
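
/* Illustrative sketch (not part of the runtime): how the private_data template
   built by __kmp_init_common_data drives __kmp_copy_common_data. The variable
   names below are hypothetical; the two helpers are the ones defined above.

     static int counter = 42; // serial (original) copy, non-zero initializer
     struct private_data *tmpl =
         __kmp_init_common_data(&counter, sizeof(counter));
     // Because the initializer bytes are not all zero, tmpl->data holds a heap
     // copy of them; for an all-zero initializer tmpl->data would stay NULL and
     // the copy below would memset the destination instead.
     int thread_copy;
     __kmp_copy_common_data(&thread_copy, tmpl); // thread_copy == 42
*/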

/* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
        /* __kmp_root[ gitd ]-> r.r_uber_thread ->
         * th.th_pri_common -> data[ q ] = 0;*/
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for primary thread though unless we used copy
         constructor */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}

/* Call all destructors for threadprivate data belonging to this thread */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  if (!TCR_4(__kmp_init_gtid)) {
    // This is possible when one of multiple roots initiates early library
    // termination in a sequential region while other teams are active, and its
    // child threads are about to end.
    return;
  }

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {

    if (TCR_4(__kmp_init_common)) {

      /* Cannot do this here since not all threads have destroyed their data */
      /* TCW_4(__kmp_init_common, FALSE); */

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);
        if (d_tn == NULL)
          continue;
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */

// NOTE: this routine is to be called only from the serial part of the program.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
            d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
            zeroes the memory
            d_tn->ct.ctor = 0;
            d_tn->cct.cctor = 0;
            d_tn->dt.dtor = 0;
            d_tn->is_vec = FALSE;
            d_tn->vec_len = 0L;
    */
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}

struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */

    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data initialize the prototype since it was previously
           * registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
            d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
            zeroes the memory
            d_tn->ct.ctor = 0;
            d_tn->cct.cctor = 0;
            d_tn->dt.dtor = 0;
            d_tn->is_vec = FALSE;
            d_tn->vec_len = 0L;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
  /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */

  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* if C++ object with copy constructor, use it;
   * else if C++ object with constructor, use it for the non-primary thread
     copies only;
   * else use pod_init and memcpy
   *
   * C++ constructors need to be called once for each non-primary thread on
   * allocate
   * C++ copy constructors need to be called once for each thread on allocate */

  /* C++ object with constructors/destructors; don't call constructors for
     primary thread though */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }
  /* !BUILD_OPENMP_C
      if (tn->par_addr != tn->gbl_addr)
          __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

  return tn;
}

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id. */
/* ------------------------------------------------------------------------ */

/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data  pointer to data being privatized
 @param ctor  pointer to constructor function for data
 @param cctor  pointer to copy constructor function for data
 @param dtor  pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread id.
*/
void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /*
            d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate
            zeroes the memory
            d_tn->vec_len = 0L;
            d_tn->obj_init = 0;
            d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
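
/* Illustrative sketch (not part of the runtime): roughly how compiler-generated
   code might register a C++ threadprivate object through the entry point above.
   The variable, helper functions and loc argument are hypothetical; the
   ctor/cctor/dtor signatures are assumed to match the kmpc_ctor, kmpc_cctor and
   kmpc_dtor typedefs.

     static Widget tp_widget; // #pragma omp threadprivate(tp_widget)
     static void *tp_widget_ctor(void *p) { return new (p) Widget(); }
     static void tp_widget_dtor(void *p) { static_cast<Widget *>(p)->~Widget(); }

     void tp_widget_register(ident_t *loc) {
       // cctor is NULL: the USE_CHECKS_COMMON assert above requires cctor == 0
       // for current code generation.
       __kmpc_threadprivate_register(loc, &tp_widget, tp_widget_ctor, NULL,
                                     tp_widget_dtor);
     }
*/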

void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address */
    /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
     * data_address; use data_address = data */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will NEVER overlap with the data_address */
      /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
       * data_address = data */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}

static kmp_cached_addr_t *__kmp_find_cache(void *data) {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;
  while (ptr && ptr->data != data)
    ptr = ptr->next;
  return ptr;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid  global thread number
 @param data  pointer to data to privatize
 @param size  size of data to privatize
 @param cache  pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
*/
void *
__kmpc_threadprivate_cached(ident_t *loc,
                            kmp_int32 global_tid, // gtid.
                            void *data, // Pointer to original global variable.
                            size_t size, // Size of original global variable.
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      // Compiler often passes in NULL cache, even if it's already been created
      void **my_cache;
      kmp_cached_addr_t *tp_cache_addr;
      // Look for an existing cache
      tp_cache_addr = __kmp_find_cache(data);
      if (!tp_cache_addr) { // Cache was never created; do it now
        __kmp_tp_cached = 1;
        KMP_ITT_IGNORE(my_cache = (void **)__kmp_allocate(
                           sizeof(void *) * __kmp_tp_capacity +
                           sizeof(kmp_cached_addr_t)););
        // No need to zero the allocated memory; __kmp_allocate does that.
        KC_TRACE(50, ("__kmpc_threadprivate_cached: T#%d allocated cache at "
                      "address %p\n",
                      global_tid, my_cache));
        /* TODO: free all this memory in __kmp_common_destroy using
         * __kmp_threadpriv_cache_list */
        /* Add address of mycache to linked list for cleanup later */
        tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
        tp_cache_addr->addr = my_cache;
        tp_cache_addr->data = data;
        tp_cache_addr->compiler_cache = cache;
        tp_cache_addr->next = __kmp_threadpriv_cache_list;
        __kmp_threadpriv_cache_list = tp_cache_addr;
      } else { // A cache was already created; use it
        my_cache = tp_cache_addr->addr;
        tp_cache_addr->compiler_cache = cache;
      }
      KMP_MB();

      TCW_PTR(*cache, my_cache);
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);

      KMP_MB();
    }
    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));
  return ret;
}

// This function should only be called when both __kmp_tp_cached_lock and
// kmp_forkjoin_lock are held.
void __kmp_threadprivate_resize_cache(int newCapacity) {
  KC_TRACE(10, ("__kmp_threadprivate_resize_cache: called with size: %d\n",
                newCapacity));

  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    if (ptr->data) { // this location has an active cache; resize it
      void **my_cache;
      KMP_ITT_IGNORE(my_cache =
                         (void **)__kmp_allocate(sizeof(void *) * newCapacity +
                                                 sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(50, ("__kmp_threadprivate_resize_cache: allocated cache at %p\n",
                    my_cache));
      // Now copy old cache into new cache
      void **old_cache = ptr->addr;
      for (int i = 0; i < __kmp_tp_capacity; ++i) {
        my_cache[i] = old_cache[i];
      }

      // Add address of new my_cache to linked list for cleanup later
      kmp_cached_addr_t *tp_cache_addr;
      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[newCapacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->data = ptr->data;
      tp_cache_addr->compiler_cache = ptr->compiler_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      // Copy new cache to compiler's location: We can copy directly
      // to (*compiler_cache) if compiler guarantees it will keep
      // using the same location for the cache. This is not yet true
      // for some compilers, in which case we have to check if
      // compiler_cache is still pointing at old cache, and if so, we
      // can point it at the new cache with an atomic compare&swap
      // operation. (Old method will always work, but we should shift
      // to new method (commented line below) when Intel and Clang
      // compilers use new method.)
      (void)KMP_COMPARE_AND_STORE_PTR(tp_cache_addr->compiler_cache, old_cache,
                                      my_cache);
      // TCW_PTR(*(tp_cache_addr->compiler_cache), my_cache);

      // If the store doesn't happen here, the compiler's old behavior will
      // inevitably call __kmpc_threadprivate_cache with a new location for the
      // cache, and that function will store the resized cache there at that
      // point.

      // Nullify old cache's data pointer so we skip it next time
      ptr->data = NULL;
    }
    ptr = ptr->next;
  }
  // After all caches are resized, update __kmp_tp_capacity to the new size
  *(volatile int *)&__kmp_tp_capacity = newCapacity;
}
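
/* Layout note (illustrative): each cache above is a single allocation holding
   the per-thread pointer array followed by its kmp_cached_addr_t bookkeeping
   record, so freeing the array also frees the record (see
   __kmp_cleanup_threadprivate_caches below).

     [ void * x capacity ][ kmp_cached_addr_t ]
     ^ my_cache            ^ (kmp_cached_addr_t *)&my_cache[capacity]
*/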

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data  pointer to data being privatized
 @param ctor  pointer to constructor function for data
 @param cctor  pointer to copy constructor function for data
 @param dtor  pointer to destructor function for data
 @param vector_length length of the vector (bytes or elements?)

 Register vector constructors and destructors for thread private data.
*/
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    // d_tn->obj_init = 0;  // AC: __kmp_allocate zeroes the memory
    // d_tn->pod_init = 0;
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}

void __kmp_cleanup_threadprivate_caches() {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    void **cache = ptr->addr;
    __kmp_threadpriv_cache_list = ptr->next;
    if (*ptr->compiler_cache)
      *ptr->compiler_cache = NULL;
    ptr->compiler_cache = NULL;
    ptr->data = NULL;
    ptr->addr = NULL;
    ptr->next = NULL;
    // Threadprivate data pointed at by cache entries are destroyed at end of
    // __kmp_launch_thread with __kmp_common_destroy_gtid.
    __kmp_free(cache); // implicitly frees ptr too
    ptr = __kmp_threadpriv_cache_list;
  }
}