1 /* Copyright (C) 2020-2024 Free Software Foundation, Inc.
2 Contributed by Jakub Jelinek <jakub@redhat.com>.
4 This file is part of the GNU Offloading and Multi Processing Library
7 Libgomp is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 /* This file contains wrappers for the system allocation routines. Most
27 places in the OpenMP API do not make any provision for failure, so in
28 general we cannot allow memory allocation to fail. */
34 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
38 /* Keeping track whether a Fortran scalar allocatable/pointer has been
39 allocated via 'omp allocators'/'omp allocate'. */
/* Splay-tree bookkeeping used to record which Fortran scalar
   allocatables/pointers were allocated via 'omp allocate'/'omp allocators'.
   NOTE(review): this span is a mangled listing -- the struct body, the
   compare function's return type and body, and the '#undef'/'#endif'
   lines between the two "splay-tree.h" inclusions are missing from view;
   confirm against upstream before editing.  */
41 struct fort_alloc_splay_tree_key_s
{
45 typedef struct fort_alloc_splay_tree_node_s
*fort_alloc_splay_tree_node
;
46 typedef struct fort_alloc_splay_tree_s
*fort_alloc_splay_tree
;
47 typedef struct fort_alloc_splay_tree_key_s
*fort_alloc_splay_tree_key
;
/* Comparison callback required by the generic splay-tree template;
   presumably orders keys by their stored pointer -- body not visible.  */
50 fort_alloc_splay_compare (fort_alloc_splay_tree_key x
, fort_alloc_splay_tree_key y
)
/* The splay-tree template is included twice with the "fort_alloc_" prefix:
   first pass emits the type declarations, second pass the static
   implementation (controlled by splay_tree_prefix/splay_tree_static).  */
58 #define splay_tree_prefix fort_alloc
59 #define splay_tree_static
60 #include "splay-tree.h"
62 #define splay_tree_prefix fort_alloc
63 #define splay_tree_static
65 #include "splay-tree.h"
/* Process-wide tree of tracked scalar allocations.  */
67 static struct fort_alloc_splay_tree_s fort_alloc_scalars
;
69 /* Add pointer as being alloced by GOMP_alloc. */
/* Records PTR in fort_alloc_scalars so GOMP_is_alloc can later tell
   whether the pointer originated from 'omp allocate'.
   NOTE(review): mangled listing -- the return type, braces and the
   statements initializing ITEM's key from PTR are missing from view.  */
71 GOMP_add_alloc (void *ptr
)
75 fort_alloc_splay_tree_node item
;
/* gomp_malloc aborts the process on failure, so ITEM is always valid.  */
76 item
= gomp_malloc (sizeof (struct splay_tree_node_s
));
80 fort_alloc_splay_tree_insert (&fort_alloc_scalars
, item
);
83 /* Remove pointer, either called by FREE or by REALLOC,
84 either of them can change the allocation status. */
/* Looks PTR up in fort_alloc_scalars; when found, removes the entry
   (presumably returning whether it was present).
   NOTE(review): mangled listing -- the return type, the initialization of
   NEEDLE from PTR, the NULL check on N and the return statements are not
   visible here.  */
86 GOMP_is_alloc (void *ptr
)
88 struct fort_alloc_splay_tree_key_s needle
;
89 fort_alloc_splay_tree_node n
;
91 n
= fort_alloc_splay_tree_lookup_node (&fort_alloc_scalars
, &needle
);
/* Removal presumably guarded by 'if (n)' in the missing line(s).  */
94 fort_alloc_splay_tree_remove (&fort_alloc_scalars
, &n
->key
);
/* Bounds of the predefined allocator handle ranges: standard OpenMP
   predefined allocators run up to omp_thread_mem_alloc; the GNU
   extension range currently holds only ompx_gnu_pinned_mem_alloc.  */
101 #define omp_max_predefined_alloc omp_thread_mem_alloc
102 #define ompx_gnu_min_predefined_alloc ompx_gnu_pinned_mem_alloc
103 #define ompx_gnu_max_predefined_alloc ompx_gnu_pinned_mem_alloc
105 /* These macros may be overridden in config/<target>/allocator.c.
106 The defaults (no override) are to return NULL for pinned memory requests
107 and pass through to the regular OS calls otherwise.
108 The following definitions (ab)use comma operators to avoid unused
110 #ifndef MEMSPACE_ALLOC
111 #define MEMSPACE_ALLOC(MEMSPACE, SIZE, PIN) \
112 (PIN ? NULL : malloc (((void)(MEMSPACE), (SIZE))))
114 #ifndef MEMSPACE_CALLOC
115 #define MEMSPACE_CALLOC(MEMSPACE, SIZE, PIN) \
116 (PIN ? NULL : calloc (1, (((void)(MEMSPACE), (SIZE)))))
118 #ifndef MEMSPACE_REALLOC
119 #define MEMSPACE_REALLOC(MEMSPACE, ADDR, OLDSIZE, SIZE, OLDPIN, PIN) \
120 ((PIN) || (OLDPIN) ? NULL \
121 : realloc (ADDR, (((void)(MEMSPACE), (void)(OLDSIZE), (SIZE)))))
123 #ifndef MEMSPACE_FREE
/* NOTE(review): as shown this frees ONLY when PIN is true, which is the
   opposite of the other default macros (they refuse pinned requests).
   This looks like a mangling artifact that dropped an intervening token
   (e.g. "if (PIN) ; else free (...)"); confirm against upstream libgomp
   before relying on it.  Also note the closing '#endif' lines for all
   these '#ifndef' guards are missing from this listing.  */
124 #define MEMSPACE_FREE(MEMSPACE, ADDR, SIZE, PIN) \
125 if (PIN) free (((void)(MEMSPACE), (void)(SIZE), (ADDR)))
127 #ifndef MEMSPACE_VALIDATE
128 #define MEMSPACE_VALIDATE(MEMSPACE, ACCESS, PIN) \
129 (PIN ? 0 : ((void)(MEMSPACE), (void)(ACCESS), 1))
132 /* Map the predefined allocators to the correct memory space.
133 The index to this table is the omp_allocator_handle_t enum value.
134 When the user calls omp_alloc with a predefined allocator this
135 table determines what memory they get. */
136 static const omp_memspace_handle_t predefined_omp_alloc_mapping
[] = {
137 omp_default_mem_space
, /* omp_null_allocator doesn't actually use this. */
138 omp_default_mem_space
, /* omp_default_mem_alloc. */
139 omp_large_cap_mem_space
, /* omp_large_cap_mem_alloc. */
140 omp_const_mem_space
, /* omp_const_mem_alloc. */
141 omp_high_bw_mem_space
, /* omp_high_bw_mem_alloc. */
142 omp_low_lat_mem_space
, /* omp_low_lat_mem_alloc. */
143 omp_low_lat_mem_space
, /* omp_cgroup_mem_alloc (implementation defined). */
144 omp_low_lat_mem_space
, /* omp_pteam_mem_alloc (implementation defined). */
145 omp_low_lat_mem_space
, /* omp_thread_mem_alloc (implementation defined). */
/* Parallel table for the GNU-extension allocator range, indexed by
   (allocator - ompx_gnu_min_predefined_alloc).
   NOTE(review): the closing "};" of both tables is missing from this
   mangled listing.  */
147 static const omp_memspace_handle_t predefined_ompx_gnu_alloc_mapping
[] = {
148 omp_default_mem_space
, /* ompx_gnu_pinned_mem_alloc. */
/* Compile-time checks that each table has exactly one entry per handle
   in its range, so table lookups below cannot run out of bounds.  */
151 #define ARRAY_SIZE(A) (sizeof (A) / sizeof ((A)[0]))
152 _Static_assert (ARRAY_SIZE (predefined_omp_alloc_mapping
)
153 == omp_max_predefined_alloc
+ 1,
154 "predefined_omp_alloc_mapping must match omp_memspace_handle_t");
155 _Static_assert (ARRAY_SIZE (predefined_ompx_gnu_alloc_mapping
)
156 == (ompx_gnu_max_predefined_alloc
157 - ompx_gnu_min_predefined_alloc
) + 1,
158 "predefined_ompx_gnu_alloc_mapping must match"
159 " omp_memspace_handle_t");
/* True iff ALLOCATOR is one of the predefined allocator handles.
   Predefined handles occupy the low enum values up to
   ompx_gnu_max_predefined_alloc; user-defined allocators are heap
   pointers cast to the handle type and therefore compare larger.
   NOTE(review): return type line not visible in this mangled listing.  */
162 predefined_allocator_p (omp_allocator_handle_t allocator
)
164 return allocator
<= ompx_gnu_max_predefined_alloc
;
/* Return the memory space backing predefined ALLOCATOR, consulting the
   standard table for the OpenMP range and the extension table for the
   ompx_gnu_* range.  Callers must only pass predefined handles
   (see predefined_allocator_p).  */
167 static inline omp_memspace_handle_t
168 predefined_alloc_mapping (omp_allocator_handle_t allocator
)
170 if (allocator
<= omp_max_predefined_alloc
)
171 return predefined_omp_alloc_mapping
[allocator
];
172 else if (allocator
>= ompx_gnu_min_predefined_alloc
173 && allocator
<= ompx_gnu_max_predefined_alloc
)
/* Extension handles are offset by the start of their range.  */
175 int index
= allocator
- ompx_gnu_min_predefined_alloc
;
176 return predefined_ompx_gnu_alloc_mapping
[index
];
179 /* This should never happen. */
180 return omp_default_mem_space
;
/* Identifies which optional backing library (if any) serves an
   allocator: NONE, one of the memkind kinds generated from
   GOMP_MEMKIND_KINDS, or libnuma (appended after GOMP_MEMKIND_COUNT).
   NOTE(review): mangled listing -- the enumerator list that expands
   GOMP_MEMKIND_KINDS and the closing brace are missing from view.  */
183 enum gomp_numa_memkind_kind
185 GOMP_MEMKIND_NONE
= 0,
186 #define GOMP_MEMKIND_KINDS \
187 GOMP_MEMKIND_KIND (HBW_INTERLEAVE), \
188 GOMP_MEMKIND_KIND (HBW_PREFERRED), \
189 GOMP_MEMKIND_KIND (DAX_KMEM_ALL), \
190 GOMP_MEMKIND_KIND (DAX_KMEM), \
191 GOMP_MEMKIND_KIND (INTERLEAVE), \
192 GOMP_MEMKIND_KIND (DEFAULT)
193 #define GOMP_MEMKIND_KIND(kind) GOMP_MEMKIND_##kind
195 #undef GOMP_MEMKIND_KIND
197 GOMP_MEMKIND_LIBNUMA
= GOMP_MEMKIND_COUNT
/* Runtime representation of a user-defined allocator created by
   omp_init_allocator; the omp_allocator_handle_t returned to the user is
   a pointer to one of these.  Trait values are packed into bitfields.
   NOTE(review): mangled listing -- opening/closing braces, the
   '#endif's, and the mutex member guarded by '#ifndef HAVE_SYNC_BUILTINS'
   are missing from view.  */
200 struct omp_allocator_data
202 omp_memspace_handle_t memspace
;
203 omp_uintptr_t alignment
;
/* pool_size of ~(uintptr_t)0 means "unlimited"; used_pool_size tracks
   outstanding bytes when a finite pool is configured.  */
204 omp_uintptr_t pool_size
;
205 omp_uintptr_t used_pool_size
;
206 omp_allocator_handle_t fb_data
;
207 unsigned int sync_hint
: 8;
208 unsigned int access
: 8;
209 unsigned int fallback
: 8;
210 unsigned int pinned
: 1;
211 unsigned int partition
: 7;
212 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
213 unsigned int memkind
: 8;
215 #ifndef HAVE_SYNC_BUILTINS
/* Hidden header stored immediately before every pointer returned by
   omp_alloc and friends; records the raw allocation so omp_free can
   undo alignment and pool accounting.  NOTE(review): the ptr and size
   members visible in its users are missing from this listing.  */
220 struct omp_mem_header
224 omp_allocator_handle_t allocator
;
/* dlopen'd entry points of libnuma.so.1; filled in by gomp_init_libnuma.
   NOTE(review): mangled listing -- braces and the numa_handle member
   (assigned in gomp_init_libnuma below) are missing from view.  */
228 struct gomp_libnuma_data
231 void *(*numa_alloc_local
) (size_t);
232 void *(*numa_realloc
) (void *, size_t, size_t);
233 void (*numa_free
) (void *, size_t);
/* dlopen'd entry points of libmemkind.so.0 plus the resolved kind
   objects; kinds[] is indexed by enum gomp_numa_memkind_kind.  */
236 struct gomp_memkind_data
238 void *memkind_handle
;
239 void *(*memkind_malloc
) (void *, size_t);
240 void *(*memkind_calloc
) (void *, size_t, size_t);
241 void *(*memkind_realloc
) (void *, void *, size_t);
242 void (*memkind_free
) (void *, void *);
243 int (*memkind_check_available
) (void *);
244 void **kinds
[GOMP_MEMKIND_COUNT
];
247 #ifdef LIBGOMP_USE_LIBNUMA
/* Lazily-initialized libnuma state, published with release semantics and
   read with acquire semantics so readers see a fully-initialized struct.  */
248 static struct gomp_libnuma_data
*libnuma_data
;
249 static pthread_once_t libnuma_data_once
= PTHREAD_ONCE_INIT
;
/* pthread_once callback: dlopen libnuma, probe numa_available, resolve
   the entry points and publish the result.  On any failure a zeroed
   struct is published so callers see NULL function pointers.
   NOTE(review): mangled listing -- the return type, several guard
   branches (NULL handle check, calloc failure) and braces are missing.  */
252 gomp_init_libnuma (void)
254 void *handle
= dlopen ("libnuma.so.1", RTLD_LAZY
);
255 struct gomp_libnuma_data
*data
;
257 data
= calloc (1, sizeof (struct gomp_libnuma_data
));
/* Only use libnuma when numa_available () reports support (returns 0).  */
266 int (*numa_available
) (void);
268 = (__typeof (numa_available
)) dlsym (handle
, "numa_available");
269 if (!numa_available
|| numa_available () != 0)
/* Failure path: publish the zeroed struct.  */
277 __atomic_store_n (&libnuma_data
, data
, MEMMODEL_RELEASE
);
280 data
->numa_handle
= handle
;
281 data
->numa_alloc_local
282 = (__typeof (data
->numa_alloc_local
)) dlsym (handle
, "numa_alloc_local");
284 = (__typeof (data
->numa_realloc
)) dlsym (handle
, "numa_realloc");
286 = (__typeof (data
->numa_free
)) dlsym (handle
, "numa_free");
/* Success path: publish the filled-in struct.  */
287 __atomic_store_n (&libnuma_data
, data
, MEMMODEL_RELEASE
);
/* Return the libnuma state, running one-time initialization on first use.
   The fast path is a single acquire load; pthread_once is only reached
   when the pointer is still NULL (check elided in this mangled view).  */
290 static struct gomp_libnuma_data
*
291 gomp_get_libnuma (void)
293 struct gomp_libnuma_data
*data
294 = __atomic_load_n (&libnuma_data
, MEMMODEL_ACQUIRE
);
297 pthread_once (&libnuma_data_once
, gomp_init_libnuma
);
298 return __atomic_load_n (&libnuma_data
, MEMMODEL_ACQUIRE
);
302 #ifdef LIBGOMP_USE_MEMKIND
/* Lazily-initialized memkind state, published/consumed with
   release/acquire atomics exactly like the libnuma state above.  */
303 static struct gomp_memkind_data
*memkind_data
;
304 static pthread_once_t memkind_data_once
= PTHREAD_ONCE_INIT
;
/* pthread_once callback: dlopen libmemkind, resolve the API entry points
   and each MEMKIND_* kind object, then publish the result.
   NOTE(review): mangled listing -- the return type, the expansion of the
   kinds[] string table, the failure-path guards and loop braces are
   missing from view.  */
307 gomp_init_memkind (void)
309 void *handle
= dlopen ("libmemkind.so.0", RTLD_LAZY
);
310 struct gomp_memkind_data
*data
;
/* Symbol names of the kind objects, generated from GOMP_MEMKIND_KINDS
   by stringizing each kind as "MEMKIND_<kind>".  */
312 static const char *kinds
[] = {
314 #define GOMP_MEMKIND_KIND(kind) "MEMKIND_" #kind
316 #undef GOMP_MEMKIND_KIND
319 data
= calloc (1, sizeof (struct gomp_memkind_data
));
/* Failure path: publish the zeroed struct.  */
328 __atomic_store_n (&memkind_data
, data
, MEMMODEL_RELEASE
);
331 data
->memkind_handle
= handle
;
333 = (__typeof (data
->memkind_malloc
)) dlsym (handle
, "memkind_malloc");
335 = (__typeof (data
->memkind_calloc
)) dlsym (handle
, "memkind_calloc");
336 data
->memkind_realloc
337 = (__typeof (data
->memkind_realloc
)) dlsym (handle
, "memkind_realloc");
339 = (__typeof (data
->memkind_free
)) dlsym (handle
, "memkind_free");
340 data
->memkind_check_available
341 = (__typeof (data
->memkind_check_available
))
342 dlsym (handle
, "memkind_check_available");
/* Kinds are only resolved when every API entry point was found.
   Index 0 is GOMP_MEMKIND_NONE, hence the loop starts at 1.  */
343 if (data
->memkind_malloc
344 && data
->memkind_calloc
345 && data
->memkind_realloc
346 && data
->memkind_free
347 && data
->memkind_check_available
)
348 for (i
= 1; i
< GOMP_MEMKIND_COUNT
; ++i
)
350 data
->kinds
[i
] = (void **) dlsym (handle
, kinds
[i
]);
/* NOTE(review): as shown this nulls out kinds that ARE available,
   which contradicts how kinds[] is consumed elsewhere (non-NULL
   means usable).  A '!' before memkind_check_available appears to
   have been lost in the mangling; confirm against upstream.  */
351 if (data
->kinds
[i
] && data
->memkind_check_available (*data
->kinds
[i
]))
352 data
->kinds
[i
] = NULL
;
/* Success path: publish the filled-in struct.  */
354 __atomic_store_n (&memkind_data
, data
, MEMMODEL_RELEASE
);
/* Return the memkind state, running one-time initialization on first
   use; mirrors gomp_get_libnuma above.  */
357 static struct gomp_memkind_data
*
358 gomp_get_memkind (void)
360 struct gomp_memkind_data
*data
361 = __atomic_load_n (&memkind_data
, MEMMODEL_ACQUIRE
);
364 pthread_once (&memkind_data_once
, gomp_init_memkind
);
365 return __atomic_load_n (&memkind_data
, MEMMODEL_ACQUIRE
);
/* OpenMP omp_init_allocator: validate MEMSPACE and the NTRAITS entries of
   TRAITS[], fill a struct omp_allocator_data accordingly, pick a memkind/
   libnuma backend when available, and return a heap-allocated allocator
   handle -- or omp_null_allocator on any invalid trait/memspace.
   NOTE(review): this whole function is a mangled listing; braces, 'break'
   statements, several case labels (e.g. omp_atk_access, omp_atk_pinned
   headers), default cases and '#endif's are missing from view.  Code
   below is preserved byte-for-byte; only comments were added.  */
369 omp_allocator_handle_t
370 omp_init_allocator (omp_memspace_handle_t memspace
, int ntraits
,
371 const omp_alloctrait_t traits
[])
/* DATA starts from the spec-mandated trait defaults: alignment 1,
   unlimited pool, contended sync hint, all access, default-mem fallback,
   not pinned, environment partition.  */
373 struct omp_allocator_data data
374 = { memspace
, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended
, omp_atv_all
,
375 omp_atv_default_mem_fb
, omp_atv_false
, omp_atv_environment
,
376 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
380 struct omp_allocator_data
*ret
;
/* Reject memory spaces outside the known enum range.  */
383 if (memspace
> omp_low_lat_mem_space
)
384 return omp_null_allocator
;
/* Walk the user-supplied traits, validating each value.  */
385 for (i
= 0; i
< ntraits
; i
++)
386 switch (traits
[i
].key
)
388 case omp_atk_sync_hint
:
389 switch (traits
[i
].value
)
391 case omp_atv_default
:
392 data
.sync_hint
= omp_atv_contended
;
394 case omp_atv_contended
:
395 case omp_atv_uncontended
:
396 case omp_atv_serialized
:
397 case omp_atv_private
:
398 data
.sync_hint
= traits
[i
].value
;
401 return omp_null_allocator
;
404 case omp_atk_alignment
:
405 if (traits
[i
].value
== omp_atv_default
)
/* Alignment must be a power of two (x & (x-1) == 0).  */
410 if ((traits
[i
].value
& (traits
[i
].value
- 1)) != 0
412 return omp_null_allocator
;
413 data
.alignment
= traits
[i
].value
;
/* omp_atk_access (case label missing from view).  */
416 switch (traits
[i
].value
)
418 case omp_atv_default
:
419 data
.access
= omp_atv_all
;
425 data
.access
= traits
[i
].value
;
428 return omp_null_allocator
;
431 case omp_atk_pool_size
:
432 if (traits
[i
].value
== omp_atv_default
)
433 data
.pool_size
= ~(uintptr_t) 0;
435 data
.pool_size
= traits
[i
].value
;
437 case omp_atk_fallback
:
438 switch (traits
[i
].value
)
440 case omp_atv_default
:
441 data
.fallback
= omp_atv_default_mem_fb
;
443 case omp_atv_default_mem_fb
:
444 case omp_atv_null_fb
:
445 case omp_atv_abort_fb
:
446 case omp_atv_allocator_fb
:
447 data
.fallback
= traits
[i
].value
;
450 return omp_null_allocator
;
453 case omp_atk_fb_data
:
454 data
.fb_data
= traits
[i
].value
;
/* omp_atk_pinned (case label missing from view).  */
457 switch (traits
[i
].value
)
459 case omp_atv_default
:
461 data
.pinned
= omp_atv_false
;
464 data
.pinned
= omp_atv_true
;
467 return omp_null_allocator
;
470 case omp_atk_partition
:
471 switch (traits
[i
].value
)
473 case omp_atv_default
:
474 data
.partition
= omp_atv_environment
;
476 case omp_atv_environment
:
477 case omp_atv_nearest
:
478 case omp_atv_blocked
:
479 case omp_atv_interleaved
:
480 data
.partition
= traits
[i
].value
;
483 return omp_null_allocator
;
/* Unknown trait key.  */
487 return omp_null_allocator
;
/* The implementation stores a pointer in front of each allocation, so
   never use less than pointer alignment.  */
490 if (data
.alignment
< sizeof (void *))
491 data
.alignment
= sizeof (void *);
/* Map the requested memspace/partition onto an available memkind or
   libnuma backend (switch on memspace; header missing from view).  */
495 #ifdef LIBGOMP_USE_MEMKIND
496 case omp_high_bw_mem_space
:
497 struct gomp_memkind_data
*memkind_data
;
498 memkind_data
= gomp_get_memkind ();
499 if (data
.partition
== omp_atv_interleaved
500 && memkind_data
->kinds
[GOMP_MEMKIND_HBW_INTERLEAVE
])
502 data
.memkind
= GOMP_MEMKIND_HBW_INTERLEAVE
;
505 else if (memkind_data
->kinds
[GOMP_MEMKIND_HBW_PREFERRED
])
507 data
.memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
511 case omp_large_cap_mem_space
:
512 memkind_data
= gomp_get_memkind ();
513 if (memkind_data
->kinds
[GOMP_MEMKIND_DAX_KMEM_ALL
])
514 data
.memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
515 else if (memkind_data
->kinds
[GOMP_MEMKIND_DAX_KMEM
])
516 data
.memkind
= GOMP_MEMKIND_DAX_KMEM
;
/* Default memspace: interleaved partition maps to MEMKIND_INTERLEAVE.  */
520 #ifdef LIBGOMP_USE_MEMKIND
521 if (data
.partition
== omp_atv_interleaved
)
523 memkind_data
= gomp_get_memkind ();
524 if (memkind_data
->kinds
[GOMP_MEMKIND_INTERLEAVE
])
525 data
.memkind
= GOMP_MEMKIND_INTERLEAVE
;
/* partition(nearest) falls back to libnuma's local allocation when no
   memkind was chosen and libnuma is usable.  */
531 #ifdef LIBGOMP_USE_LIBNUMA
532 if (data
.memkind
== GOMP_MEMKIND_NONE
&& data
.partition
== omp_atv_nearest
)
534 libnuma_data
= gomp_get_libnuma ();
535 if (libnuma_data
->numa_alloc_local
!= NULL
)
536 data
.memkind
= GOMP_MEMKIND_LIBNUMA
;
540 /* Reject unsupported memory spaces. */
541 if (!MEMSPACE_VALIDATE (data
.memspace
, data
.access
, data
.pinned
))
542 return omp_null_allocator
;
/* gomp_malloc aborts on failure, so RET is always valid; the copy of
   DATA into *RET is in lines missing from this view.  */
544 ret
= gomp_malloc (sizeof (struct omp_allocator_data
));
546 #ifndef HAVE_SYNC_BUILTINS
547 gomp_mutex_init (&ret
->lock
);
/* The allocator handle is simply the pointer to the heap struct.  */
549 return (omp_allocator_handle_t
) ret
;
/* OpenMP omp_destroy_allocator: release the heap struct behind a
   user-defined allocator handle; a no-op for omp_null_allocator.
   NOTE(review): return type, braces and '#endif' missing from this
   mangled view.  */
553 omp_destroy_allocator (omp_allocator_handle_t allocator
)
555 if (allocator
!= omp_null_allocator
)
557 #ifndef HAVE_SYNC_BUILTINS
558 gomp_mutex_destroy (&((struct omp_allocator_data
*) allocator
)->lock
);
560 free ((void *) allocator
);
/* Export internal aliases for intra-library calls.  */
564 ialias (omp_init_allocator
)
565 ialias (omp_destroy_allocator
)
/* OpenMP omp_aligned_alloc: allocate SIZE bytes aligned to at least
   ALIGNMENT from ALLOCATOR, storing a struct omp_mem_header just before
   the returned pointer so omp_free can recover the raw allocation.
   Handles pool-size accounting, memkind/libnuma backends and the
   allocator's fallback trait (retry loop via the fallback switch).
   NOTE(review): the entire body is a mangled listing -- braces, labels
   ('retry:'/'fail:'), early returns, memory-order arguments and
   '#endif's are missing from view.  Code preserved byte-for-byte.  */
568 omp_aligned_alloc (size_t alignment
, size_t size
,
569 omp_allocator_handle_t allocator
)
571 struct omp_allocator_data
*allocator_data
;
572 size_t new_size
, new_alignment
;
574 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
575 enum gomp_numa_memkind_kind memkind
;
/* Zero-size requests return NULL (return in missing lines).  */
578 if (__builtin_expect (size
== 0, 0))
582 new_alignment
= alignment
;
/* omp_null_allocator means "use the thread's default allocator",
   lazily seeded from the ICV gomp_def_allocator.  */
583 if (allocator
== omp_null_allocator
)
585 struct gomp_thread
*thr
= gomp_thread ();
586 if (thr
->ts
.def_allocator
== omp_null_allocator
)
587 thr
->ts
.def_allocator
= gomp_def_allocator
;
588 allocator
= (omp_allocator_handle_t
) thr
->ts
.def_allocator
;
/* User-defined allocator: the handle is a struct pointer carrying the
   effective alignment and backend kind.  */
591 if (!predefined_allocator_p (allocator
))
593 allocator_data
= (struct omp_allocator_data
*) allocator
;
594 if (new_alignment
< allocator_data
->alignment
)
595 new_alignment
= allocator_data
->alignment
;
596 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
597 memkind
= allocator_data
->memkind
;
/* Predefined allocator: no struct, derive backend from the handle.  */
602 allocator_data
= NULL
;
603 if (new_alignment
< sizeof (void *))
604 new_alignment
= sizeof (void *);
605 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
606 memkind
= GOMP_MEMKIND_NONE
;
608 #ifdef LIBGOMP_USE_MEMKIND
609 if (allocator
== omp_high_bw_mem_alloc
)
610 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
611 else if (allocator
== omp_large_cap_mem_alloc
)
612 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
/* Drop the memkind choice if that kind wasn't resolvable.  */
615 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
616 if (!memkind_data
->kinds
[memkind
])
617 memkind
= GOMP_MEMKIND_NONE
;
/* Compute the raw allocation size: header + worst-case alignment slack
   + user SIZE, with overflow checked.  */
622 new_size
= sizeof (struct omp_mem_header
);
623 if (new_alignment
> sizeof (void *))
624 new_size
+= new_alignment
- sizeof (void *);
625 if (__builtin_add_overflow (size
, new_size
, &new_size
))
627 #ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
628 if (allocator
== omp_low_lat_mem_alloc
)
/* Finite pool: atomically reserve NEW_SIZE in used_pool_size (CAS loop
   with HAVE_SYNC_BUILTINS, mutex otherwise), failing over to the
   fallback path when the pool would overflow.  */
632 if (__builtin_expect (allocator_data
633 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
635 uintptr_t used_pool_size
;
636 if (new_size
> allocator_data
->pool_size
)
638 #ifdef HAVE_SYNC_BUILTINS
639 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
643 uintptr_t new_pool_size
;
644 if (__builtin_add_overflow (used_pool_size
, new_size
,
646 || new_pool_size
> allocator_data
->pool_size
)
648 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
649 &used_pool_size
, new_pool_size
,
650 true, MEMMODEL_RELAXED
,
656 gomp_mutex_lock (&allocator_data
->lock
);
657 if (__builtin_add_overflow (allocator_data
->used_pool_size
, new_size
,
659 || used_pool_size
> allocator_data
->pool_size
)
661 gomp_mutex_unlock (&allocator_data
->lock
);
664 allocator_data
->used_pool_size
= used_pool_size
;
665 gomp_mutex_unlock (&allocator_data
->lock
);
/* Pool-tracked allocation via the chosen backend.  */
667 #ifdef LIBGOMP_USE_LIBNUMA
668 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
669 ptr
= libnuma_data
->numa_alloc_local (new_size
);
670 # ifdef LIBGOMP_USE_MEMKIND
674 #ifdef LIBGOMP_USE_MEMKIND
677 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
678 void *kind
= *memkind_data
->kinds
[memkind
];
679 ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
683 ptr
= MEMSPACE_ALLOC (allocator_data
->memspace
, new_size
,
684 allocator_data
->pinned
);
/* Allocation failed: give the reserved bytes back to the pool.  */
687 #ifdef HAVE_SYNC_BUILTINS
688 __atomic_add_fetch (&allocator_data
->used_pool_size
, -new_size
,
691 gomp_mutex_lock (&allocator_data
->lock
);
692 allocator_data
->used_pool_size
-= new_size
;
693 gomp_mutex_unlock (&allocator_data
->lock
);
/* Unlimited-pool / predefined-allocator path.  */
700 #ifdef LIBGOMP_USE_LIBNUMA
701 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
702 ptr
= libnuma_data
->numa_alloc_local (new_size
);
703 # ifdef LIBGOMP_USE_MEMKIND
707 #ifdef LIBGOMP_USE_MEMKIND
710 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
711 void *kind
= *memkind_data
->kinds
[memkind
];
712 ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
/* Generic path: derive memspace/pinned from the allocator.  */
717 omp_memspace_handle_t memspace
;
718 memspace
= (allocator_data
719 ? allocator_data
->memspace
720 : predefined_alloc_mapping (allocator
));
721 int pinned
= (allocator_data
722 ? allocator_data
->pinned
723 : allocator
== ompx_gnu_pinned_mem_alloc
);
724 ptr
= MEMSPACE_ALLOC (memspace
, new_size
, pinned
);
/* Success: align the user pointer past the header and record the raw
   pointer, total size and owning allocator in the hidden header.  */
730 if (new_alignment
> sizeof (void *))
731 ret
= (void *) (((uintptr_t) ptr
732 + sizeof (struct omp_mem_header
)
733 + new_alignment
- sizeof (void *))
734 & ~(new_alignment
- 1));
736 ret
= (char *) ptr
+ sizeof (struct omp_mem_header
);
737 ((struct omp_mem_header
*) ret
)[-1].ptr
= ptr
;
738 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
739 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
/* Failure: act on the fallback trait.  Predefined allocators other than
   omp_default_mem_alloc/ompx_gnu_pinned_mem_alloc use default-mem
   fallback (ternary truncated in this view).  */
743 int fallback
= (allocator_data
744 ? allocator_data
->fallback
745 : (allocator
== omp_default_mem_alloc
746 || allocator
== ompx_gnu_pinned_mem_alloc
)
748 : omp_atv_default_mem_fb
);
751 case omp_atv_default_mem_fb
:
752 allocator
= omp_default_mem_alloc
;
754 case omp_atv_null_fb
:
757 case omp_atv_abort_fb
:
758 gomp_fatal ("Out of memory allocating %lu bytes",
759 (unsigned long) size
);
760 case omp_atv_allocator_fb
:
761 allocator
= allocator_data
->fb_data
;
767 ialias (omp_aligned_alloc
)
/* OpenMP omp_alloc: convenience wrapper for omp_aligned_alloc with the
   minimum alignment of 1 (the implementation still guarantees at least
   pointer alignment internally).  */
770 omp_alloc (size_t size
, omp_allocator_handle_t allocator
)
772 return ialias_call (omp_aligned_alloc
) (1, size
, allocator
);
775 /* Like omp_aligned_alloc, but apply on top of that:
776 "For allocations that arise from this ... the null_fb value of the
777 fallback allocator trait behaves as if the abort_fb had been specified." */
/* Compiler-generated entry point for 'omp allocate': a NULL result for a
   nonzero SIZE is fatal rather than returned to the user.
   NOTE(review): return type and the declaration of RET are in lines
   missing from this mangled view.  */
780 GOMP_alloc (size_t alignment
, size_t size
, uintptr_t allocator
)
783 = ialias_call (omp_aligned_alloc
) (alignment
, size
,
784 (omp_allocator_handle_t
) allocator
);
785 if (__builtin_expect (ret
== NULL
, 0) && size
)
786 gomp_fatal ("Out of memory allocating %lu bytes",
787 (unsigned long) size
);
/* OpenMP omp_free: release PTR previously returned by omp_alloc and
   friends.  The real owning allocator is read from the hidden header,
   not from the ALLOCATOR argument; pool accounting is undone and the raw
   pointer is released through the same backend that allocated it.
   NOTE(review): mangled listing -- the NULL-PTR early return, braces,
   'goto'/'return' statements and '#endif's are missing from view.  */
792 omp_free (void *ptr
, omp_allocator_handle_t allocator
)
794 struct omp_mem_header
*data
;
795 omp_memspace_handle_t memspace
= omp_default_mem_space
;
/* Recover the hidden header stored just before PTR.  */
801 data
= &((struct omp_mem_header
*) ptr
)[-1];
802 if (!predefined_allocator_p (data
->allocator
))
804 struct omp_allocator_data
*allocator_data
805 = (struct omp_allocator_data
*) (data
->allocator
);
/* Finite pool: return the bytes to used_pool_size.  */
806 if (allocator_data
->pool_size
< ~(uintptr_t) 0)
808 #ifdef HAVE_SYNC_BUILTINS
809 __atomic_add_fetch (&allocator_data
->used_pool_size
, -data
->size
,
812 gomp_mutex_lock (&allocator_data
->lock
);
813 allocator_data
->used_pool_size
-= data
->size
;
814 gomp_mutex_unlock (&allocator_data
->lock
);
/* Free through the backend that performed the allocation.  */
817 #ifdef LIBGOMP_USE_LIBNUMA
818 if (allocator_data
->memkind
== GOMP_MEMKIND_LIBNUMA
)
820 libnuma_data
->numa_free (data
->ptr
, data
->size
);
823 # ifdef LIBGOMP_USE_MEMKIND
827 #ifdef LIBGOMP_USE_MEMKIND
828 if (allocator_data
->memkind
)
830 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
831 void *kind
= *memkind_data
->kinds
[allocator_data
->memkind
];
832 memkind_data
->memkind_free (kind
, data
->ptr
);
837 memspace
= allocator_data
->memspace
;
838 pinned
= allocator_data
->pinned
;
/* Predefined allocator: rederive the backend from the stored handle,
   mirroring the selection logic in omp_aligned_alloc.  */
842 #ifdef LIBGOMP_USE_MEMKIND
843 enum gomp_numa_memkind_kind memkind
= GOMP_MEMKIND_NONE
;
844 if (data
->allocator
== omp_high_bw_mem_alloc
)
845 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
846 else if (data
->allocator
== omp_large_cap_mem_alloc
)
847 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
850 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
851 if (memkind_data
->kinds
[memkind
])
853 void *kind
= *memkind_data
->kinds
[memkind
];
854 memkind_data
->memkind_free (kind
, data
->ptr
);
860 memspace
= predefined_alloc_mapping (data
->allocator
);
861 pinned
= (data
->allocator
== ompx_gnu_pinned_mem_alloc
);
/* Default path: hand the raw pointer back to the memory space.  */
864 MEMSPACE_FREE (memspace
, data
->ptr
, data
->size
, pinned
);
/* Compiler-generated entry point for freeing 'omp allocate' storage;
   thin wrapper over omp_free with the handle passed as uintptr_t.
   NOTE(review): return type line missing from this mangled view.  */
870 GOMP_free (void *ptr
, uintptr_t allocator
)
872 return ialias_call (omp_free
) (ptr
, (omp_allocator_handle_t
) allocator
);
/* OpenMP omp_aligned_calloc: zero-initialized variant of
   omp_aligned_alloc for NMEMB elements of SIZE bytes.  Structure mirrors
   omp_aligned_alloc line-for-line, with an extra NMEMB*SIZE overflow
   check and calloc-style backends.
   NOTE(review): entire body is a mangled listing -- braces, labels,
   returns and '#endif's are missing from view; code preserved
   byte-for-byte, comments only added.  */
876 omp_aligned_calloc (size_t alignment
, size_t nmemb
, size_t size
,
877 omp_allocator_handle_t allocator
)
879 struct omp_allocator_data
*allocator_data
;
880 size_t new_size
, size_temp
, new_alignment
;
882 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
883 enum gomp_numa_memkind_kind memkind
;
/* Zero elements or zero size returns NULL (return in missing lines).  */
886 if (__builtin_expect (size
== 0 || nmemb
== 0, 0))
890 new_alignment
= alignment
;
/* Resolve omp_null_allocator to the thread's default allocator.  */
891 if (allocator
== omp_null_allocator
)
893 struct gomp_thread
*thr
= gomp_thread ();
894 if (thr
->ts
.def_allocator
== omp_null_allocator
)
895 thr
->ts
.def_allocator
= gomp_def_allocator
;
896 allocator
= (omp_allocator_handle_t
) thr
->ts
.def_allocator
;
897 if (!predefined_allocator_p (allocator
))
901 allocator_data
= (struct omp_allocator_data
*) allocator
;
902 if (new_alignment
< allocator_data
->alignment
)
903 new_alignment
= allocator_data
->alignment
;
904 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
905 memkind
= allocator_data
->memkind
;
910 allocator_data
= NULL
;
911 if (new_alignment
< sizeof (void *))
912 new_alignment
= sizeof (void *);
913 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
914 memkind
= GOMP_MEMKIND_NONE
;
916 #ifdef LIBGOMP_USE_MEMKIND
917 if (allocator
== omp_high_bw_mem_alloc
)
918 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
919 else if (allocator
== omp_large_cap_mem_alloc
)
920 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
923 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
924 if (!memkind_data
->kinds
[memkind
])
925 memkind
= GOMP_MEMKIND_NONE
;
/* Raw size = header + alignment slack + NMEMB*SIZE, both the multiply
   and the add checked for overflow.  */
930 new_size
= sizeof (struct omp_mem_header
);
931 if (new_alignment
> sizeof (void *))
932 new_size
+= new_alignment
- sizeof (void *);
933 if (__builtin_mul_overflow (size
, nmemb
, &size_temp
))
935 if (__builtin_add_overflow (size_temp
, new_size
, &new_size
))
937 #ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
938 if (allocator
== omp_low_lat_mem_alloc
)
/* Finite pool: reserve NEW_SIZE, CAS loop or mutex-protected.  */
942 if (__builtin_expect (allocator_data
943 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
945 uintptr_t used_pool_size
;
946 if (new_size
> allocator_data
->pool_size
)
948 #ifdef HAVE_SYNC_BUILTINS
949 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
953 uintptr_t new_pool_size
;
954 if (__builtin_add_overflow (used_pool_size
, new_size
,
956 || new_pool_size
> allocator_data
->pool_size
)
958 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
959 &used_pool_size
, new_pool_size
,
960 true, MEMMODEL_RELAXED
,
966 gomp_mutex_lock (&allocator_data
->lock
);
967 if (__builtin_add_overflow (allocator_data
->used_pool_size
, new_size
,
969 || used_pool_size
> allocator_data
->pool_size
)
971 gomp_mutex_unlock (&allocator_data
->lock
);
974 allocator_data
->used_pool_size
= used_pool_size
;
975 gomp_mutex_unlock (&allocator_data
->lock
);
/* Pool-tracked zeroed allocation via the chosen backend.  */
977 #ifdef LIBGOMP_USE_LIBNUMA
978 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
979 /* numa_alloc_local uses mmap with MAP_ANONYMOUS, returning
980 memory that is initialized to zero. */
981 ptr
= libnuma_data
->numa_alloc_local (new_size
);
982 # ifdef LIBGOMP_USE_MEMKIND
986 #ifdef LIBGOMP_USE_MEMKIND
989 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
990 void *kind
= *memkind_data
->kinds
[memkind
];
991 ptr
= memkind_data
->memkind_calloc (kind
, 1, new_size
);
995 ptr
= MEMSPACE_CALLOC (allocator_data
->memspace
, new_size
,
996 allocator_data
->pinned
);
/* Allocation failed: release the pool reservation.  */
999 #ifdef HAVE_SYNC_BUILTINS
1000 __atomic_add_fetch (&allocator_data
->used_pool_size
, -new_size
,
1003 gomp_mutex_lock (&allocator_data
->lock
);
1004 allocator_data
->used_pool_size
-= new_size
;
1005 gomp_mutex_unlock (&allocator_data
->lock
);
/* Unlimited-pool / predefined-allocator path.  */
1012 #ifdef LIBGOMP_USE_LIBNUMA
1013 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
1014 /* numa_alloc_local uses mmap with MAP_ANONYMOUS, returning
1015 memory that is initialized to zero. */
1016 ptr
= libnuma_data
->numa_alloc_local (new_size
);
1017 # ifdef LIBGOMP_USE_MEMKIND
1021 #ifdef LIBGOMP_USE_MEMKIND
1024 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1025 void *kind
= *memkind_data
->kinds
[memkind
];
1026 ptr
= memkind_data
->memkind_calloc (kind
, 1, new_size
);
1031 omp_memspace_handle_t memspace
;
1032 memspace
= (allocator_data
1033 ? allocator_data
->memspace
1034 : predefined_alloc_mapping (allocator
));
1035 int pinned
= (allocator_data
1036 ? allocator_data
->pinned
1037 : allocator
== ompx_gnu_pinned_mem_alloc
);
1038 ptr
= MEMSPACE_CALLOC (memspace
, new_size
, pinned
);
/* Success: align past the hidden header and fill it in.  */
1044 if (new_alignment
> sizeof (void *))
1045 ret
= (void *) (((uintptr_t) ptr
1046 + sizeof (struct omp_mem_header
)
1047 + new_alignment
- sizeof (void *))
1048 & ~(new_alignment
- 1));
1050 ret
= (char *) ptr
+ sizeof (struct omp_mem_header
);
1051 ((struct omp_mem_header
*) ret
)[-1].ptr
= ptr
;
1052 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1053 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
/* Failure: honor the fallback trait (retry switch; ternary truncated
   in this view, mirrors omp_aligned_alloc).  */
1057 int fallback
= (allocator_data
1058 ? allocator_data
->fallback
1059 : (allocator
== omp_default_mem_alloc
1060 || allocator
== ompx_gnu_pinned_mem_alloc
)
1062 : omp_atv_default_mem_fb
);
1065 case omp_atv_default_mem_fb
:
1066 allocator
= omp_default_mem_alloc
;
1068 case omp_atv_null_fb
:
1071 case omp_atv_abort_fb
:
1072 gomp_fatal ("Out of memory allocating %lu bytes",
1073 (unsigned long) (size
* nmemb
));
1074 case omp_atv_allocator_fb
:
1075 allocator
= allocator_data
->fb_data
;
1081 ialias (omp_aligned_calloc
)
/* OpenMP omp_calloc: convenience wrapper for omp_aligned_calloc with the
   minimum alignment of 1.  */
1084 omp_calloc (size_t nmemb
, size_t size
, omp_allocator_handle_t allocator
)
1086 return ialias_call (omp_aligned_calloc
) (1, nmemb
, size
, allocator
);
1090 omp_realloc (void *ptr
, size_t size
, omp_allocator_handle_t allocator
,
1091 omp_allocator_handle_t free_allocator
)
1093 struct omp_allocator_data
*allocator_data
, *free_allocator_data
;
1094 size_t new_size
, old_size
, new_alignment
, old_alignment
;
1095 void *new_ptr
, *ret
;
1096 struct omp_mem_header
*data
;
1097 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1098 enum gomp_numa_memkind_kind memkind
, free_memkind
;
1101 if (__builtin_expect (ptr
== NULL
, 0))
1102 return ialias_call (omp_aligned_alloc
) (1, size
, allocator
);
1104 if (__builtin_expect (size
== 0, 0))
1106 ialias_call (omp_free
) (ptr
, free_allocator
);
1110 data
= &((struct omp_mem_header
*) ptr
)[-1];
1111 free_allocator
= data
->allocator
;
1114 new_alignment
= sizeof (void *);
1115 if (allocator
== omp_null_allocator
)
1116 allocator
= free_allocator
;
1118 if (!predefined_allocator_p (allocator
))
1120 allocator_data
= (struct omp_allocator_data
*) allocator
;
1121 if (new_alignment
< allocator_data
->alignment
)
1122 new_alignment
= allocator_data
->alignment
;
1123 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1124 memkind
= allocator_data
->memkind
;
1129 allocator_data
= NULL
;
1130 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1131 memkind
= GOMP_MEMKIND_NONE
;
1133 #ifdef LIBGOMP_USE_MEMKIND
1134 if (allocator
== omp_high_bw_mem_alloc
)
1135 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
1136 else if (allocator
== omp_large_cap_mem_alloc
)
1137 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
1140 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1141 if (!memkind_data
->kinds
[memkind
])
1142 memkind
= GOMP_MEMKIND_NONE
;
1146 if (!predefined_allocator_p (free_allocator
))
1148 free_allocator_data
= (struct omp_allocator_data
*) free_allocator
;
1149 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1150 free_memkind
= free_allocator_data
->memkind
;
1155 free_allocator_data
= NULL
;
1156 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1157 free_memkind
= GOMP_MEMKIND_NONE
;
1159 #ifdef LIBGOMP_USE_MEMKIND
1160 if (free_allocator
== omp_high_bw_mem_alloc
)
1161 free_memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
1162 else if (free_allocator
== omp_large_cap_mem_alloc
)
1163 free_memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
1166 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1167 if (!memkind_data
->kinds
[free_memkind
])
1168 free_memkind
= GOMP_MEMKIND_NONE
;
1172 old_alignment
= (uintptr_t) ptr
- (uintptr_t) (data
->ptr
);
1174 new_size
= sizeof (struct omp_mem_header
);
1175 if (new_alignment
> sizeof (void *))
1176 new_size
+= new_alignment
- sizeof (void *);
1177 if (__builtin_add_overflow (size
, new_size
, &new_size
))
1179 old_size
= data
->size
;
1180 #ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
1181 if (allocator
== omp_low_lat_mem_alloc
)
1185 if (__builtin_expect (allocator_data
1186 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
1188 uintptr_t used_pool_size
;
1189 size_t prev_size
= 0;
1190 /* Check if we can use realloc. Don't use it if extra alignment
1191 was used previously or newly, because realloc might return a pointer
1192 with different alignment and then we'd need to memmove the data
1194 if (free_allocator_data
1195 && free_allocator_data
== allocator_data
1196 && new_alignment
== sizeof (void *)
1197 && old_alignment
== sizeof (struct omp_mem_header
))
1198 prev_size
= old_size
;
1199 if (new_size
> prev_size
1200 && new_size
- prev_size
> allocator_data
->pool_size
)
1202 #ifdef HAVE_SYNC_BUILTINS
1203 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
1207 uintptr_t new_pool_size
;
1208 if (new_size
> prev_size
)
1210 if (__builtin_add_overflow (used_pool_size
, new_size
- prev_size
,
1212 || new_pool_size
> allocator_data
->pool_size
)
1216 new_pool_size
= used_pool_size
+ new_size
- prev_size
;
1217 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
1218 &used_pool_size
, new_pool_size
,
1219 true, MEMMODEL_RELAXED
,
1225 gomp_mutex_lock (&allocator_data
->lock
);
1226 if (new_size
> prev_size
)
1228 if (__builtin_add_overflow (allocator_data
->used_pool_size
,
1229 new_size
- prev_size
,
1231 || used_pool_size
> allocator_data
->pool_size
)
1233 gomp_mutex_unlock (&allocator_data
->lock
);
1238 used_pool_size
= (allocator_data
->used_pool_size
1239 + new_size
- prev_size
);
1240 allocator_data
->used_pool_size
= used_pool_size
;
1241 gomp_mutex_unlock (&allocator_data
->lock
);
1243 #ifdef LIBGOMP_USE_LIBNUMA
1244 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
1247 new_ptr
= libnuma_data
->numa_realloc (data
->ptr
, data
->size
,
1250 new_ptr
= libnuma_data
->numa_alloc_local (new_size
);
1252 # ifdef LIBGOMP_USE_MEMKIND
1256 #ifdef LIBGOMP_USE_MEMKIND
1259 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1260 void *kind
= *memkind_data
->kinds
[memkind
];
1262 new_ptr
= memkind_data
->memkind_realloc (kind
, data
->ptr
,
1265 new_ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
1271 int was_pinned
= (free_allocator_data
1272 ? free_allocator_data
->pinned
1273 : free_allocator
== ompx_gnu_pinned_mem_alloc
);
1274 new_ptr
= MEMSPACE_REALLOC (allocator_data
->memspace
, data
->ptr
,
1275 data
->size
, new_size
, was_pinned
,
1276 allocator_data
->pinned
);
1279 new_ptr
= MEMSPACE_ALLOC (allocator_data
->memspace
, new_size
,
1280 allocator_data
->pinned
);
1281 if (new_ptr
== NULL
)
1283 #ifdef HAVE_SYNC_BUILTINS
1284 __atomic_add_fetch (&allocator_data
->used_pool_size
,
1285 prev_size
- new_size
,
1288 gomp_mutex_lock (&allocator_data
->lock
);
1289 allocator_data
->used_pool_size
-= new_size
- prev_size
;
1290 gomp_mutex_unlock (&allocator_data
->lock
);
1296 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
1297 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
1298 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1299 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1303 else if (new_alignment
== sizeof (void *)
1304 && old_alignment
== sizeof (struct omp_mem_header
)
1305 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1306 && memkind
== free_memkind
1308 && (free_allocator_data
== NULL
1309 || free_allocator_data
->pool_size
== ~(uintptr_t) 0))
1311 #ifdef LIBGOMP_USE_LIBNUMA
1312 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
1313 new_ptr
= libnuma_data
->numa_realloc (data
->ptr
, data
->size
, new_size
);
1314 # ifdef LIBGOMP_USE_MEMKIND
1318 #ifdef LIBGOMP_USE_MEMKIND
1321 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1322 void *kind
= *memkind_data
->kinds
[memkind
];
1323 new_ptr
= memkind_data
->memkind_realloc (kind
, data
->ptr
,
1329 omp_memspace_handle_t memspace
;
1330 memspace
= (allocator_data
1331 ? allocator_data
->memspace
1332 : predefined_alloc_mapping (allocator
));
1333 int was_pinned
= (free_allocator_data
1334 ? free_allocator_data
->pinned
1335 : free_allocator
== ompx_gnu_pinned_mem_alloc
);
1336 int pinned
= (allocator_data
1337 ? allocator_data
->pinned
1338 : allocator
== ompx_gnu_pinned_mem_alloc
);
1339 new_ptr
= MEMSPACE_REALLOC (memspace
, data
->ptr
, data
->size
, new_size
,
1340 was_pinned
, pinned
);
1342 if (new_ptr
== NULL
)
1345 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
1346 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
1347 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1348 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1353 #ifdef LIBGOMP_USE_LIBNUMA
1354 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
1355 new_ptr
= libnuma_data
->numa_alloc_local (new_size
);
1356 # ifdef LIBGOMP_USE_MEMKIND
1360 #ifdef LIBGOMP_USE_MEMKIND
1363 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1364 void *kind
= *memkind_data
->kinds
[memkind
];
1365 new_ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
1370 omp_memspace_handle_t memspace
;
1371 memspace
= (allocator_data
1372 ? allocator_data
->memspace
1373 : predefined_alloc_mapping (allocator
));
1374 int pinned
= (allocator_data
1375 ? allocator_data
->pinned
1376 : allocator
== ompx_gnu_pinned_mem_alloc
);
1377 new_ptr
= MEMSPACE_ALLOC (memspace
, new_size
, pinned
);
1379 if (new_ptr
== NULL
)
1383 if (new_alignment
> sizeof (void *))
1384 ret
= (void *) (((uintptr_t) new_ptr
1385 + sizeof (struct omp_mem_header
)
1386 + new_alignment
- sizeof (void *))
1387 & ~(new_alignment
- 1));
1389 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
1390 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
1391 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1392 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1393 if (old_size
- old_alignment
< size
)
1394 size
= old_size
- old_alignment
;
1395 memcpy (ret
, ptr
, size
);
1396 if (__builtin_expect (free_allocator_data
1397 && free_allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
1399 #ifdef HAVE_SYNC_BUILTINS
1400 __atomic_add_fetch (&free_allocator_data
->used_pool_size
, -data
->size
,
1403 gomp_mutex_lock (&free_allocator_data
->lock
);
1404 free_allocator_data
->used_pool_size
-= data
->size
;
1405 gomp_mutex_unlock (&free_allocator_data
->lock
);
1408 #ifdef LIBGOMP_USE_LIBNUMA
1409 if (free_memkind
== GOMP_MEMKIND_LIBNUMA
)
1411 libnuma_data
->numa_free (data
->ptr
, data
->size
);
1414 # ifdef LIBGOMP_USE_MEMKIND
1418 #ifdef LIBGOMP_USE_MEMKIND
1421 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1422 void *kind
= *memkind_data
->kinds
[free_memkind
];
1423 memkind_data
->memkind_free (kind
, data
->ptr
);
1428 omp_memspace_handle_t was_memspace
;
1429 was_memspace
= (free_allocator_data
1430 ? free_allocator_data
->memspace
1431 : predefined_alloc_mapping (free_allocator
));
1432 int was_pinned
= (free_allocator_data
1433 ? free_allocator_data
->pinned
1434 : free_allocator
== ompx_gnu_pinned_mem_alloc
);
1435 MEMSPACE_FREE (was_memspace
, data
->ptr
, data
->size
, was_pinned
);
1440 int fallback
= (allocator_data
1441 ? allocator_data
->fallback
1442 : (allocator
== omp_default_mem_alloc
1443 || allocator
== ompx_gnu_pinned_mem_alloc
)
1445 : omp_atv_default_mem_fb
);
1448 case omp_atv_default_mem_fb
:
1449 allocator
= omp_default_mem_alloc
;
1451 case omp_atv_null_fb
:
1454 case omp_atv_abort_fb
:
1455 gomp_fatal ("Out of memory allocating %lu bytes",
1456 (unsigned long) size
);
1457 case omp_atv_allocator_fb
:
1458 allocator
= allocator_data
->fb_data
;