/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp_affinity.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
#if KMP_AFFINITY_SUPPORTED
// Helper class to see if place lists further restrict the fullMask
class kmp_full_mask_modifier_t {
  kmp_affin_mask_t *mask;

public:
  kmp_full_mask_modifier_t() {
    KMP_CPU_ALLOC(mask);
    KMP_CPU_ZERO(mask);
  }
  ~kmp_full_mask_modifier_t() {
    KMP_CPU_FREE(mask);
    mask = nullptr;
  }
  void include(const kmp_affin_mask_t *other) { KMP_CPU_UNION(mask, other); }
  // If the new full mask is different from the current full mask,
  // then switch them. Returns true if full mask was affected, false otherwise.
  bool restrict_to_mask() {
    // See if the new mask further restricts or changes the full mask
    if (KMP_CPU_EQUAL(__kmp_affin_fullMask, mask) || KMP_CPU_ISEMPTY(mask))
      return false;
    return __kmp_topology->restrict_to_mask(mask);
  }
};

static inline const char *
__kmp_get_affinity_env_var(const kmp_affinity_t &affinity,
                           bool for_binding = false) {
  if (affinity.flags.omp_places) {
    if (for_binding)
      return "OMP_PROC_BIND";
    return "OMP_PLACES";
  }
  return affinity.env_var;
}
#endif // KMP_AFFINITY_SUPPORTED
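// Usage sketch (illustrative only, not a call site in this file): a place-list
// parser can accumulate each parsed place into the modifier, then narrow the
// global full mask once at the end.
//   kmp_full_mask_modifier_t full_mask;
//   full_mask.include(place_mask); // repeat per parsed place
//   if (full_mask.restrict_to_mask()) {
//     // __kmp_affin_fullMask and the topology were narrowed
//   }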
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need to
  // init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return KMP_I18N_STR(Unknown);
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return ((plural) ? "unknowns" : "unknown");
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
  case KMP_HW_MAX_NUM_CORE_TYPES:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity
// verbose and warning flags before printing warning
#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {    \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif
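// Usage sketch: callers pass the relevant affinity settings object plus an
// i18n message id and its arguments, so the warning respects the user's
// verbose/warnings flags, e.g.
//   KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);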
////////////////////////////////////////////////////////////////////////////////
// kmp_hw_thread_t methods
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
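// Usage sketch: this comparator is qsort()-compatible; sorting the flat
// hw_threads array by hierarchical ids looks like
//   qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
//         kmp_hw_thread_t::compare_ids);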
#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
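// Worked example: with depth == 3 (socket, core, thread) and compact == 1,
// the first loop compares the innermost sub_id (thread, j == 2) and the
// second loop then compares socket (j == 0) and core (j == 1), so the sorted
// order cycles across cores before packing threads within a core.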
void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  printf("\n");
}
////////////////////////////////////////////////////////////////////////////////
// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., so no object has more than one parent)
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find target layer
  // If new layer is equal to another layer then put the new layer above
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything to accommodate the new
  // layer. And put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);
}
#endif
// Remove layers that don't add information to the topology.
// This is done by having the layer take on the id = UNKNOWN_ID (-1)
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associative array
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads) to
    // be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        keep_type = type1;
        remove_layer = remove_layer_ids = top_index2;
      } else {
        remove_type = type1;
        keep_type = type2;
        remove_layer = remove_layer_ids = top_index1;
      }
      // If all the indexes for the second (deeper) layer are the same.
      // e.g., all are zero, then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove radix one type by setting the equivalence, removing the id from
      // the hw threads and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}
// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}
// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
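// Worked example: for 2 sockets x 4 cores/socket x 2 threads/core the
// product of the ratios is num = 2 * 4 * 2 = 16; the topology is uniform
// exactly when the total thread count, count[depth - 1], is also 16.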
// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // whole machine is one package
    nPackages = 1;
    nCoresPerPkg = get_count(core_level);
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}
kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  retval->compact = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}

void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}
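// Layout sketch of the single allocation made by allocate():
//   [kmp_topology_t][kmp_hw_thread_t x nproc][types|ratio|count,
//    each KMP_HW_LAST ints]
// which is why a single __kmp_free() in deallocate() releases everything.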
bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}
void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}
void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/socket
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
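// Example of the resulting "quick topology" string for a uniform machine:
//   2 sockets x 4 cores/socket x 2 threads/core
// (built by the __kmp_str_buf_print calls above; the exact wording of the
// surrounding KMP_INFORM message comes from the i18n catalog).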
#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  // If requested hybrid CPU attributes for granularity (either OMP_PLACES or
  // KMP_AFFINITY), but none exist, then reset granularity and have below method
  // select a granularity and warn user.
  if (!__kmp_is_hybrid_cpu()) {
    if (affinity.core_attr_gran.valid) {
      // OMP_PLACES with cores:<attribute> but non-hybrid arch, use cores
      // instead
      KMP_AFF_WARNING(
          affinity, AffIgnoringNonHybrid, env_var,
          __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    } else if (affinity.flags.core_types_gran ||
               affinity.flags.core_effs_gran) {
      // OMP_PLACES=core_types|core_effs but non-hybrid, use cores instead
      if (affinity.flags.omp_places) {
        KMP_AFF_WARNING(
            affinity, AffIgnoringNonHybrid, env_var,
            __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      } else {
        // KMP_AFFINITY=granularity=core_type|core_eff,...
        KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                        "Intel(R) Hybrid Technology core attribute",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE));
      }
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    }
  }
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists, and the level of
    // granularity specified by the user is too coarse, then the
    // granularity must be adjusted "down" to processor group affinity
    // because threads can only exist within one processor group.
    // For example, if a user sets granularity=socket and there are two
    // processor groups that cover a socket, then the runtime must
    // restrict the granularity down to the processor group level.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
#endif
void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually Add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}

// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  //__kmp_avail_proc = __kmp_xproc;
  _discover_uniformity();
}
// Represents running sub IDs for a single core attribute where
// attribute values have SIZE possibilities.
template <size_t SIZE, typename IndexFunc> struct kmp_sub_ids_t {
  int last_level; // last level in topology to consider for sub_ids
  int sub_id[SIZE]; // The sub ID for a given attribute value
  int prev_sub_id[KMP_HW_LAST];
  IndexFunc indexer;

public:
  kmp_sub_ids_t(int last_level) : last_level(last_level) {
    KMP_ASSERT(last_level < KMP_HW_LAST);
    for (size_t i = 0; i < SIZE; ++i)
      sub_id[i] = -1;
    for (size_t i = 0; i < KMP_HW_LAST; ++i)
      prev_sub_id[i] = -1;
  }
  void update(const kmp_hw_thread_t &hw_thread) {
    int idx = indexer(hw_thread);
    KMP_ASSERT(idx < (int)SIZE);
    for (int level = 0; level <= last_level; ++level) {
      if (hw_thread.sub_ids[level] != prev_sub_id[level]) {
        if (level < last_level)
          sub_id[idx] = -1;
        sub_id[idx]++;
        break;
      }
    }
    for (int level = 0; level <= last_level; ++level)
      prev_sub_id[level] = hw_thread.sub_ids[level];
  }
  int get_sub_id(const kmp_hw_thread_t &hw_thread) const {
    return sub_id[indexer(hw_thread)];
  }
};
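// Usage sketch (mirrors the use in filter_hw_subset() below): instantiate
// with an indexer, feed hardware threads in sorted order, then query the
// running sub ID for each thread's attribute value.
//   kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer>
//       core_type_sub_ids(core_level);
//   core_type_sub_ids.update(hw_thread);
//   int sub_id = core_type_sub_ids.get_sub_id(hw_thread);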
#if KMP_AFFINITY_SUPPORTED
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
bool kmp_topology_t::restrict_to_mask(const kmp_affin_mask_t *mask) {
  // Apply the filter
  bool affected;
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    int os_id = hw_threads[i].os_id;
    if (KMP_CPU_ISSET(os_id, mask)) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
      KMP_CPU_CLR(os_id, __kmp_affin_fullMask);
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  affected = (num_hw_threads != new_index);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  if (affected) {
    _gather_enumeration_information();
    _discover_uniformity();
    _set_globals();
    _set_last_level_cache();
    // Copy filtered full mask if topology has single processor group
    if (__kmp_num_proc_groups <= 1)
      __kmp_affin_origMask->copy(__kmp_affin_fullMask);
  }
  return affected;
}
// Apply the KMP_HW_SUBSET environment variable to the topology
// Returns true if KMP_HW_SUBSET filtered any processors
// otherwise, returns false
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been
    // specified either directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers,
                      __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric,
                      __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // Check if using a single core attribute on non-hybrid arch.
      // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
      //
      // Check if using multiple core attributes on non-hybrid arch.
      // Ignore all of KMP_HW_SUBSET if this is the case.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "efficiency");
          } else {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type",
                        "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if (using_core_types || using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }
  struct core_type_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      switch (t.attrs.get_core_type()) {
      case KMP_HW_CORE_TYPE_UNKNOWN:
      case KMP_HW_MAX_NUM_CORE_TYPES:
        return 0;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
      case KMP_HW_CORE_TYPE_ATOM:
        return 1;
      case KMP_HW_CORE_TYPE_CORE:
        return 2;
#endif
      }
      KMP_ASSERT2(false, "Unhandled kmp_hw_thread_t enumeration");
      KMP_BUILTIN_UNREACHABLE;
    }
  };
  struct core_eff_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      return t.attrs.get_core_eff();
    }
  };

  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer> core_type_sub_ids(
      core_level);
  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_EFFS, core_eff_indexer> core_eff_sub_ids(
      core_level);

  // Determine which hardware threads should be filtered.
  int num_filtered = 0;
  kmp_affin_mask_t *filtered_mask;
  KMP_CPU_ALLOC(filtered_mask);
  KMP_CPU_COPY(filtered_mask, __kmp_affin_fullMask);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Update type_sub_id
    if (using_core_types)
      core_type_sub_ids.update(hw_thread);
    if (using_core_effs)
      core_eff_sub_ids.update(hw_thread);

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds
        // to this hardware thread's core attribute. Use this num,offset plus
        // the running sub_id for the particular core attribute of this hardware
        // thread to determine if the hardware thread should be filtered or not.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it.
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids.get_sub_id(hw_thread);
        else
          sub_id = core_eff_sub_ids.get_sub_id(hw_thread);
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (hw_thread.sub_ids[level] < offset ||
            (num != kmp_hw_subset_t::USE_ALL &&
             hw_thread.sub_ids[level] >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    if (should_be_filtered) {
      KMP_CPU_CLR(hw_thread.os_id, filtered_mask);
      num_filtered++;
    }
  }

  // One last check that we shouldn't allow filtering entire machine
  if (num_filtered == num_hw_threads) {
    KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
    KMP_CPU_FREE(filtered_mask);
    return false;
  }

  // Apply the filter
  restrict_to_mask(filtered_mask);
  KMP_CPU_FREE(filtered_mask);
  return true;
}
bool kmp_topology_t::is_close(int hwt1, int hwt2,
                              const kmp_affinity_t &stgs) const {
  int hw_level = stgs.gran_levels;
  if (hw_level >= depth)
    return true;
  bool retval = true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  if (stgs.flags.core_types_gran)
    return t1.attrs.get_core_type() == t2.attrs.get_core_type();
  if (stgs.flags.core_effs_gran)
    return t1.attrs.get_core_eff() == t2.attrs.get_core_eff();
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return retval;
}
#endif // KMP_AFFINITY_SUPPORTED
////////////////////////////////////////////////////////////////////////////////

bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity.type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (true) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
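// Example: a mask with bits {0, 1, 2, 4, 6, 7, 8} set prints as
// "0-2,4,6-8"; an empty mask prints as "{<empty>}".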
// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (true) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}
// Return (possibly empty) affinity mask representing the offline CPUs
// Caller must free the mask
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  kmp_affin_mask_t *offline;
  KMP_CPU_ALLOC(offline);
  KMP_CPU_ZERO(offline);
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t offline_file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  // File contains CSV of integer ranges representing the offline CPUs
  // e.g., 1,2,4-7,9,11-15
  int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
  if (status != 0)
    return offline;
  while (!feof(offline_file)) {
    skip_ws(offline_file);
    n = fscanf(offline_file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(offline_file);
    int c = fgetc(offline_file);
    if (c == EOF || c == ',') {
      // Just single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(offline_file);
      n = fscanf(offline_file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(offline_file);
      c = fgetc(offline_file); // skip ','
    } else {
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into offline mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, offline);
    }
  }
#endif
  return offline;
}
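// Example: if /sys/devices/system/cpu/offline contains "1,2,4-7", the
// returned mask has bits 1, 2, 4, 5, 6, and 7 set (all below __kmp_xproc).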
// Return the number of available procs
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  int avail_proc = 0;
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
        avail_proc++;
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    for (proc = 0; proc < __kmp_xproc; proc++) {
      // Skip offline CPUs
      if (KMP_CPU_ISSET(proc, offline_cpus))
        continue;
      KMP_CPU_SET(proc, mask);
      avail_proc++;
    }
    KMP_CPU_FREE(offline_cpus);
  }

  return avail_proc;
}
// All of the __kmp_affinity_create_*_map() routines should allocate the
// internal topology object and set the layer ids for it. Each routine
// returns a boolean on whether it was successful at doing so.
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
// Original mask is a subset of full mask in multiple processor groups topology
kmp_affin_mask_t *__kmp_affin_origMask = NULL;
#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}

// Returns KMP_HW_* type derived from HWLOC_* type
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
#endif
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}
// Returns the number of objects of type 'type' below 'obj' within the topology
// tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
// HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET
// object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type, first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}

// This gets the sub_id for a lower object under a higher object in the
// topology tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // store sub_id + 1 so that 0 is differed from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t type;
  int hw_thread_index, sub_id;
  int depth = 0;
  hwloc_obj_t pu, obj, root, prev;
  kmp_hw_t types[KMP_HW_LAST];
  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];

  hwloc_topology_t tp = __kmp_hwloc_topology;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
  }

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from hwloc on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    // hwloc only guarantees existance of PU object, so check PACKAGE and CORE
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
    if (o != NULL)
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    else
      nCoresPerPkg = 1; // no PACKAGE found
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
    if (o != NULL)
      __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
    else
      __kmp_nThreadsPerCore = 1; // no CORE found
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    if (nCoresPerPkg == 0)
      nCoresPerPkg = 1; // to prevent possible division by 0
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Handle multiple types of cores if they exist on the system
  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);

  typedef struct kmp_hwloc_cpukinds_info_t {
    int efficiency;
    kmp_hw_core_type_t core_type;
    hwloc_bitmap_t mask;
  } kmp_hwloc_cpukinds_info_t;
  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;

  if (nr_cpu_kinds > 0) {
    unsigned nr_infos;
    struct hwloc_info_s *infos;
    cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
        sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
    for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
      cpukinds[idx].efficiency = -1;
      cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      cpukinds[idx].mask = hwloc_bitmap_alloc();
      if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
                                  &cpukinds[idx].efficiency, &nr_infos, &infos,
                                  0) == 0) {
        for (unsigned i = 0; i < nr_infos; ++i) {
          if (__kmp_str_match("CoreType", 8, infos[i].name)) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
            if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
              break;
            } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
              break;
            }
#endif
          }
        }
      }
    }
  }
#endif

  root = hwloc_get_root_obj(tp);

  // Figure out the depth and types in the topology
  depth = 0;
  pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
  KMP_ASSERT(pu);
  obj = pu;
  types[depth] = KMP_HW_THREAD;
  hwloc_types[depth] = obj->type;
  depth++;
  while (obj != root && obj != NULL) {
    obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->memory_arity) {
      hwloc_obj_t memory;
      for (memory = obj->memory_first_child; memory;
           memory = hwloc_get_next_child(tp, obj, memory)) {
        if (memory->type == HWLOC_OBJ_NUMANODE)
          break;
      }
      if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
        types[depth] = KMP_HW_NUMA;
        hwloc_types[depth] = memory->type;
        depth++;
      }
    }
#endif
    type = __kmp_hwloc_type_2_topology_type(obj);
    if (type != KMP_HW_UNKNOWN) {
      types[depth] = type;
      hwloc_types[depth] = obj->type;
      depth++;
    }
  }
  KMP_ASSERT(depth > 0);

  // Get the order for the types correct
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    kmp_hw_t temp = types[i];
    types[i] = types[j];
    types[j] = temp;
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
  }

  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  hw_thread_index = 0;
  pu = NULL;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    if (included) {
      hw_thread.clear();
      hw_thread.ids[index] = pu->logical_index;
      hw_thread.os_id = pu->os_index;
      // If multiple core types, then set that attribute for the hardware thread
#if HWLOC_API_VERSION >= 0x00020400
      if (cpukinds) {
        int cpukind_index = -1;
        for (int i = 0; i < nr_cpu_kinds; ++i) {
          if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
            cpukind_index = i;
            break;
          }
        }
        if (cpukind_index >= 0) {
          hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
          hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
        }
      }
#endif
      index--;
    }
    obj = pu;
    prev = obj;
    while (obj != root && obj != NULL) {
      obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
      // NUMA Nodes are handled differently since they are not within the
      // parent/child structure anymore. They are separate children
      // of obj (memory_first_child points to first memory child)
      if (obj->memory_arity) {
        hwloc_obj_t memory;
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
            break;
        }
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          if (included) {
            hw_thread.ids[index] = memory->logical_index;
            hw_thread.ids[index + 1] = sub_id;
            index--;
          }
          prev = memory;
        }
        prev = obj;
      }
#endif
      type = __kmp_hwloc_type_2_topology_type(obj);
      if (type != KMP_HW_UNKNOWN) {
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        if (included) {
          hw_thread.ids[index] = obj->logical_index;
          hw_thread.ids[index + 1] = sub_id;
          index--;
        }
        prev = obj;
      }
    }
    if (included)
      hw_thread_index++;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Free the core types information
  if (cpukinds) {
    for (int idx = 0; idx < nr_cpu_kinds; ++idx)
      hwloc_bitmap_free(cpukinds[idx].mask);
    __kmp_free(cpukinds);
  }
#endif
  __kmp_topology->sort_ids();
  return true;
}
#endif // KMP_USE_HWLOC
// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread id's <-> processor id's.
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
  }

  // Even if __kmp_affinity.type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as
  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    return true;
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly, and return now if affinity is
  // not enabled.
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
    avail_ct++;
  }
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }
  return true;
}
#if KMP_GROUP_AFFINITY
// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at level 1.
// This facilitates letting the threads float among all procs in a group,
// if granularity=group (the default when there are multiple groups).
static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
  }

  // If we aren't affinity capable, then use flat topology
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    nPackages = __kmp_num_proc_groups;
    __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc;
    nCoresPerPkg = nPackages / __kmp_ncores;
    return true;
  }

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i / BITS_PER_GROUP;
    hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
  }
  return true;
}
#endif /* KMP_GROUP_AFFINITY */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
  const kmp_uint32 SHIFT_RIGHT = LSB;
  kmp_uint32 retval = v;
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
  return retval;
}

static int __kmp_cpuid_mask_width(int count) {
  int r = 0;
  while ((1 << r) < count)
    ++r;
  return r;
}
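// Worked example (illustrative only): __kmp_extract_bits<5, 7>(v) shifts v
// left by 24 (= 32 - 1 - 7) and then right by 29 (= 24 + 5), isolating bits
// 5..7. __kmp_cpuid_mask_width(count) returns the smallest r such that
// (1 << r) >= count, e.g. count = 6 -> r = 3: three id bits cover six ids.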
class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; // ""
  unsigned maxThreadsPerPkg; // ""
  unsigned pkgId; // inferred from above values
  unsigned coreId; // ""
  unsigned threadId; // ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}
class kmp_cache_info_t {
public:
  struct info_t {
    unsigned level, mask;
  };
  kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }

  static kmp_hw_t get_topology_type(unsigned level) {
    KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
    switch (level) {
    case 1:
      return KMP_HW_L1;
    case 2:
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

private:
  static const int MAX_CACHE_LEVEL = 3;

  size_t depth;
  info_t table[MAX_CACHE_LEVEL];

  void get_leaf4_levels() {
    unsigned level = 0;
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      kmp_cpuid buf2;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((-1) << cache_mask_width);
      depth++;
      level++;
    }
  }
};
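// Worked example (illustrative only): if leaf 4 reports a data cache with
// max_threads_sharing = 2, then cache_mask_width = 1 and the stored mask is
// ((-1) << 1) = 0xFFFFFFFE; APIC ids that agree in all bits above the lowest
// one share that cache level.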
// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
// an algorithm which cycles through the available os threads, setting
// the current thread's affinity mask to that thread, and then retrieves
// the Apic Id for each thread context using the cpuid instruction.
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
  }

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return false;
  }

  // The algorithm used starts by setting the affinity to each available thread
  // and retrieving info from the cpuid instruction, so if we are not capable
  // of calling __kmp_get_system_affinity() and __kmp_set_system_affinity(),
  // then we need to do something else - use the defaults that we calculated
  // from issuing cpuid without binding to each proc.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity.type == affinity_none);

    // Get an upper bound on the number of threads per package using cpuid(1).
    // On some OS/chip combinations where HT is supported by the chip but is
    // disabled, this value will be 2 on a single core chip. Usually, it will be
    // 2 if HT is enabled and 1 if HT is disabled.
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4). 1 must be added to the
    // encoded value.
    //
    // The author of cpu_count.cpp treated this as only an upper bound on the
    // number of cores, but I haven't seen any cases where it was greater than
    // the actual number of cores, so we will treat it as exact in this block
    // of code.
    //
    // First, we need to check if cpuid(4) is supported on this chip. To see if
    // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
    // or greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // There is no way to reliably tell if HT is enabled without issuing the
    // cpuid instruction from every thread, and correlating the cpuid info, so
    // if the machine is not affinity capable, we assume that HT is off. We
    // have seen quite a few machines where maxThreadsPerPkg is 2, yet the
    // machine does not support HT.
    //
    // - Older OSes are usually found on machines with older chips, which do
    //   not support HT.
    // - The performance penalty for mistakenly identifying a machine as HT
    //   when it isn't (which results in blocktime being incorrectly set to 0)
    //   is greater than the penalty for mistakenly identifying a machine as
    //   being 1 thread/core when it is really HT enabled (which results in
    //   blocktime being incorrectly set to a positive value).
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    return true;
  }
  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity.type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  //
  // The relevant information is:
  // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
  //   has a unique Apic Id, which is of the form pkg# : core# : thread#.
  // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
  //   of this field determines the width of the core# + thread# fields in the
  //   Apic Id. It is also an upper bound on the number of threads per
  //   package, but it has been verified that situations happen where it is not
  //   exact. In particular, on certain OS/chip combinations where Intel(R)
  //   Hyper-Threading Technology is supported by the chip but has been
  //   disabled, the value of this field will be 2 (for a single core chip).
  //   On other OS/chip combinations supporting Intel(R) Hyper-Threading
  //   Technology, the value of this field will be 1 when Intel(R)
  //   Hyper-Threading Technology is disabled and 2 when it is enabled.
  // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value
  //   of this field (+1) determines the width of the core# field in the Apic
  //   Id. The comments in "cpucount.cpp" say that this value is an upper
  //   bound, but the IA-32 architecture manual says that it is exactly the
  //   number of cores per package, and I haven't seen any case where it
  //   wasn't.
  //
  // From this information, deduce the package Id, core Id, and thread Id,
  // and set the corresponding fields in the apicThreadInfo struct.
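  // Worked example (illustrative values): with maxThreadsPerPkg = 4 and
  // maxCoresPerPkg = 2, widthCT = 2, widthC = 1 and widthT = 1. An Apic Id of
  // 0b1101 then decodes as pkgId = 0b11, coreId = 0b0 and threadId = 0b1,
  // matching the pkg# : core# : thread# layout described above.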
  unsigned i;
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return false;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
    // value.
    //
    // First, we need to check if cpuid(4) is supported on this chip. To see if
    // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
    // or greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // I've never seen this one happen, but I suppose it could, if the cpuid
      // instruction on a chip was really screwed up. Make sure to restore the
      // affinity mask before the tail call.
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

    nApics++;
  }
  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  previous_affinity.restore();

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields. pkgId's may be sparsely assigned among
  // the chips on a system. Although coreId's are usually assigned
  // [0 .. coresPerPkg-1] and threadId's are usually assigned
  // [0..threadsPerCore-1], we don't want to make any such assumptions.
  //
  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
  // total # packages) are at this point - we want to determine that now. We
  // only have an upper bound on the first two figures.
  //
  // We also perform a consistency check at this point: the values returned by
  // the cpuid instruction for any thread bound to a given package had better
  // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
  nPackages = 1;
  nCoresPerPkg = 1;
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;

  unsigned pkgCt = 1; // to determine radii
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // intra-pkg consist checks
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nCores++;
      pkgCt++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars, though.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return false;
    }

    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return false;
    }
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly
  nPackages = pkgCt;
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;
  __kmp_ncores = nCores;
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);

  // Now that we've determined the number of packages, the number of cores per
  // package, and the number of threads per core, we can construct the data
  // structure that is to be returned.
  int pkgLevel = 0;
  int coreLevel = 1;
  int threadLevel = 2;
  //(__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
  int idx = 0;
  kmp_hw_t types[3];
  if (pkgLevel >= 0)
    types[idx++] = KMP_HW_SOCKET;
  if (coreLevel >= 0)
    types[idx++] = KMP_HW_CORE;
  if (threadLevel >= 0)
    types[idx++] = KMP_HW_THREAD;

  KMP_ASSERT(depth > 0);
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);

  for (i = 0; i < nApics; ++i) {
    idx = 0;
    unsigned os = threadInfo[i].osId;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();

    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    }
    hw_thread.os_id = os;
  }

  __kmp_free(threadInfo);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    return false;
  }
  return true;
}
// Hybrid cpu detection using CPUID.1A
// Thread should be pinned to processor already
static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
                                  unsigned *native_model_id) {
  kmp_cpuid buf;
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
  switch (*type) {
  case KMP_HW_CORE_TYPE_ATOM:
    *efficiency = 0;
    break;
  case KMP_HW_CORE_TYPE_CORE:
    *efficiency = 1;
    break;
  default:
    *efficiency = 0;
  }
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
}
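// Note: CPUID.1A encodes the core type in EAX bits 31:24 and the native model
// id in bits 23:0; the efficiency values assigned above (Atom = 0, Core = 1)
// are this runtime's own relative ranking, not a hardware-reported number.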
// Intel(R) microarchitecture code name Nehalem, Dunnington and later
// architectures support a newer interface for specifying the x2APIC Ids,
// based on CPUID.B or CPUID.1F
/*
 * CPUID.B or 1F, Input ECX (sub leaf # aka level number)
    Bits            Bits            Bits           Bits
    31-16           15-8            7-4            4-0
---+-----------+--------------+-------------+-----------------+
EAX| reserved  |   reserved   |   reserved  |  Bits to Shift  |
---+-----------|--------------+-------------+-----------------|
EBX| reserved  | Num logical processors at level (16 bits)    |
---+-----------|--------------+-------------------------------|
ECX| reserved  |   Level Type |      Level Number (8 bits)    |
---+-----------+--------------+-------------------------------|
EDX|                    X2APIC ID (32 bits)                   |
---+----------------------------------------------------------+
*/
enum {
  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_MODULE = 3,
  INTEL_LEVEL_TYPE_TILE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,
};
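// Worked example (illustrative values only): on a 2-way SMT part, CPUID.B
// with ECX = 0 typically reports level_type = INTEL_LEVEL_TYPE_SMT, two
// logical processors in EBX[15:0] and a shift width of 1 in EAX[4:0];
// ECX = 1 then reports INTEL_LEVEL_TYPE_CORE with the combined core+thread
// shift width. EDX holds the current logical processor's x2APIC id at every
// sub leaf.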
struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;
};

static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
    return KMP_HW_SOCKET;
  case INTEL_LEVEL_TYPE_SMT:
    return KMP_HW_THREAD;
  case INTEL_LEVEL_TYPE_CORE:
    return KMP_HW_CORE;
  case INTEL_LEVEL_TYPE_TILE:
    return KMP_HW_TILE;
  case INTEL_LEVEL_TYPE_MODULE:
    return KMP_HW_MODULE;
  case INTEL_LEVEL_TYPE_DIE:
    return KMP_HW_DIE;
  }
  return KMP_HW_UNKNOWN;
}
// This function takes the topology leaf, a levels array to store the levels
// detected and a bitmap of the known levels.
// Returns the number of levels in the topology
static unsigned
__kmp_x2apicid_get_levels(int leaf,
                          cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
                          kmp_uint64 known_levels) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  kmp_cpuid buf;

  // New algorithm has known topology layers act as highest unknown topology
  // layers when unknown topology layers exist.
  // e.g., Suppose layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> <Z>
  // are unknown topology layers, Then SMT will take the characteristics of
  // (SMT x <X>) and CORE will take the characteristics of (CORE x <Y> x <Z>).
  // This eliminates unknown portions of the topology while still keeping the
  // correct structure.
  level = levels_index = 0;
  do {
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
      return 0;

    if (known_levels & (1ull << level_type)) {
      // Add a new level to the topology
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      levels_index++;
    } else {
      // If it is an unknown level, then logically move the previous layer up
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
      }
    }
    level++;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);

  // Ensure the INTEL_LEVEL_TYPE_INVALID (Socket) layer isn't first
  if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID)
    return 0;

  // Set the masks to & with apicid
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      levels[i].cache_mask = (-1) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      KMP_DEBUG_ASSERT(i > 0);
      levels[i].mask = (-1) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    }
  }
  return levels_index;
}
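// Worked example (illustrative values): with an SMT level of mask_width 1 and
// a core level of mask_width 5, the loop above produces
//   levels[0].mask = 0x01          (thread id bits)
//   levels[1].mask = 0x1E          (core id bits, SMT bits xor'ed out)
//   levels[2].mask = (-1) << 5     (package bits for the INVALID/socket level)
// so each level's id can be recovered by masking the x2APIC id directly.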
static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
  unsigned levels_index;

  kmp_uint64 known_levels;
  int topology_leaf, highest_leaf, apic_id;
  int num_leaves;
  static int leaves[] = {0, 0};

  kmp_i18n_id_t leaf_message_id;

  KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);

  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
  }

  // Figure out the known topology levels
  known_levels = 0ull;
  for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
    if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
      known_levels |= (1ull << i);
    }
  }

  // Get the highest cpuid leaf supported
  __kmp_x86_cpuid(0, 0, &buf);
  highest_leaf = buf.eax;

  // If a specific topology method was requested, only allow that specific leaf
  // otherwise, try both leaves 31 and 11 in that order
  num_leaves = 0;
  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
    num_leaves = 1;
    leaves[0] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    num_leaves = 1;
    leaves[0] = 31;
    leaf_message_id = kmp_i18n_str_NoLeaf31Support;
  } else {
    num_leaves = 2;
    leaves[0] = 31;
    leaves[1] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  }

  // Check to see if cpuid leaf 31 or 11 is supported.
  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
  topology_leaf = -1;
  for (int i = 0; i < num_leaves; ++i) {
    int leaf = leaves[i];
    if (highest_leaf < leaf)
      continue;
    __kmp_x86_cpuid(leaf, 0, &buf);
    if (buf.ebx == 0)
      continue;
    topology_leaf = leaf;
    levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
    if (levels_index == 0)
      continue;
    break;
  }
  if (topology_leaf == -1 || levels_index == 0) {
    *msg_id = leaf_message_id;
    return false;
  }
  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);

  // The algorithm used starts by setting the affinity to each available thread
  // and retrieving info from the cpuid instruction, so if we are not capable
  // of calling __kmp_get_system_affinity() and __kmp_set_system_affinity(),
  // then we need to do something else - use the defaults that we calculated
  // from issuing cpuid without binding to each proc.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    for (unsigned i = 0; i < levels_index; ++i) {
      if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
        __kmp_nThreadsPerCore = levels[i].nitems;
      } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
        nCoresPerPkg = levels[i].nitems;
      }
    }
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

  // Allocate the data structure to be returned.
  int depth = levels_index;
  for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
    types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
  __kmp_topology =
      kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);

  // Insert equivalent cache types if they exist
  kmp_cache_info_t cache_info;
  for (size_t i = 0; i < cache_info.get_depth(); ++i) {
    const kmp_cache_info_t::info_t &info = cache_info[i];
    unsigned cache_mask = info.mask;
    unsigned cache_level = info.level;
    for (unsigned j = 0; j < levels_index; ++j) {
      unsigned hw_cache_mask = levels[j].cache_mask;
      kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
      if (hw_cache_mask == cache_mask && j < levels_index - 1) {
        kmp_hw_t type =
            __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
        __kmp_topology->set_equivalent_type(cache_type, type);
      }
    }
  }

  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity.type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  unsigned int proc;
  int hw_thread_index = 0;
  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
    unsigned my_levels_index;

    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(proc);

    // New algorithm
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    apic_id = buf.edx;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    my_levels_index =
        __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
    if (my_levels_index == 0 || my_levels_index != levels_index) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }
    hw_thread.clear();
    hw_thread.os_id = proc;
    // Put in topology information
    for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
      hw_thread.ids[idx] = apic_id & my_levels[j].mask;
      if (j > 0) {
        hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
      }
    }
    // Hybrid information
    if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
      kmp_hw_core_type_t type;
      unsigned native_model_id;
      int efficiency = 0;
      __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
      hw_thread.attrs.set_core_type(type);
      hw_thread.attrs.set_core_eff(efficiency);
    }
    hw_thread_index++;
  }
  KMP_ASSERT(hw_thread_index > 0);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
    return false;
  }
  return true;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
                                                  const void *b) {
  unsigned i;
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
    if (i == osIdIndex)
      break;
  }
  return 0;
}
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers
static void __kmp_dispatch_set_hierarchy_values() {
  // Set the maximum number of L1's to number of cores
  // Set the maximum number of L2's to either number of cores / 2 for
  // Intel(R) Xeon Phi(TM) coprocessor formerly codenamed Knights Landing
  // Or the number of cores for Intel(R) Xeon(R) processors
  // Set the maximum number of NUMA nodes and L3's to number of packages
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&  \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
  else
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
  // Set the number of threads per unit
  // Number of hardware threads per L1/L2/L3/NUMA/LOOP
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
      __kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&  \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        2 * __kmp_nThreadsPerCore;
  else
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}

// Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
// i.e., this thread's L1 or this thread's L2, etc.
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
  int index = type + 1;
  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
  if (type == kmp_hier_layer_e::LAYER_THREAD)
    return tid;
  else if (type == kmp_hier_layer_e::LAYER_LOOP)
    return 0;
  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}

// Return the number of t1's per t2
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
  int i1 = t1 + 1;
  int i2 = t2 + 1;
  KMP_DEBUG_ASSERT(i1 <= i2);
  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (nthreads/t2) / (nthreads/t1) = t1 / t2
  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
#endif // KMP_USE_HIER_SCHED
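// Worked example (illustrative values): on a 2-socket, 8-cores-per-socket,
// 2-SMT machine, __kmp_dispatch_get_index(21, LAYER_L1) computes
//   (21 / __kmp_hier_threads_per[L1]) % __kmp_hier_max_units[L1]
//     = (21 / 2) % 16 = 10,
// i.e. tid 21 belongs to the L1 unit of the 11th core.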
static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
  else
    filename = "/proc/cpuinfo";
  return filename;
}

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
  return envvar;
}
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
// affinity map.
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  }

  kmp_safe_raii_file_t f(filename, "r", envvar);

  // Scan of the file, and count the number of "processor" (osId) fields,
  // and find the highest value of <n> for a node_<n> field.
  char buf[256];
  unsigned num_records = 0;
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors presumably because of EOF
      break;
    }

    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      num_records++;
      continue;
    }

    // FIXME - this will match "node_<n> <garbage>"
    unsigned level;
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // validate the input first:
      if (level > (unsigned)__kmp_xproc) { // level is too big
        level = __kmp_xproc;
      }
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
      }
    }
  }

  // Check for empty file / no valid processor records, or too many. The number
  // of records can't exceed the number of valid bits in the affinity mask.
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    return false;
  }
  if (num_records > (unsigned)__kmp_xproc) {
    *msg_id = kmp_i18n_str_TooManyProcRecords;
    return false;
  }

  // Set the file pointer back to the beginning, so that we can scan the file
  // again, this time performing a full parse of the data. Allocate a vector of
  // ProcCpuInfo object, where we will place the data. Adding an extra element
  // at the end allows us to remove a lot of extra checks for termination
  // conditions.
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
    return false;
  }

  // Allocate the array of records to store the proc info in. The dummy
  // element at the end makes the logic in filling them out easier to code.
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  unsigned i;
  for (i = 0; i <= num_records; i++) {
    threadInfo[i] =
        (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  }

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                         \
    __kmp_free(threadInfo[i]);                                                 \
  }                                                                            \
  __kmp_free(threadInfo);

  // A value of UINT_MAX means that we didn't find the field
  unsigned __index;

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                          \
    (p)[__index] = UINT_MAX;                                                   \
  }

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  }

  unsigned num_avail = 0;
  *line = 0;
#if KMP_ARCH_S390X
  bool reading_s390x_sys_info = true;
#endif
  while (!feof(f)) {
    // Create an inner scoping level, so that all the goto targets at the end of
    // the loop appear in an outer scoping level. This avoids warnings about
    // jumping past an initialization to a target in the same block.
    {
      buf[sizeof(buf) - 1] = 1;
      bool long_line = false;
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors presumably because of EOF
        // If there is valid data in threadInfo[num_avail], then fake
        // a blank line to ensure that the last address gets parsed.
        bool valid = false;
        for (i = 0; i <= maxIndex; i++) {
          if (threadInfo[num_avail][i] != UINT_MAX) {
            valid = true;
          }
        }
        if (!valid) {
          break;
        }
        buf[0] = 0;
      } else if (!buf[sizeof(buf) - 1]) {
        // The line is longer than the buffer. Set a flag and don't
        // emit an error if we were going to ignore the line, anyway.
        long_line = true;

#define CHECK_LINE                                                             \
  if (long_line) {                                                             \
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
    return false;                                                              \
  }
      }
      (*line)++;

#if KMP_ARCH_LOONGARCH64
      // The parsing logic of /proc/cpuinfo in this function highly depends on
      // the blank lines between each processor info block. But on LoongArch a
      // blank line exists before the first processor info block (i.e. after the
      // "system type" line). This blank line was added because the "system
      // type" line is unrelated to any of the CPUs. We must skip this line so
      // that the original logic works on LoongArch.
      if (*buf == '\n' && *line == 2)
        continue;
#endif
#if KMP_ARCH_S390X
      // s390x /proc/cpuinfo starts with a variable number of lines containing
      // the overall system information. Skip them.
      if (reading_s390x_sys_info) {
        if (*buf == '\n')
          reading_s390x_sys_info = false;
        continue;
      }
#endif

#if KMP_ARCH_S390X
      char s1[] = "cpu number";
#else
      char s1[] = "processor";
#endif
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
#if KMP_ARCH_AARCH64
          // Handle the old AArch64 /proc/cpuinfo layout differently,
          // it contains all of the 'processor' entries listed in a
          // single 'Processor' section, therefore the normal looking
          // for duplicates in that section will always fail.
          num_avail++;
#else
          goto dup_field;
#endif
        threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
        char path[256];
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

#if KMP_ARCH_S390X
        // Disambiguate physical_package_id.
        unsigned book_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/book_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &book_id);
        threadInfo[num_avail][pkgIdIndex] |= (book_id << 8);

        unsigned drawer_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/drawer_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &drawer_id);
        threadInfo[num_avail][pkgIdIndex] |= (drawer_id << 16);
#endif

        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
        continue;
#else
      }
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][pkgIdIndex] = val;
        continue;
      }
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][coreIdIndex] = val;
        continue;
#endif // KMP_OS_LINUX && USE_SYSFS_INFO
      }
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][threadIdIndex] = val;
        continue;
      }
      unsigned level;
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        // validate the input before using level:
        if (level > (unsigned)__kmp_xproc) { // level is too big
          level = __kmp_xproc;
        }
        if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][nodeIdIndex + level] = val;
        continue;
      }

      // We didn't recognize the leading token on the line. There are lots of
      // leading tokens that we don't recognize - if the line isn't empty, go on
      // to the next line.
      if ((*buf != 0) && (*buf != '\n')) {
        // If the line is longer than the buffer, read characters
        // until we find a newline.
        if (long_line) {
          int ch;
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
            ;
        }
        continue;
      }

      // A newline has signalled the end of the processor record.
      // Check that there aren't too many procs specified.
      if ((int)num_avail == __kmp_xproc) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_TooManyEntries;
        return false;
      }

      // Check for missing fields. The osId field must be there, and we
      // currently require that the physical id field is specified, also.
      if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingProcField;
        return false;
      }
      if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingPhysicalIDField;
        return false;
      }

      // Skip this proc if it is not included in the machine model.
      if (KMP_AFFINITY_CAPABLE() &&
          !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
                         __kmp_affin_fullMask)) {
        INIT_PROC_INFO(threadInfo[num_avail]);
        continue;
      }

      // We have a successful parse of this proc's info.
      // Increment the counter, and prepare for the next proc.
      num_avail++;
      KMP_ASSERT(num_avail <= num_records);
      INIT_PROC_INFO(threadInfo[num_avail]);
    }
    continue;

  no_val:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_MissingValCpuinfo;
    return false;

  dup_field:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
    return false;
  }
  *line = 0;

#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  // check for num_records == __kmp_xproc ???

  // If it is configured to omit the package level when there is only a single
  // package, the logic at the end of this routine won't work if there is only a
  // single thread
  KMP_ASSERT(num_avail > 0);
  KMP_ASSERT(num_avail <= num_records);

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields. pkgId's may be sparsely assigned among
  // the chips on a system. Although coreId's are usually assigned
  // [0 .. coresPerPkg-1] and threadId's are usually assigned
  // [0..threadsPerCore-1], we don't want to make any such assumptions.
  //
  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
  // total # packages) are at this point - we want to determine that now. We
  // only have an upper bound on the first two figures.
  unsigned *counts =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *maxCt =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *totals =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *lastId =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));

  bool assign_thread_ids = false;
  unsigned threadIdCt;
  unsigned index;

restart_radix_check:
  threadIdCt = 0;

  // Initialize the counter arrays with data from threadInfo[0].
  if (assign_thread_ids) {
    if (threadInfo[0][threadIdIndex] == UINT_MAX) {
      threadInfo[0][threadIdIndex] = threadIdCt++;
    } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
      threadIdCt = threadInfo[0][threadIdIndex] + 1;
    }
  }
  for (index = 0; index <= maxIndex; index++) {
    counts[index] = 1;
    maxCt[index] = 1;
    totals[index] = 1;
    lastId[index] = threadInfo[0][index];
  }

  // Run through the rest of the OS procs.
  for (i = 1; i < num_avail; i++) {
    // Find the most significant index whose id differs from the id for the
    // previous OS proc.
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        // Auto-assign the thread id field if it wasn't specified.
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        }
        // Apparently the thread id field was specified for some entries and not
        // others. Start the thread id counter off at the next higher thread id.
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
        }
      }
      if (threadInfo[i][index] != lastId[index]) {
        // Run through all indices which are less significant, and reset the
        // counts to 1. At all levels up to and including index, we need to
        // increment the totals and record the last id.
        unsigned index2;
        for (index2 = threadIdIndex; index2 < index; index2++) {
          totals[index2]++;
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          }
          counts[index2] = 1;
          lastId[index2] = threadInfo[i][index2];
        }
        counts[index]++;
        totals[index]++;
        lastId[index] = threadInfo[i][index];

        if (assign_thread_ids && (index > threadIdIndex)) {
#if KMP_MIC && REDUCE_TEAM_SIZE
          // The default team size is the total #threads in the machine
          // minus 1 thread for every core that has 3 or more threads.
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

          // Restart the thread counter, as we are on a new core.
          threadIdCt = 0;

          // Auto-assign the thread id field if it wasn't specified.
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          }

          // Apparently the thread id field was specified for some entries and
          // not others. Start the thread id counter off at the next higher
          // thread id.
          else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
          }
        }
        break;
      }
    }
    if (index < threadIdIndex) {
      // If thread ids were specified, it is an error if they are not unique.
      // Also, check that we haven't already restarted the loop (to be safe -
      // shouldn't need to).
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        __kmp_free(lastId);
        __kmp_free(totals);
        __kmp_free(maxCt);
        __kmp_free(counts);
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
        return false;
      }

      // If the thread ids were not specified and we see entries that
      // are duplicates, start the loop over and assign the thread ids manually.
      assign_thread_ids = true;
      goto restart_radix_check;
    }
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // The default team size is the total #threads in the machine
  // minus 1 thread for every core that has 3 or more threads.
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
    }
  }

  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
  nCoresPerPkg = maxCt[coreIdIndex];
  nPackages = totals[pkgIdIndex];

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly, and return now if affinity is
  // not enabled.
  __kmp_ncores = totals[coreIdIndex];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    return true;
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // Set the default team size.
  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
    __kmp_dflt_team_nth = teamSize;
    KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                  "__kmp_dflt_team_nth = %d\n",
                  __kmp_dflt_team_nth));
  }
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);

  // Count the number of levels which have more nodes at that level than at the
  // parent's level (with there being an implicit root node of the top level).
  // This is equivalent to saying that there is at least one node at this level
  // which has a sibling. These levels are in the map, and the package level is
  // always in the map.
  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  }
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;

  int depth = 0;
  int idx = 0;
  kmp_hw_t types[KMP_HW_LAST];
  int pkgLevel = -1;
  int coreLevel = -1;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (inMap[index]) {
      depth++;
    }
  }
  if (inMap[pkgIdIndex]) {
    pkgLevel = idx;
    types[idx++] = KMP_HW_SOCKET;
  }
  if (inMap[coreIdIndex]) {
    coreLevel = idx;
    types[idx++] = KMP_HW_CORE;
  }
  if (inMap[threadIdIndex]) {
    threadLevel = idx;
    types[idx++] = KMP_HW_THREAD;
  }
  KMP_ASSERT(depth > 0);

  // Construct the data structure that is to be returned.
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);

  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    int src_index;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();
    hw_thread.os_id = os;

    idx = 0;
    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
        continue;
      }
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
      }
    }
  }

  __kmp_free(inMap);
  __kmp_free(lastId);
  __kmp_free(totals);
  __kmp_free(maxCt);
  __kmp_free(counts);
  CLEANUP_THREAD_INFO;
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
    return false;
  }
  return true;
}
// Create and return a table of affinity masks, indexed by OS thread ID.
// This routine handles OR'ing together all the affinity masks of threads
// that are sufficiently close, if granularity > fine.
template <typename FindNextFunctionType>
static void __kmp_create_os_id_masks(unsigned *numUnique,
                                     kmp_affinity_t &affinity,
                                     FindNextFunctionType find_next) {
  // First form a table of affinity masks in order of OS thread id.
  int maxOsId;
  int i;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  int depth = __kmp_topology->get_depth();
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  KMP_ASSERT(numAddrs);
  KMP_ASSERT(depth);

  i = find_next(-1);
  // If could not find HW thread location with attributes, then return and
  // fallback to increment find_next and disregard core attributes.
  if (i >= numAddrs)
    return;

  maxOsId = 0;
  for (i = numAddrs - 1;; --i) {
    int osId = __kmp_topology->at(i).os_id;
    if (osId > maxOsId) {
      maxOsId = osId;
    }
    if (i == 0)
      break;
  }
  affinity.num_os_id_masks = maxOsId + 1;
  KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks);
  KMP_ASSERT(affinity.gran_levels >= 0);
  if (affinity.flags.verbose && (affinity.gran_levels > 0)) {
    KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels);
  }
  if (affinity.gran_levels >= (int)depth) {
    KMP_AFF_WARNING(affinity, AffThreadsMayMigrate);
  }

  // Run through the table, forming the masks for all threads on each core.
  // Threads on the same core will have identical kmp_hw_thread_t objects, not
  // considering the last level, which must be the thread id. All threads on a
  // core will appear consecutively.
  int unique = 0;
  int j = 0; // index of 1st thread on core
  int leader = 0;
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  KMP_CPU_ZERO(sum);

  i = j = leader = find_next(-1);
  KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  kmp_full_mask_modifier_t full_mask;
  for (i = find_next(i); i < numAddrs; i = find_next(i)) {
    // If this thread is sufficiently close to the leader (within the
    // granularity setting), then set the bit for this os thread in the
    // affinity mask for this group, and go on to the next thread.
    if (__kmp_topology->is_close(leader, i, affinity)) {
      KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
      continue;
    }

    // For every thread in this group, copy the mask to the thread's entry in
    // the OS Id mask table. Mark the first address as a leader.
    for (; j < i; j = find_next(j)) {
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
    }
    unique++;

    // Start a new mask.
    leader = i;
    full_mask.include(sum);
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  }

  // For every thread in last group, copy the mask to the thread's
  // entry in the OS Id mask table.
  for (; j < i; j = find_next(j)) {
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
  }
  full_mask.include(sum);
  unique++;
  KMP_CPU_FREE_FROM_STACK(sum);

  // See if the OS Id mask table further restricts or changes the full mask
  if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
    __kmp_topology->print(env_var);
  }

  *numUnique = unique;
}
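// Illustrative sketch (values assumed): with granularity=core on a machine
// that has 2 threads per core, OS procs 0 and 1 belong to the same core, so
// both os_id_masks[0] and os_id_masks[1] end up holding the two-bit mask
// {0,1} accumulated in `sum`, and proc 0 is marked as that group's leader.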
// Stuff for the affinity proclist parsers. It's easier to declare these vars
// as file-static than to try and pass them through the calling sequence of
// the recursive-descent OMP_PLACES parser.
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask)                                                        \
  {                                                                            \
    if (nextNewMask >= numNewMasks) {                                          \
      int i;                                                                   \
      numNewMasks *= 2;                                                        \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      }                                                                        \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
      newMasks = temp;                                                         \
    }                                                                          \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
    nextNewMask++;                                                             \
  }

#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
  {                                                                            \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId);                \
    } else {                                                                   \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
    }                                                                          \
  }

// Re-parse the proclist (for the explicit affinity type), and form the list
// of affinity newMasks indexed by gtid.
static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) {
  int i;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *proclist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = proclist;
  const char *next = proclist;

  // We use malloc() for the temporary mask vector, so that we can use
  // realloc() to extend it.
  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
  int setSize = 0;

  for (;;) {
    int start, end, stride;

    SKIP_WS(scan);
    next = scan;
    if (*next == '\0') {
      break;
    }

    if (*next == '{') {
      int num;
      setSize = 0; // number of different OS procs in the set
      next++; // skip '{'
      SKIP_WS(next);
      scan = next;

      // Read the first integer in the set.
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");

      // Copy the mask for that osId to the sum (union) mask.
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        KMP_CPU_ZERO(sumMask);
      } else {
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        setSize = 1;
      }

      for (;;) {
        // Check for end of set.
        SKIP_WS(next);
        if (*next == '}') {
          next++; // skip '}'
          break;
        }

        // Skip optional comma.
        if (*next == ',') {
          next++;
        }
        SKIP_WS(next);

        // Read the next integer in the set.
        scan = next;
        KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");

        // Add the mask for that osId to the sum mask.
        if ((num > maxOsId) ||
            (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        } else {
          KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
          setSize++;
        }
      }
      if (setSize > 0) {
        ADD_MASK(sumMask);
      }

      SKIP_WS(next);
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // Read the first integer.
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
    SKIP_WS(next);

    // If this isn't a range, then add a mask to the list and go on.
    if (*next != '-') {
      ADD_MASK_OSID(start, osId2Mask, maxOsId);

      // Skip optional comma.
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // This is a range. Skip over the '-' and read in the 2nd int.
    next++; // skip '-'
    SKIP_WS(next);
    scan = next;
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");

    // Check for a stride parameter
    stride = 1;
    SKIP_WS(next);
    if (*next == ':') {
      // A stride is specified. Skip over the ':' and read the 3rd int.
      int sign = +1;
      next++; // skip ':'
      SKIP_WS(next);
      scan = next;
      if (*next == '-') {
        sign = -1;
        next++;
        SKIP_WS(next);
        scan = next;
      }
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
      stride *= sign;
    }

    // Do some range checks.
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");

    // Add the mask for each OS proc # to the list.
    if (stride > 0) {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start <= end);
    } else {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start >= end);
    }

    // Skip optional comma.
    SKIP_WS(next);
    if (*next == ',') {
      next++;
    }
    scan = next;
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
}

/*-----------------------------------------------------------------------------
Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
places. Again, here is the grammar:

place_list := place
place_list := place , place_list
place := num
place := place : num
place := place : num : signed
place := { subplacelist }
place := ! place // (lowest priority)
subplace_list := subplace
subplace_list := subplace , subplace_list
subplace := num
subplace := num : num
subplace := num : num : signed
signed := num
signed := + signed
signed := - signed
-----------------------------------------------------------------------------*/
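
// Example expansions (a sketch, assuming OS procs 0-7 are all valid):
//   "{0,1},{2,3}" -> two places {0,1} and {2,3}
//   "{0,1}:3:2"   -> three places {0,1}, {2,3}, {4,5}; each step shifts every
//                    OS proc of the previous place by the stride (2)
//   "!{0}"        -> one place holding every valid proc except 0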

static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affinity_t &affinity, int maxOsId,
                                        kmp_affin_mask_t *tempMask,
                                        int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  for (;;) {
    int start, count, stride, i;

    // Read in the starting proc id
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(start >= 0);
    *scan = next;

    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
      } else {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
        (*setSize)++;
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read count parameter
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(count >= 0);
    *scan = next;

    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start++;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read stride parameter
    int sign = +1;
    for (;;) {
      SKIP_WS(*scan);
      if (**scan == '+') {
        (*scan)++; // skip '+'
        continue;
      }
      if (**scan == '-') {
        sign *= -1;
        (*scan)++; // skip '-'
        continue;
      }
      break;
    }
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(stride >= 0);
    *scan = next;
    stride *= sign;

    // valid follow sets are ',' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start += stride;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
}
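
// Subplace sketch: inside "{...}" the triple L:C:S selects C procs starting
// at OS proc L with stride S; e.g. subplace "0:4:2" (assuming procs 0-7 are
// valid) unions procs 0, 2, 4, and 6 into the enclosing place's mask.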

static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity,
                                int maxOsId, kmp_affin_mask_t *tempMask,
                                int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  // valid follow sets are '{' '!' and num
  SKIP_WS(*scan);
  if (**scan == '{') {
    (*scan)++; // skip '{'
    __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    (*scan)++; // skip '}'
  } else if (**scan == '!') {
    (*scan)++; // skip '!'
    __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask);
  } else if ((**scan >= '0') && (**scan <= '9')) {
    next = *scan;
    SKIP_DIGITS(next);
    int num = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(num >= 0);
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
      (*setSize)++;
    }
    *scan = next; // skip num
  } else {
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
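
// Negation sketch: for "!{0,1}" the recursive call accumulates {0,1} into
// tempMask, and KMP_CPU_COMPLEMENT then flips it against OS procs 0..maxOsId,
// yielding a place with every valid proc except 0 and 1.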

void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) {
  int i, j, count, stride, sign;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *placelist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = placelist;
  const char *next = placelist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;

  // tempMask is modified based on the previous or initial
  //   place to form the current place
  // previousMask contains the previous place
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  int setSize = 0;

  for (;;) {
    __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      if (setSize > 0) {
        ADD_MASK(tempMask);
      }
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      if (*scan == '\0') {
        break;
      }
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    scan++; // skip ':'

    // Read count parameter
    SKIP_WS(scan);
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
    KMP_ASSERT(count >= 0);
    scan = next;

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      stride = +1;
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      scan++; // skip ':'

      // Read stride parameter
      sign = +1;
      for (;;) {
        SKIP_WS(scan);
        if (*scan == '+') {
          scan++; // skip '+'
          continue;
        }
        if (*scan == '-') {
          sign *= -1;
          scan++; // skip '-'
          continue;
        }
        break;
      }
      SKIP_WS(scan);
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_DEBUG_ASSERT(stride >= 0);
      scan = next;
      stride *= sign;
    }

    // Add places determined by initial_place : count : stride
    for (i = 0; i < count; i++) {
      if (setSize == 0) {
        break;
      }
      // Add the current place, then build the next place (tempMask) from that
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          if (i < count - 1) {
            KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride);
          }
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
        setSize++;
      }
    }
    KMP_CPU_ZERO(tempMask);
    setSize = 0;

    // valid follow sets are ',' and EOL
    SKIP_WS(scan);
    if (*scan == '\0') {
      break;
    }
    if (*scan == ',') {
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}

#undef ADD_MASK
#undef ADD_MASK_OSID

// This function figures out the deepest level at which there is at least one
// cluster/core with more than one processing unit bound to it.
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  int core_level = 0;

  for (int i = 0; i < nprocs; i++) {
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          core_level = j - 1;
        }
      }
    }
  }
  return core_level;
}

// This function counts number of clusters/cores at given level.
static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
}

// This function finds to which cluster/core given processing unit is bound.
static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  int core = 0;
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
          core++;
          break;
        }
      }
    }
  }
  return core;
}

// This function finds maximal number of processing units bound to a
// cluster/core at given level.
static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
}
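
// Worked example (a sketch): on a uniform 2-socket x 4-core x 2-thread
// machine with bottom_level == 2, some hw thread has ids[2] > 0, so
// __kmp_affinity_find_core_level() returns 1 (the core level);
// __kmp_affinity_compute_ncores() then reports 8 cores, and
// __kmp_affinity_max_proc_per_core() reports 2 procs per core.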

static int *procarr = NULL;
static int __kmp_aff_depth = 0;
static int *__kmp_osid_to_hwthread_map = NULL;

static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask,
                                                  kmp_affinity_ids_t &ids,
                                                  kmp_affinity_attrs_t &attrs) {
  if (!KMP_AFFINITY_CAPABLE())
    return;

  // Initialize ids and attrs thread data
  for (int i = 0; i < KMP_HW_LAST; ++i)
    ids.ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
  attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  // Iterate through each os id within the mask and determine
  // the topology id and attribute information
  int cpu;
  int depth = __kmp_topology->get_depth();
  KMP_CPU_SET_ITERATE(cpu, mask) {
    int osid_idx = __kmp_osid_to_hwthread_map[cpu];
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(osid_idx);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = __kmp_topology->get_type(level);
      int id = hw_thread.sub_ids[level];
      if (ids.ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids.ids[type] == id) {
        ids.ids[type] = id;
      } else {
        // This mask spans across multiple topology units, set it as such
        // and mark every level below as such as well.
        ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        for (; level < depth; ++level) {
          kmp_hw_t type = __kmp_topology->get_type(level);
          ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        }
      }
    }
    if (!attrs.valid) {
      attrs.core_type = hw_thread.attrs.get_core_type();
      attrs.core_eff = hw_thread.attrs.get_core_eff();
      attrs.valid = 1;
    } else {
      // This mask spans across multiple attributes, set it as such
      if (attrs.core_type != hw_thread.attrs.get_core_type())
        attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      if (attrs.core_eff != hw_thread.attrs.get_core_eff())
        attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF;
    }
  }
}
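
// Sketch of the MULTIPLE_ID rule above: a mask covering exactly the two hw
// threads of one core keeps concrete socket/core ids and gets MULTIPLE_ID
// only at the thread level; a mask that also touches a second core demotes
// the core level, and every level below it, to MULTIPLE_ID as well.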

static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  const kmp_affin_mask_t *mask = th->th.th_affin_mask;
  kmp_affinity_ids_t &ids = th->th.th_topology_ids;
  kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs;
  __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
}

// Assign the topology information to each place in the place list.
// A thread can then grab not only its affinity mask, but the topology
// information associated with that mask, e.g., which socket a thread is on.
static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  if (affinity.type != affinity_none) {
    KMP_ASSERT(affinity.num_os_id_masks);
    KMP_ASSERT(affinity.os_id_masks);
  }
  KMP_ASSERT(affinity.num_masks);
  KMP_ASSERT(affinity.masks);
  KMP_ASSERT(__kmp_affin_fullMask);

  int max_cpu = __kmp_affin_fullMask->get_max_cpu();
  int num_hw_threads = __kmp_topology->get_num_hw_threads();

  // Allocate thread topology information
  if (!affinity.ids) {
    affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(
        sizeof(kmp_affinity_ids_t) * affinity.num_masks);
  }
  if (!affinity.attrs) {
    affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate(
        sizeof(kmp_affinity_attrs_t) * affinity.num_masks);
  }
  if (!__kmp_osid_to_hwthread_map) {
    // Want the +1 because max_cpu should be valid index into map
    __kmp_osid_to_hwthread_map =
        (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1));
  }

  // Create the OS proc to hardware thread map
  for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread) {
    int os_id = __kmp_topology->at(hw_thread).os_id;
    if (KMP_CPU_ISSET(os_id, __kmp_affin_fullMask))
      __kmp_osid_to_hwthread_map[os_id] = hw_thread;
  }

  for (unsigned i = 0; i < affinity.num_masks; ++i) {
    kmp_affinity_ids_t &ids = affinity.ids[i];
    kmp_affinity_attrs_t &attrs = affinity.attrs[i];
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i);
    __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
  }
}
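
// Sketch: if the OS numbers hw threads non-contiguously (say 0, 4, 1, 5,
// ...), the map built above translates a mask bit (an OS proc id) back to
// its row in __kmp_topology, which is what the per-mask loop above and
// __kmp_affinity_get_mask_topology_info() rely on.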

// Called when __kmp_topology is ready
static void __kmp_aux_affinity_initialize_other_data(kmp_affinity_t &affinity) {
  // Initialize other data structures which depend on the topology
  if (__kmp_topology && __kmp_topology->get_num_hw_threads()) {
    machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
    __kmp_affinity_get_topology_info(affinity);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
    __kmp_first_osid_with_ecore = __kmp_get_first_osid_with_ecore();
#endif
  }
}

// Create a one element mask array (set of places) which only contains the
// initial process's affinity mask
static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(affinity.type == affinity_none);
  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
  affinity.num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
  __kmp_aux_affinity_initialize_other_data(affinity);
}

static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) {
  // Create the "full" mask - this defines all of the processors that we
  // consider to be in the machine model. If respect is set, then it is the
  // initialization thread's affinity mask. Otherwise, it is all processors
  // that we know about on the machine.
  int verbose = affinity.flags.verbose;
  const char *env_var = affinity.env_var;

  // Already initialized
  if (__kmp_affin_fullMask && __kmp_affin_origMask)
    return;

  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (__kmp_affin_origMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_origMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    // Make a copy before possible expanding to the entire machine mask
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
    if (affinity.flags.respect) {
      // Count the number of available processors.
      unsigned i;
      __kmp_avail_proc = 0;
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        __kmp_avail_proc++;
      }
      if (__kmp_avail_proc > __kmp_xproc) {
        KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
        affinity.type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }

      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetRespect, env_var, buf);
      }
    } else {
      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetNotRespect, env_var, buf);
      }
      __kmp_avail_proc =
          __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups <= 1) {
        // Copy expanded full mask if topology has single processor group
        __kmp_affin_origMask->copy(__kmp_affin_fullMask);
      }
      // Set the process affinity mask since threads' affinity
      // masks must be subset of process mask in Windows* OS
      __kmp_affin_fullMask->set_process_affinity(true);
#endif
    }
  }
}
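
// Sketch of respect vs. norespect (assuming a 16-proc machine and a process
// started under "taskset -c 0-7"): with respect, the full mask stays 0-7 and
// __kmp_avail_proc becomes 8; with norespect, the full mask is expanded to
// the whole machine (0-15), while origMask still holds the 0-7 starting mask
// (outside the Windows single-group special case) so it can be restored at
// uninitialize time.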

static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) {
  bool success = false;
  const char *env_var = affinity.env_var;
  kmp_i18n_id_t msg_id = kmp_i18n_null;
  int verbose = affinity.flags.verbose;

  // For backward compatibility, setting KMP_CPUINFO_FILE =>
  // KMP_TOPOLOGY_METHOD=cpuinfo
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }

  if (__kmp_affinity_top_method == affinity_top_method_all) {
    // In the default code path, errors are not fatal - we just try using
    // another method. We only emit a warning message if affinity is on, or
    // the verbose flag is set, and the nowarnings flag was not set.
#if KMP_USE_HWLOC
    if (!success &&
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && verbose) {
          KMP_INFORM(AffIgnoringHwloc, env_var);
        }
      } else if (verbose) {
        KMP_INFORM(AffIgnoringHwloc, env_var);
      }
    }
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#if KMP_OS_LINUX
    if (!success) {
      int line = 0;
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_OS_LINUX */

#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_GROUP_AFFINITY */

    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
      KMP_ASSERT(success);
    }
  }

// If the user has specified that a particular topology discovery method is to
// be used, then we abort if that method fails. The exception is group
// affinity, which might have been implicitly set.
#if KMP_USE_HWLOC
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif // KMP_USE_HWLOC

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    int line = 0;
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      const char *filename = __kmp_cpuinfo_get_filename();
      if (line > 0) {
        KMP_FATAL(FileLineMsgExiting, filename, line,
                  __kmp_i18n_catgets(msg_id));
      } else {
        KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
      }
    }
  }

#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    KMP_ASSERT(success);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_GROUP_AFFINITY */

  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // Should not fail.
    KMP_ASSERT(success);
  }

  // Early exit if topology could not be created
  if (!__kmp_topology) {
    if (KMP_AFFINITY_CAPABLE()) {
      KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
    }
    if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
        __kmp_ncores > 0) {
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
      if (verbose) {
        __kmp_topology->print(env_var);
      }
    }
    return false;
  }

  // Canonicalize, print (if requested), apply KMP_HW_SUBSET
  __kmp_topology->canonicalize();
  if (verbose)
    __kmp_topology->print(env_var);
  bool filtered = __kmp_topology->filter_hw_subset();
  if (filtered && verbose)
    __kmp_topology->print("KMP_HW_SUBSET");
  return success;
}

static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) {
  bool is_regular_affinity = (&affinity == &__kmp_affinity);
  bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity);
  const char *env_var = __kmp_get_affinity_env_var(affinity);

  if (affinity.flags.initialized) {
    KMP_ASSERT(__kmp_affin_fullMask != NULL);
    return;
  }

  if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask))
    __kmp_aux_affinity_initialize_masks(affinity);

  if (is_regular_affinity && !__kmp_topology) {
    bool success = __kmp_aux_affinity_initialize_topology(affinity);
    if (success) {
      KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
    } else {
      affinity.type = affinity_none;
      KMP_AFFINITY_DISABLE();
    }
  }

  // If KMP_AFFINITY=none, then only create the single "none" place
  // which is the process's initial affinity mask or all of the hardware
  // threads, depending on respect/norespect
  if (affinity.type == affinity_none) {
    __kmp_create_affinity_none_places(affinity);
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    affinity.flags.initialized = TRUE;
    return;
  }

  __kmp_topology->set_granularity(affinity);
  int depth = __kmp_topology->get_depth();

  // Create the table of masks, indexed by thread Id.
  unsigned numUnique = 0;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  // If OMP_PLACES=cores:<attribute> specified, then attempt
  // to make OS Id mask table using those attributes
  if (affinity.core_attr_gran.valid) {
    __kmp_create_os_id_masks(&numUnique, affinity, [&](int idx) {
      KMP_ASSERT(idx >= -1);
      for (int i = idx + 1; i < numAddrs; ++i)
        if (__kmp_topology->at(i).attrs.contains(affinity.core_attr_gran))
          return i;
      return numAddrs;
    });
    if (!affinity.os_id_masks) {
      const char *core_attribute;
      if (affinity.core_attr_gran.core_eff != kmp_hw_attr_t::UNKNOWN_CORE_EFF)
        core_attribute = "core_efficiency";
      else
        core_attribute = "core_type";
      KMP_AFF_WARNING(affinity, AffIgnoringNotAvailable, env_var,
                      core_attribute,
                      __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
    }
  }
  // If core attributes did not work, or none were specified,
  // then make OS Id mask table using typical incremental way.
  if (!affinity.os_id_masks) {
    __kmp_create_os_id_masks(&numUnique, affinity, [](int idx) {
      KMP_ASSERT(idx >= -1);
      return idx + 1;
    });
  }
  if (affinity.gran_levels == 0) {
    KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
  }

  switch (affinity.type) {

  case affinity_explicit:
    KMP_DEBUG_ASSERT(affinity.proclist != NULL);
    if (is_hidden_helper_affinity ||
        __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
      __kmp_affinity_process_proclist(affinity);
    } else {
      __kmp_affinity_process_placelist(affinity);
    }
    if (affinity.num_masks == 0) {
      KMP_AFF_WARNING(affinity, AffNoValidProcID);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    }
    break;

  // The other affinity types rely on sorting the hardware threads according to
  // some permutation of the machine topology tree. Set affinity.compact
  // and affinity.offset appropriately, then jump to a common code
  // fragment to do the sort and create the array of affinity masks.
  case affinity_logical:
    affinity.compact = 0;
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      affinity.compact = 1;
      if (affinity.compact >= depth) {
        affinity.compact = 0;
      }
    } else {
      affinity.compact = 0;
    }
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_scatter:
    if (affinity.compact >= depth) {
      affinity.compact = 0;
    } else {
      affinity.compact = depth - 1 - affinity.compact;
    }
    goto sortTopology;

  case affinity_compact:
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }
    goto sortTopology;

  case affinity_balanced:
    if (depth <= 1 || is_hidden_helper_affinity) {
      KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further usage
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
        affinity.type = affinity_none;
        __kmp_create_affinity_none_places(affinity);
        affinity.flags.initialized = TRUE;
        return;
      }

      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
    }
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }

  sortTopology:
    // Allocate the gtid->affinity mask table.
    if (affinity.flags.dups) {
      affinity.num_masks = __kmp_avail_proc;
    } else {
      affinity.num_masks = numUnique;
    }

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < affinity.num_masks) &&
        !is_hidden_helper_affinity) {
      affinity.num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);

    // Sort the topology table according to the current setting of
    // affinity.compact, then fill out affinity.masks.
    __kmp_topology->sort_compact(affinity);
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      kmp_full_mask_modifier_t full_mask;
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId);
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        full_mask.include(src);
        if (++j >= affinity.num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == affinity.num_masks);
      // See if the places list further restricts or changes the full mask
      if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
        __kmp_topology->print(env_var);
      }
    }
    // Sort the topology back using ids
    __kmp_topology->sort_ids();
    break;

  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }
  __kmp_aux_affinity_initialize_other_data(affinity);
  affinity.flags.initialized = TRUE;
}

void __kmp_affinity_initialize(kmp_affinity_t &affinity) {
  // Much of the code above was written assuming that if a machine was not
  // affinity capable, then affinity type == affinity_none.
  // We now explicitly represent this as affinity type == affinity_disabled.
  // There are too many checks for affinity type == affinity_none in this code.
  // Instead of trying to change them all, check if
  // affinity type == affinity_disabled, and if so, slam it with affinity_none,
  // call the real initialization routine, then restore affinity type to
  // affinity_disabled.
  int disabled = (affinity.type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE())
    KMP_ASSERT(disabled);
  if (disabled)
    affinity.type = affinity_none;
  __kmp_aux_affinity_initialize(affinity);
  if (disabled)
    affinity.type = affinity_disabled;
}

void __kmp_affinity_uninitialize(void) {
  for (kmp_affinity_t *affinity : __kmp_affinities) {
    if (affinity->masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks);
    if (affinity->os_id_masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks);
    if (affinity->proclist != NULL)
      __kmp_free(affinity->proclist);
    if (affinity->ids != NULL)
      __kmp_free(affinity->ids);
    if (affinity->attrs != NULL)
      __kmp_free(affinity->attrs);
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  }
  if (__kmp_affin_origMask != NULL) {
    if (KMP_AFFINITY_CAPABLE()) {
      __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
    }
    KMP_CPU_FREE(__kmp_affin_origMask);
    __kmp_affin_origMask = NULL;
  }
  __kmp_affinity_num_places = 0;
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
  if (__kmp_osid_to_hwthread_map) {
    __kmp_free(__kmp_osid_to_hwthread_map);
    __kmp_osid_to_hwthread_map = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}

static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity,
                                      int *place, kmp_affin_mask_t **mask) {
  int mask_idx;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
  if (is_hidden_helper)
    // The first gtid is the regular primary thread, the second gtid is the
    // main thread of the hidden team, which does not participate in task
    // execution.
    mask_idx = gtid - 2;
  else
    mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
  KMP_DEBUG_ASSERT(affinity->num_masks > 0);
  *place = (mask_idx + affinity->offset) % affinity->num_masks;
  *mask = KMP_CPU_INDEX(affinity->masks, *place);
}
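
// Sketch: with num_masks == 4 and offset == 1, regular worker threads land
// on places 1, 2, 3, 0, 1, ... in round-robin order; hidden helper threads
// index the same table starting from gtid - 2, skipping the helper main
// thread, which does not execute tasks.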

// This function initializes the per-thread data concerning affinity including
// the mask and topology information
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  // Set the thread topology information to default of unknown
  for (int id = 0; id < KMP_HW_LAST; ++id)
    th->th.th_topology_ids.ids[id] = kmp_hw_thread_t::UNKNOWN_ID;
  th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }

  // Copy the thread mask to the kmp_info_t structure. If
  // __kmp_affinity.type == affinity_none, copy the "full" mask, i.e.
  // one that has all of the OS proc ids set, or if
  // __kmp_affinity.flags.respect is set, then the full mask is the
  // same as the mask of the initialization thread.
  kmp_affin_mask_t *mask;
  int i;
  const kmp_affinity_t *affinity;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;

  if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) {
    if ((affinity->type == affinity_none) ||
        (affinity->type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  } else {
    if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  }

  th->th.th_current_place = i;
  if (isa_root && !is_hidden_helper) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a Non-OMP_PROC_BIND affinity method,
    // set all threads' place-partition-var to the entire place list
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  }
  // Copy topology information associated with the place
  if (i >= 0) {
    th->th.th_topology_ids = __kmp_affinity.ids[i];
    th->th.th_topology_attrs = __kmp_affinity.attrs[i];
  }

  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to all places\n",
                   gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);
}

void __kmp_affinity_bind_init_mask(int gtid) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }
  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
  const kmp_affinity_t *affinity;
  const char *env_var;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;
  env_var = __kmp_get_affinity_env_var(*affinity, /*for_binding=*/true);
  /* to avoid duplicate printing (will be correctly printed on barrier) */
  if (affinity->flags.verbose && (affinity->type == affinity_none ||
                                  (th->th.th_current_place != KMP_PLACE_ALL &&
                                   affinity->type != affinity_balanced)) &&
      !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
               gtid, buf);
  }

#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed. If the user
  // didn't request affinity and this call fails, just continue silently.
  if (affinity->type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}

void __kmp_affinity_bind_place(int gtid) {
  // Hidden helper threads should not be affected by OMP_PLACES/OMP_PROC_BIND
  if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_bind_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));

  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }

  // Copy the thread mask to the kmp_info_t structure,
  // and set this thread's affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;

  if (__kmp_affinity.flags.verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}

int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }

  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity.num_masks - 1;

  // Turn off 4.0 affinity for the current thread at this parallel level.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}

int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
#if KMP_OS_WINDOWS || KMP_DEBUG
  kmp_info_t *th;
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
#if KMP_OS_WINDOWS || KMP_DEBUG
  th = __kmp_threads[gtid];
#else
  (void)gtid; // unused variable
#endif
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  th->th.th_affin_mask);
        __kmp_debug_printf(
            "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
            buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }

#if !KMP_OS_WINDOWS

  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
            buf);
      });
  return retval;

#else
  (void)retval;

  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;

#endif /* KMP_OS_WINDOWS */
}

int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}

int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}

int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}

int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }

  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}

#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
// Returns first os proc id with ATOM core
int __kmp_get_first_osid_with_ecore(void) {
  int low = 0;
  int high = __kmp_topology->get_num_hw_threads() - 1;
  int mid = 0;
  while (high - low > 1) {
    mid = (high + low) / 2;
    if (__kmp_topology->at(mid).attrs.get_core_type() ==
        KMP_HW_CORE_TYPE_CORE) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  if (__kmp_topology->at(mid).attrs.get_core_type() == KMP_HW_CORE_TYPE_ATOM) {
    return __kmp_topology->at(mid).os_id;
  }
  return -1;
}
#endif
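
// Note on the search above (as reconstructed here): it presumes the topology
// rows are sorted so performance (Core) hw threads precede efficiency (Atom)
// ones; e.g. with rows 0-15 Core and 16-23 Atom it converges on row 16 and
// returns that row's os_id, and it returns -1 when no Atom core exists.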

// Dynamic affinity settings - Affinity balanced
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;
  const char *env_var = "KMP_AFFINITY";

  // Do not perform balanced affinity for the hidden helper threads
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity.gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1) {
      fine_gran = false;
    }
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1) {
      fine_gran = false;
    }
    break;
  default:
    fine_gran = false;
  }

  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyper threads per core in HT machine
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core
    int chunk = nthreads / ncores;
    // How many cores will have an additional thread bound to it - "big cores"
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
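
    // Worked example (a sketch): nthreads == 10 on 4 uniform cores gives
    // chunk == 2, big_cores == 2 and big_nth == 6, so tids 0-5 share the two
    // "big" cores three apiece (coreID = tid / 3), while tids 6-9 land on the
    // remaining two cores two apiece.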
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID;
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
                 tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology
    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);

    // For performance gain consider the special case nthreads ==
    // __kmp_avail_proc
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check if this core from procarr[] is in the mask
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (in_mask) {
          if (tid == core) {
            for (int j = 0; j < nth_per_core; j++) {
              int osID = procarr[i * nth_per_core + j];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
                // For fine granularity it is enough to set the first available
                // osID for this core
                if (fine_gran) {
                  break;
                }
              }
            }
            break;
          } else {
            core++;
          }
        }
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with "x" available processors
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with # procs from x to nth_per_core
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }

      // Max number of processors
      int nproc = nth_per_core * ncores;
      // An array to keep number of threads per each context
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip the core with 0 processors
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else {
                  if (flag != 0) {
                    newarr[i * nth_per_core + k]++;
                    cnt--;
                    nth--;
                    break;
                  }
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
      __kmp_free(newarr);
    }

    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
                 tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  }
}

#if KMP_OS_LINUX || KMP_OS_FREEBSD
// We don't need this entry for Windows because
// there is GetProcessAffinityMask() api
//
// The intended usage is indicated by these steps:
// 1) The user gets the current affinity mask
// 2) Then sets the affinity by calling this function
// 3) Error check the return value
// 4) Use non-OpenMP parallelization
// 5) Reset the affinity to what was stored in step 1)
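//
// A usage sketch (hypothetical caller code, not part of the runtime; the
// kmp_*_affinity* entry points are the public KMP affinity API):
//
//   kmp_affinity_mask_t saved;
//   kmp_create_affinity_mask(&saved);
//   kmp_get_affinity(&saved);                    // 1) save the current mask
//   if (kmp_set_thread_affinity_mask_initial() == 0) { // 2) widen the mask
//     run_non_openmp_parallel_part();            // 4) hypothetical helper
//   }
//   kmp_set_affinity(&saved);                    // 5) restore the saved mask
//   kmp_destroy_affinity_mask(&saved);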
int kmp_set_thread_affinity_mask_initial()
// the function returns 0 on success,
//   -1 if we cannot bind thread
//   >0 (errno) if an error happened during binding
{
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-omp threads
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
}
#endif

#endif // KMP_AFFINITY_SUPPORTED