/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
#if KMP_AFFINITY_SUPPORTED
// Helper class to see if place lists further restrict the fullMask
class kmp_full_mask_modifier_t {
  kmp_affin_mask_t *mask;

public:
  kmp_full_mask_modifier_t() {
    KMP_CPU_ALLOC(mask);
    KMP_CPU_ZERO(mask);
  }
  ~kmp_full_mask_modifier_t() {
    KMP_CPU_FREE(mask);
    mask = nullptr;
  }
  void include(const kmp_affin_mask_t *other) { KMP_CPU_UNION(mask, other); }
  // If the new full mask is different from the current full mask,
  // then switch them. Returns true if full mask was affected, false otherwise.
  bool restrict_to_mask() {
    // See if the new mask further restricts or changes the full mask
    if (KMP_CPU_EQUAL(__kmp_affin_fullMask, mask) || KMP_CPU_ISEMPTY(mask))
      return false;
    return __kmp_topology->restrict_to_mask(mask);
  }
};
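// Usage sketch (illustrative, not from this file): a caller assembling the
// union of all place-list masks might do
//   kmp_full_mask_modifier_t full_mask;
//   // for each parsed place mask m: full_mask.include(m);
//   // if (full_mask.restrict_to_mask()) the fullMask was narrowed
// so the topology only shrinks when the places cover fewer procs than before.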
static inline const char *
__kmp_get_affinity_env_var(const kmp_affinity_t &affinity,
                           bool for_binding = false) {
  if (affinity.flags.omp_places) {
    if (for_binding)
      return "OMP_PROC_BIND";
    return "OMP_PLACES";
  }
  return affinity.env_var;
}
#endif // KMP_AFFINITY_SUPPORTED
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need to
  // init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
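// Illustrative note (assumed reading of hierarchy_info's contract, not from
// this file): after this call a hierarchical barrier can locate a thread's
// parent at each level from thr_bar->skip_per_level without re-walking
// machine_hierarchy; numPerLevel[0] - 1 is the number of leaf-level children
// a lowest-level parent waits on, hence base_leaf_kids.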
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return KMP_I18N_STR(Unknown);
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return ((plural) ? "unknowns" : "unknown");
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
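// Illustrative note: these keywords are the spellings used when topology
// layers are named in environment settings such as KMP_HW_SUBSET; e.g. a
// request like
//   KMP_HW_SUBSET=2s,4c,2t   (2 sockets, 4 cores/socket, 2 threads/core)
// ultimately resolves to the socket/core/thread layers named by this table
// (example value is illustrative; the parsing happens elsewhere).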
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
  case KMP_HW_MAX_NUM_CORE_TYPES:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity
// verbose and warning flags before printing warning
#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {    \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif
////////////////////////////////////////////////////////////////////////////////
// kmp_hw_thread_t methods
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    // Reverse sort (higher efficiencies earlier in list) cores by core
    // efficiency if available.
    if (__kmp_is_hybrid_cpu() &&
        __kmp_topology->get_type(level) == KMP_HW_CORE &&
        ahwthread->attrs.is_core_eff_valid() &&
        bhwthread->attrs.is_core_eff_valid()) {
      if (ahwthread->attrs.get_core_eff() < bhwthread->attrs.get_core_eff())
        return 1;
      if (ahwthread->attrs.get_core_eff() > bhwthread->attrs.get_core_eff())
        return -1;
    }
    if (ahwthread->ids[level] == bhwthread->ids[level])
      continue;
    // If the hardware id is unknown for this level, then place hardware thread
    // further down in the sorted list as it should take last priority
    if (ahwthread->ids[level] == UNKNOWN_ID)
      return 1;
    else if (bhwthread->ids[level] == UNKNOWN_ID)
      return -1;
    else if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
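// Illustrative ordering on a hybrid CPU (assumed data): if efficiency 1 marks
// performance cores and 0 marks efficiency cores, a thread with eff=1 compares
// less than one with eff=0, so higher-efficiency cores sort earlier; within
// equal efficiency, lower ids (and finally lower os_id) come first, and
// UNKNOWN_ID sinks to the end of the list.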
#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
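// Example (illustrative): with depth == 3 and compact == 1, the innermost
// sub_id level (thread, j == depth - 1) is compared first, then the remaining
// levels from the top down (socket, then core), which produces the
// KMP_AFFINITY=compact-style thread-major ordering.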
void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d (%d) ", ids[i], sub_ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  printf("\n");
}
////////////////////////////////////////////////////////////////////////////////
// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., so no object has more than one parent)
void kmp_topology_t::insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find target layer
  // If new layer is equal to another layer then put the new layer above
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything to accommodate the new
  // layer. And put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
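// Worked example (illustrative): starting from a SOCKET x CORE x THREAD
// topology with per-thread NUMA ids that change only when the socket id
// changes, the scan above finds it is strictly above the CORE layer, so the
// new layer lands directly below SOCKET, giving
//   SOCKET x NUMA x CORE x THREAD
// with every hw_thread's id array shifted down by one slot.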
#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);

  // sort topology after adding proc groups
  __kmp_topology->sort_ids();
}
#endif
// Remove layers that don't add information to the topology.
// This is done by having the layer take on the id = UNKNOWN_ID (-1)
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associative array
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads) to
    // be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        keep_type = type1;
        remove_layer = remove_layer_ids = top_index2;
      } else {
        remove_type = type1;
        keep_type = type2;
        remove_layer = remove_layer_ids = top_index1;
      }
      // If all the indexes for the second (deeper) layer are the same.
      // e.g., all are zero, then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove radix one type by setting the equivalence, removing the id from
      // the hw threads and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
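// Example (illustrative): if every socket contains exactly one L3, the L3
// layer is radix-1 relative to SOCKET; the preference table keeps SOCKET
// (110 > 70), records equivalent[KMP_HW_L3] = KMP_HW_SOCKET, and the L3
// layer is spliced out of types[] and out of every hw_thread's ids[].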
void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}
// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l) {
          if (hw_thread.ids[l] != kmp_hw_thread_t::UNKNOWN_ID)
            count[l]++;
        }
        // Keep track of topology layer ratio statistics
        if (hw_thread.ids[layer] != kmp_hw_thread_t::UNKNOWN_ID)
          max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
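// Worked example (illustrative, uniform machine): for 2 sockets x 4 cores x
// 2 threads the loop above produces count = {2, 8, 16} (running totals per
// layer) and ratio = {2, 4, 2} (maximum number of children per parent at
// each layer).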
int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}
// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
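// The topology is uniform exactly when the product of the per-layer ratios
// equals the total hardware thread count. Illustrative: 2 sockets * 4 cores *
// 2 threads = 16 == count[depth - 1] -> uniform; a hybrid part whose P-cores
// carry 2 threads each but whose E-cores carry 1 makes the ratio product
// overshoot the real count -> non-uniform.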
// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
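// Example (illustrative): four hw threads whose core-level ids run 3,3,5,5
// within one socket get core-level sub_ids 0,0,1,1 -- sub ids renumber each
// layer contiguously from zero and reset whenever an outer layer's id
// changes.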
void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}
kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  retval->compact = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
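// Memory layout of the single allocation above (sketch):
//   [ kmp_topology_t | nproc * kmp_hw_thread_t | types | ratio | count ]
// where types, ratio, and count each occupy KMP_HW_LAST ints, so one
// __kmp_free() in deallocate() releases everything at once.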
void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}
bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}
void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}
void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/socket
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      if (hw_threads[i].ids[level] == kmp_hw_thread_t::UNKNOWN_ID)
        continue;
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s) ",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  // If requested hybrid CPU attributes for granularity (either OMP_PLACES or
  // KMP_AFFINITY), but none exist, then reset granularity and have below method
  // select a granularity and warn user.
  if (!__kmp_is_hybrid_cpu()) {
    if (affinity.core_attr_gran.valid) {
      // OMP_PLACES with cores:<attribute> but non-hybrid arch, use cores
      // instead
      KMP_AFF_WARNING(
          affinity, AffIgnoringNonHybrid, env_var,
          __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    } else if (affinity.flags.core_types_gran ||
               affinity.flags.core_effs_gran) {
      // OMP_PLACES=core_types|core_effs but non-hybrid, use cores instead
      if (affinity.flags.omp_places) {
        KMP_AFF_WARNING(
            affinity, AffIgnoringNonHybrid, env_var,
            __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      } else {
        // KMP_AFFINITY=granularity=core_type|core_eff,...
        KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                        "Intel(R) Hybrid Technology core attribute",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE));
      }
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    }
  }
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists, and the level of
    // granularity specified by the user is too coarse, then the
    // granularity must be adjusted "down" to processor group affinity
    // because threads can only exist within one processor group.
    // For example, if a user sets granularity=socket and there are two
    // processor groups that cover a socket, then the runtime must
    // restrict the granularity down to the processor group level.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
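// Example (illustrative): for a SOCKET x CORE x THREAD topology,
// granularity=core yields gran_levels == 1 (only the THREAD level is ignored
// when grouping procs), granularity=thread yields gran_levels == 0, and an
// unsupported request falls back through core, thread, then socket with a
// warning naming the substitute.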
void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually Add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}
// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  //__kmp_avail_proc = __kmp_xproc;
  _discover_uniformity();
}
#if KMP_AFFINITY_SUPPORTED
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
bool kmp_topology_t::restrict_to_mask(const kmp_affin_mask_t *mask) {
  // Apply the filter
  bool affected;
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    int os_id = hw_threads[i].os_id;
    if (KMP_CPU_ISSET(os_id, mask)) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
      KMP_CPU_CLR(os_id, __kmp_affin_fullMask);
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  affected = (num_hw_threads != new_index);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  if (affected) {
    _gather_enumeration_information();
    _discover_uniformity();
    _set_globals();
    _set_last_level_cache();
#if KMP_OS_WINDOWS
    // Copy filtered full mask if topology has single processor group
    if (__kmp_num_proc_groups <= 1)
#endif
      __kmp_affin_origMask->copy(__kmp_affin_fullMask);
  }
  return affected;
}
// Apply the KMP_HW_SUBSET envirable to the topology
// Returns true if KMP_HW_SUBSET filtered any processors
// otherwise, returns false
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  __kmp_hw_subset->canonicalize(__kmp_topology);

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  bool is_absolute = __kmp_hw_subset->is_absolute();
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been
    // specified either directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers,
                      __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (!is_absolute) {
      if (max_count < 0 ||
          (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
        bool plural = (num > 1);
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric,
                        __kmp_hw_get_catalog_string(type, plural));
        return false;
      }
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // Check if using a single core attribute on non-hybrid arch.
      // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
      //
      // Check if using multiple core attributes on non-hybrid arch.
      // Ignore all of KMP_HW_SUBSET if this is the case.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "efficiency");
          } else {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type",
                        "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if ((using_core_types || using_core_effs) && !is_absolute) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }

  // For keeping track of sub_ids for an absolute KMP_HW_SUBSET
  // or core attributes (core type or efficiency)
  int prev_sub_ids[KMP_HW_LAST];
  int abs_sub_ids[KMP_HW_LAST];
  int core_eff_sub_ids[KMP_HW_MAX_NUM_CORE_EFFS];
  int core_type_sub_ids[KMP_HW_MAX_NUM_CORE_TYPES];
  for (size_t i = 0; i < KMP_HW_LAST; ++i) {
    abs_sub_ids[i] = -1;
    prev_sub_ids[i] = -1;
  }
  for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_EFFS; ++i)
    core_eff_sub_ids[i] = -1;
  for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    core_type_sub_ids[i] = -1;

  // Determine which hardware threads should be filtered.

  // Helpful to determine if a topology layer is targeted by an absolute subset
  auto is_targeted = [&](int level) {
    if (is_absolute) {
      for (int i = 0; i < hw_subset_depth; ++i)
        if (topology_levels[i] == level)
          return true;
      return false;
    }
    // If not absolute KMP_HW_SUBSET, then every layer is seen as targeted
    return true;
  };

  // Helpful to index into core type sub Ids array
  auto get_core_type_index = [](const kmp_hw_thread_t &t) {
    switch (t.attrs.get_core_type()) {
    case KMP_HW_CORE_TYPE_UNKNOWN:
    case KMP_HW_MAX_NUM_CORE_TYPES:
      return 0;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    case KMP_HW_CORE_TYPE_ATOM:
      return 1;
    case KMP_HW_CORE_TYPE_CORE:
      return 2;
#endif
    }
    KMP_ASSERT2(false, "Unhandled kmp_hw_thread_t enumeration");
    KMP_BUILTIN_UNREACHABLE;
  };

  // Helpful to index into core efficiencies sub Ids array
  auto get_core_eff_index = [](const kmp_hw_thread_t &t) {
    return t.attrs.get_core_eff();
  };

  int num_filtered = 0;
  kmp_affin_mask_t *filtered_mask;
  KMP_CPU_ALLOC(filtered_mask);
  KMP_CPU_COPY(filtered_mask, __kmp_affin_fullMask);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];

    // Figure out the absolute sub ids and core eff/type sub ids
    if (is_absolute || using_core_effs || using_core_types) {
      for (int level = 0; level < get_depth(); ++level) {
        if (hw_thread.sub_ids[level] != prev_sub_ids[level]) {
          bool found_targeted = false;
          for (int j = level; j < get_depth(); ++j) {
            bool targeted = is_targeted(j);
            if (!found_targeted && targeted) {
              found_targeted = true;
              abs_sub_ids[j]++;
              if (j == core_level && using_core_effs)
                core_eff_sub_ids[get_core_eff_index(hw_thread)]++;
              if (j == core_level && using_core_types)
                core_type_sub_ids[get_core_type_index(hw_thread)]++;
            } else if (targeted) {
              abs_sub_ids[j] = 0;
              if (j == core_level && using_core_effs)
                core_eff_sub_ids[get_core_eff_index(hw_thread)] = 0;
              if (j == core_level && using_core_types)
                core_type_sub_ids[get_core_type_index(hw_thread)] = 0;
            }
          }
          break;
        }
      }
      for (int level = 0; level < get_depth(); ++level)
        prev_sub_ids[level] = hw_thread.sub_ids[level];
    }

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds
        // to this hardware thread's core attribute. Use this num,offset plus
        // the running sub_id for the particular core attribute of this hardware
        // thread to determine if the hardware thread should be filtered or not.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it.
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids[get_core_type_index(hw_thread)];
        else
          sub_id = core_eff_sub_ids[get_core_eff_index(hw_thread)];
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int sub_id;
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (is_absolute)
          sub_id = abs_sub_ids[level];
        else
          sub_id = hw_thread.sub_ids[level];
        if (hw_thread.ids[level] == kmp_hw_thread_t::UNKNOWN_ID ||
            sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    if (should_be_filtered) {
      KMP_CPU_CLR(hw_thread.os_id, filtered_mask);
      num_filtered++;
    }
  }

  // One last check that we shouldn't allow filtering entire machine
  if (num_filtered == num_hw_threads) {
    KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
    return false;
  }

  // Apply the filter
  restrict_to_mask(filtered_mask);
  return true;
}
, int hwt2
,
1408 const kmp_affinity_t
&stgs
) const {
1409 int hw_level
= stgs
.gran_levels
;
1410 if (hw_level
>= depth
)
1413 const kmp_hw_thread_t
&t1
= hw_threads
[hwt1
];
1414 const kmp_hw_thread_t
&t2
= hw_threads
[hwt2
];
1415 if (stgs
.flags
.core_types_gran
)
1416 return t1
.attrs
.get_core_type() == t2
.attrs
.get_core_type();
1417 if (stgs
.flags
.core_effs_gran
)
1418 return t1
.attrs
.get_core_eff() == t2
.attrs
.get_core_eff();
1419 for (int i
= 0; i
< (depth
- hw_level
); ++i
) {
1420 if (t1
.ids
[i
] != t2
.ids
[i
])
////////////////////////////////////////////////////////////////////////////////

bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity.type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}
void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
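// Example (illustrative): a mask with bits {0,1,2,3,8,9} prints as
// "0-3,8-9"; ranges of one or two bits print without a dash, so {5,6}
// renders as "5,6" rather than "5-6".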
// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}
static kmp_affin_mask_t *__kmp_parse_cpu_list(const char *path) {
  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC(mask);
  KMP_CPU_ZERO(mask);
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  // File contains CSV of integer ranges representing the CPUs
  // e.g., 1,2,4-7,9,11-15
  int status = file.try_open(path, "r");
  if (status != 0)
    return mask;
  while (!feof(file)) {
    skip_ws(file);
    n = fscanf(file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(file);
    int c = fgetc(file);
    if (c == EOF || c == ',') {
      // Just single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(file);
      n = fscanf(file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(file);
      c = fgetc(file); // skip ','
    } else {
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, mask);
    }
  }
#endif
  return mask;
}
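// Example (illustrative): a file containing "0-2,5" yields a mask with bits
// {0,1,2,5}; entries outside [0, __kmp_xproc) are skipped rather than
// treated as hard errors.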
// Return (possibly empty) affinity mask representing the offline CPUs
// Caller must free the mask
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  return __kmp_parse_cpu_list("/sys/devices/system/cpu/offline");
}
1655 int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t
*mask
) {
1659 #if KMP_GROUP_AFFINITY
1661 if (__kmp_num_proc_groups
> 1) {
1663 KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount
!= NULL
);
1664 for (group
= 0; group
< __kmp_num_proc_groups
; group
++) {
1666 int num
= __kmp_GetActiveProcessorCount(group
);
1667 for (i
= 0; i
< num
; i
++) {
1668 KMP_CPU_SET(i
+ group
* (CHAR_BIT
* sizeof(DWORD_PTR
)), mask
);
1674 #endif /* KMP_GROUP_AFFINITY */
1678 kmp_affin_mask_t
*offline_cpus
= __kmp_affinity_get_offline_cpus();
1679 for (proc
= 0; proc
< __kmp_xproc
; proc
++) {
1680 // Skip offline CPUs
1681 if (KMP_CPU_ISSET(proc
, offline_cpus
))
1683 KMP_CPU_SET(proc
, mask
);
1686 KMP_CPU_FREE(offline_cpus
);
// All of the __kmp_affinity_create_*_map() routines should allocate the
// internal topology object and set the layer ids for it. Each routine
// returns a boolean on whether it was successful at doing so.
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
// Original mask is a subset of full mask in multiple processor groups topology
kmp_affin_mask_t *__kmp_affin_origMask = NULL;
#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}
// Returns KMP_HW_* type derived from HWLOC_* type
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
#endif
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}
// Returns the number of objects of type 'type' below 'obj' within the topology
// tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
// HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET
// object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type, first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}
// This gets the sub_id for a lower object under a higher object in the
// topology tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // store sub_id + 1 so that 0 is differed from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
1803 static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t
*const msg_id
) {
1805 int hw_thread_index
, sub_id
;
1807 hwloc_obj_t pu
, obj
, root
, prev
;
1808 kmp_hw_t types
[KMP_HW_LAST
];
1809 hwloc_obj_type_t hwloc_types
[KMP_HW_LAST
];
1811 hwloc_topology_t tp
= __kmp_hwloc_topology
;
1812 *msg_id
= kmp_i18n_null
;
1813 if (__kmp_affinity
.flags
.verbose
) {
1814 KMP_INFORM(AffUsingHwloc
, "KMP_AFFINITY");
1817 if (!KMP_AFFINITY_CAPABLE()) {
1818 // Hack to try and infer the machine topology using only the data
1819 // available from hwloc on the current thread, and __kmp_xproc.
1820 KMP_ASSERT(__kmp_affinity
.type
== affinity_none
);
1821 // hwloc only guarantees existance of PU object, so check PACKAGE and CORE
1822 hwloc_obj_t o
= hwloc_get_obj_by_type(tp
, HWLOC_OBJ_PACKAGE
, 0);
1824 nCoresPerPkg
= __kmp_hwloc_get_nobjs_under_obj(o
, HWLOC_OBJ_CORE
);
1826 nCoresPerPkg
= 1; // no PACKAGE found
1827 o
= hwloc_get_obj_by_type(tp
, HWLOC_OBJ_CORE
, 0);
1829 __kmp_nThreadsPerCore
= __kmp_hwloc_get_nobjs_under_obj(o
, HWLOC_OBJ_PU
);
1831 __kmp_nThreadsPerCore
= 1; // no CORE found
1832 if (__kmp_nThreadsPerCore
== 0)
1833 __kmp_nThreadsPerCore
= 1;
1834 __kmp_ncores
= __kmp_xproc
/ __kmp_nThreadsPerCore
;
1835 if (nCoresPerPkg
== 0)
1836 nCoresPerPkg
= 1; // to prevent possible division by 0
1837 nPackages
= (__kmp_xproc
+ nCoresPerPkg
- 1) / nCoresPerPkg
;
1841 #if HWLOC_API_VERSION >= 0x00020400
1842 // Handle multiple types of cores if they exist on the system
1843 int nr_cpu_kinds
= hwloc_cpukinds_get_nr(tp
, 0);
1845 typedef struct kmp_hwloc_cpukinds_info_t
{
1847 kmp_hw_core_type_t core_type
;
1848 hwloc_bitmap_t mask
;
1849 } kmp_hwloc_cpukinds_info_t
;
1850 kmp_hwloc_cpukinds_info_t
*cpukinds
= nullptr;
1852 if (nr_cpu_kinds
> 0) {
1854 struct hwloc_info_s
*infos
;
1855 cpukinds
= (kmp_hwloc_cpukinds_info_t
*)__kmp_allocate(
1856 sizeof(kmp_hwloc_cpukinds_info_t
) * nr_cpu_kinds
);
1857 for (unsigned idx
= 0; idx
< (unsigned)nr_cpu_kinds
; ++idx
) {
1858 cpukinds
[idx
].efficiency
= -1;
1859 cpukinds
[idx
].core_type
= KMP_HW_CORE_TYPE_UNKNOWN
;
1860 cpukinds
[idx
].mask
= hwloc_bitmap_alloc();
1861 if (hwloc_cpukinds_get_info(tp
, idx
, cpukinds
[idx
].mask
,
1862 &cpukinds
[idx
].efficiency
, &nr_infos
, &infos
,
1864 for (unsigned i
= 0; i
< nr_infos
; ++i
) {
1865 if (__kmp_str_match("CoreType", 8, infos
[i
].name
)) {
1866 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1867 if (__kmp_str_match("IntelAtom", 9, infos
[i
].value
)) {
1868 cpukinds
[idx
].core_type
= KMP_HW_CORE_TYPE_ATOM
;
1870 } else if (__kmp_str_match("IntelCore", 9, infos
[i
].value
)) {
1871 cpukinds
[idx
].core_type
= KMP_HW_CORE_TYPE_CORE
;
1882 root
= hwloc_get_root_obj(tp
);
1884 // Figure out the depth and types in the topology
1886 obj
= hwloc_get_pu_obj_by_os_index(tp
, __kmp_affin_fullMask
->begin());
1887 while (obj
&& obj
!= root
) {
1888 #if HWLOC_API_VERSION >= 0x00020000
1889 if (obj
->memory_arity
) {
1891 for (memory
= obj
->memory_first_child
; memory
;
1892 memory
= hwloc_get_next_child(tp
, obj
, memory
)) {
1893 if (memory
->type
== HWLOC_OBJ_NUMANODE
)
1896 if (memory
&& memory
->type
== HWLOC_OBJ_NUMANODE
) {
1897 types
[depth
] = KMP_HW_NUMA
;
1898 hwloc_types
[depth
] = memory
->type
;
1903 type
= __kmp_hwloc_type_2_topology_type(obj
);
1904 if (type
!= KMP_HW_UNKNOWN
) {
1905 types
[depth
] = type
;
1906 hwloc_types
[depth
] = obj
->type
;
1911 KMP_ASSERT(depth
> 0);
  // Get the order for the types correct
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    kmp_hw_t temp = types[i];
    types[i] = types[j];
    types[j] = temp;
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
  }

  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  hw_thread_index = 0;
  pu = NULL;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    if (included) {
      hw_thread.clear();
      hw_thread.ids[index] = pu->logical_index;
      hw_thread.os_id = pu->os_index;
      hw_thread.original_idx = hw_thread_index;
      // If multiple core types, then set that attribute for the hardware thread
#if HWLOC_API_VERSION >= 0x00020400
      if (cpukinds) {
        int cpukind_index = -1;
        for (int i = 0; i < nr_cpu_kinds; ++i) {
          if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
            cpukind_index = i;
            break;
          }
        }
        if (cpukind_index >= 0) {
          hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
          hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
        }
      }
#endif
      index--;
    }
    obj = pu;
    prev = obj;
    while (obj != root && obj != NULL) {
      obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
      // NUMA Nodes are handled differently since they are not within the
      // parent/child structure anymore. They are separate children
      // of obj (memory_first_child points to first memory child)
      if (obj->memory_arity) {
        hwloc_obj_t memory;
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
            break;
        }
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          if (included) {
            hw_thread.ids[index] = memory->logical_index;
            hw_thread.ids[index + 1] = sub_id;
            index--;
          }
          prev = memory;
        }
      }
#endif
      type = __kmp_hwloc_type_2_topology_type(obj);
      if (type != KMP_HW_UNKNOWN) {
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        if (included) {
          hw_thread.ids[index] = obj->logical_index;
          hw_thread.ids[index + 1] = sub_id;
          index--;
        }
        prev = obj;
      }
    }
    if (included)
      hw_thread_index++;
  }
#if HWLOC_API_VERSION >= 0x00020400
  // Free the core types information
  if (cpukinds) {
    for (int idx = 0; idx < nr_cpu_kinds; ++idx)
      hwloc_bitmap_free(cpukinds[idx].mask);
    __kmp_free(cpukinds);
  }
#endif
  __kmp_topology->sort_ids();
  return true;
}
#endif // KMP_USE_HWLOC
// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread id's <-> processor id's.
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
  }

  // Even if __kmp_affinity.type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as
  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    return true;
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly, and return now if affinity is
  // not enabled.
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.original_idx = avail_ct;
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
    avail_ct++;
  }
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }
  return true;
}
#if KMP_GROUP_AFFINITY
// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at level 1.
// This facilitates letting the threads float among all procs in a group,
// if granularity=group (the default when there are multiple groups).
static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
  }

  // If we aren't affinity capable, then use flat topology
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    nPackages = __kmp_num_proc_groups;
    __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc;
    nCoresPerPkg = __kmp_ncores / nPackages;
    return true;
  }

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.original_idx = avail_ct;
    hw_thread.ids[0] = i / BITS_PER_GROUP;
    hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
    avail_ct++;
  }
  return true;
}
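// Worked example (hypothetical layout): with 64-bit masks BITS_PER_GROUP is
// 64, so OS proc 70 lands in processor group 70 / 64 == 1 with in-group
// core/thread id 70 % 64 == 6.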
#endif /* KMP_GROUP_AFFINITY */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
  const kmp_uint32 SHIFT_RIGHT = LSB;
  kmp_uint32 retval = v;
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
  return retval;
}

static int __kmp_cpuid_mask_width(int count) {
  int r = 0;
  while ((1 << r) < count)
    ++r;
  return r;
}
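// Worked example (values hypothetical): __kmp_extract_bits<8, 15>(0x12345678)
// keeps bits 8..15 and yields 0x56, and __kmp_cpuid_mask_width(6) returns 3
// because 1 << 3 == 8 is the first power of two >= 6.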
class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; // ""
  unsigned maxThreadsPerPkg; // ""
  unsigned pkgId; // inferred from above values
  unsigned coreId; // ""
  unsigned threadId; // ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}
class cpuid_cache_info_t {
public:
  struct info_t {
    unsigned level = 0;
    unsigned mask = 0;
    bool operator==(const info_t &rhs) const {
      return level == rhs.level && mask == rhs.mask;
    }
    bool operator!=(const info_t &rhs) const { return !operator==(rhs); }
  };
  cpuid_cache_info_t() : depth(0) {
    table[MAX_CACHE_LEVEL].level = 0;
    table[MAX_CACHE_LEVEL].mask = 0;
  }
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }
  bool operator==(const cpuid_cache_info_t &rhs) const {
    if (rhs.depth != depth)
      return false;
    for (size_t i = 0; i < depth; ++i)
      if (table[i] != rhs.table[i])
        return false;
    return true;
  }
  bool operator!=(const cpuid_cache_info_t &rhs) const {
    return !operator==(rhs);
  }
  // Get cache information associated with L1, L2, L3 cache, etc.
  // If level does not exist, then return the "NULL" level (level 0)
  const info_t &get_level(unsigned level) const {
    for (size_t i = 0; i < depth; ++i) {
      if (table[i].level == level)
        return table[i];
    }
    return table[MAX_CACHE_LEVEL];
  }

  static kmp_hw_t get_topology_type(unsigned level) {
    KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
    switch (level) {
    case 1:
      return KMP_HW_L1;
    case 2:
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }
  // Read the cache levels from cpuid leaf 4
  void get_leaf4_levels() {
    unsigned level = 0;
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      kmp_cpuid buf2;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((-1) << cache_mask_width);
      depth++;
      level++;
    }
  }
  static const int MAX_CACHE_LEVEL = 3;

private:
  size_t depth;
  info_t table[MAX_CACHE_LEVEL + 1];
};
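// Worked example for get_leaf4_levels() above (hypothetical readout): if an
// L2 level reports max_threads_sharing == 6, then cache_mask_width == 3 and
// the stored mask is ~0x7, so APIC ids that agree outside their low 3 bits
// share that L2 instance.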
// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
// an algorithm which cycles through the available os threads, setting
// the current thread's affinity mask to that thread, and then retrieves
// the Apic Id for each thread context using the cpuid instruction.
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
  }

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return false;
  }

  // The algorithm used starts by setting the affinity to each available thread
  // and retrieving info from the cpuid instruction, so if we are not capable of
  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then
  // we need to do something else - use the defaults that we calculated from
  // issuing cpuid without binding to each proc.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity.type == affinity_none);

    // Get an upper bound on the number of threads per package using cpuid(1).
    // On some OS/chip combinations where HT is supported by the chip but is
    // disabled, this value will be 2 on a single core chip. Usually, it will be
    // 2 if HT is enabled and 1 if HT is disabled.
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded
    // value.
    //
    // The author of cpu_count.cpp treated this only an upper bound on the
    // number of cores, but I haven't seen any cases where it was greater than
    // the actual number of cores, so we will treat it as exact in this block of
    // code.
    //
    // First, we need to check if cpuid(4) is supported on this chip. To see if
    // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or
    // greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // There is no way to reliably tell if HT is enabled without issuing the
    // cpuid instruction from every thread, and correlating the cpuid info, so
    // if the machine is not affinity capable, we assume that HT is off. We have
    // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine
    // does not support HT.
    //
    // - Older OSes are usually found on machines with older chips, which do not
    //   support HT.
    // - The performance penalty for mistakenly identifying a machine as HT when
    //   it isn't (which results in blocktime being incorrectly set to 0) is
    //   greater than the penalty when for mistakenly identifying a machine as
    //   being 1 thread/core when it is really HT enabled (which results in
    //   blocktime being incorrectly set to a positive value).
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    return true;
  }

  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity.type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  //
  // The relevant information is:
  // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
  //   has a unique Apic Id, which is of the form pkg# : core# : thread#.
  // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
  //   of this field determines the width of the core# + thread# fields in the
  //   Apic Id. It is also an upper bound on the number of threads per
  //   package, but it has been verified that situations happen were it is not
  //   exact. In particular, on certain OS/chip combinations where Intel(R)
  //   Hyper-Threading Technology is supported by the chip but has been
  //   disabled, the value of this field will be 2 (for a single core chip).
  //   On other OS/chip combinations supporting Intel(R) Hyper-Threading
  //   Technology, the value of this field will be 1 when Intel(R)
  //   Hyper-Threading Technology is disabled and 2 when it is enabled.
  // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value
  //   of this field (+1) determines the width of the core# field in the Apic
  //   Id. The comments in "cpucount.cpp" say that this value is an upper
  //   bound, but the IA-32 architecture manual says that it is exactly the
  //   number of cores per package, and I haven't seen any case where it
  //   wasn't.
  //
  // From this information, deduce the package Id, core Id, and thread Id,
  // and set the corresponding fields in the apicThreadInfo struct.
  unsigned i;
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return false;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
    // value.
    //
    // First, we need to check if cpuid(4) is supported on this chip. To see if
    // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
    // or greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // I've never seen this one happen, but I suppose it could, if the cpuid
      // instruction on a chip was really screwed up. Make sure to restore the
      // affinity mask before the tail call.
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

    nApics++;
  }
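  // Worked decomposition (hypothetical values): apicId = 0x5b with
  // maxThreadsPerPkg = 8 (widthCT = 3) and maxCoresPerPkg = 4 (widthC = 2,
  // widthT = 1) gives pkgId = 0x5b >> 3 = 11, coreId = (0x5b >> 1) & 3 = 1,
  // and threadId = 0x5b & 1 = 1.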
  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  previous_affinity.restore();

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields. pkgId's may be sparsely assigned among
  // the chips on a system. Although coreId's are usually assigned
  // [0 .. coresPerPkg-1] and threadId's are usually assigned
  // [0..threadsPerCore-1], we don't want to make any such assumptions.
  //
  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
  // total # packages) are at this point - we want to determine that now. We
  // only have an upper bound on the first two figures.
  //
  // We also perform a consistency check at this point: the values returned by
  // the cpuid instruction for any thread bound to a given package had better
  // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;

  unsigned pkgCt = 1; // to determine radii
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // intra-pkg consist checks
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nCores++;
      pkgCt++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars, though.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return false;
    }

    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return false;
    }
  }
  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly
  nPackages = pkgCt;
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;
  __kmp_ncores = nCores;
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);

  // Now that we've determined the number of packages, the number of cores per
  // package, and the number of threads per core, we can construct the data
  // structure that is to be returned.
  int pkgLevel = 0;
  int coreLevel = 1;
  int threadLevel = 2;
  //(__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
  int idx = 0;
  kmp_hw_t types[3];
  if (pkgLevel >= 0)
    types[idx++] = KMP_HW_SOCKET;
  if (coreLevel >= 0)
    types[idx++] = KMP_HW_CORE;
  if (threadLevel >= 0)
    types[idx++] = KMP_HW_THREAD;

  KMP_ASSERT(depth > 0);
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);
  for (i = 0; i < nApics; ++i) {
    int idx = 0;
    unsigned os = threadInfo[i].osId;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();
    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    }
    hw_thread.os_id = os;
    hw_thread.original_idx = i;
  }

  __kmp_free(threadInfo);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    return false;
  }
  return true;
}
// Hybrid cpu detection using CPUID.1A
// Thread should be pinned to processor already
static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
                                  unsigned *native_model_id) {
  kmp_cpuid buf;
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
  switch (*type) {
  case KMP_HW_CORE_TYPE_ATOM:
    *efficiency = 0;
    break;
  case KMP_HW_CORE_TYPE_CORE:
    *efficiency = 1;
    break;
  default:
    *efficiency = 0;
  }
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
}
// Intel(R) microarchitecture code name Nehalem, Dunnington and later
// architectures support a newer interface for specifying the x2APIC Ids,
// based on CPUID.B or CPUID.1F
/*
 * CPUID.B or 1F, Input ECX (sub leaf # aka level number)
    Bits            Bits            Bits           Bits
    31-16           15-8            7-4            4-0
---+-----------+--------------+-------------+-----------------+
EAX| reserved  |   reserved   |   reserved  |  Bits to Shift  |
---+-----------|--------------+-------------+-----------------|
EBX| reserved  | Num logical processors at level (16 bits)    |
---+-----------|--------------+-------------------------------|
ECX| reserved  |   Level Type |      Level Number (8 bits)    |
---+-----------+--------------+-------------------------------|
EDX|                    X2APIC ID (32 bits)                   |
---+----------------------------------------------------------+
*/
enum {
  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_MODULE = 3,
  INTEL_LEVEL_TYPE_TILE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,
};
KMP_BUILD_ASSERT(INTEL_LEVEL_TYPE_LAST < sizeof(unsigned) * CHAR_BIT);
#define KMP_LEAF_1F_KNOWN_LEVELS ((1u << INTEL_LEVEL_TYPE_LAST) - 1u)
static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
    return KMP_HW_SOCKET;
  case INTEL_LEVEL_TYPE_SMT:
    return KMP_HW_THREAD;
  case INTEL_LEVEL_TYPE_CORE:
    return KMP_HW_CORE;
  case INTEL_LEVEL_TYPE_TILE:
    return KMP_HW_TILE;
  case INTEL_LEVEL_TYPE_MODULE:
    return KMP_HW_MODULE;
  case INTEL_LEVEL_TYPE_DIE:
    return KMP_HW_DIE;
  }
  return KMP_HW_UNKNOWN;
}

static int __kmp_topology_type_2_intel_type(kmp_hw_t type) {
  switch (type) {
  case KMP_HW_SOCKET:
    return INTEL_LEVEL_TYPE_INVALID;
  case KMP_HW_THREAD:
    return INTEL_LEVEL_TYPE_SMT;
  case KMP_HW_CORE:
    return INTEL_LEVEL_TYPE_CORE;
  case KMP_HW_TILE:
    return INTEL_LEVEL_TYPE_TILE;
  case KMP_HW_MODULE:
    return INTEL_LEVEL_TYPE_MODULE;
  case KMP_HW_DIE:
    return INTEL_LEVEL_TYPE_DIE;
  default:
    return INTEL_LEVEL_TYPE_INVALID;
  }
}
struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;
};

class cpuid_topo_desc_t {
  unsigned desc = 0;

public:
  void clear() { desc = 0; }
  bool contains(int intel_type) const {
    KMP_DEBUG_ASSERT(intel_type >= 0 && intel_type < INTEL_LEVEL_TYPE_LAST);
    if ((1u << intel_type) & desc)
      return true;
    return false;
  }
  bool contains_topology_type(kmp_hw_t type) const {
    KMP_DEBUG_ASSERT(type >= 0 && type < KMP_HW_LAST);
    int intel_type = __kmp_topology_type_2_intel_type(type);
    return contains(intel_type);
  }
  bool contains(cpuid_topo_desc_t rhs) const {
    return ((desc | rhs.desc) == desc);
  }
  void add(int intel_type) { desc |= (1u << intel_type); }
  void add(cpuid_topo_desc_t rhs) { desc |= rhs.desc; }
};
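// A minimal sketch of the descriptor bit-set (values hypothetical): after
// add(INTEL_LEVEL_TYPE_SMT) and add(INTEL_LEVEL_TYPE_CORE), desc == 0b110,
// so contains(INTEL_LEVEL_TYPE_CORE) is true while any other level type is
// reported as absent.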
struct cpuid_proc_info_t {
  // Topology info
  int os_id;
  unsigned apic_id;
  unsigned depth;
  // Hybrid info
  unsigned native_model_id;
  int efficiency;
  kmp_hw_core_type_t type;
  cpuid_topo_desc_t description;

  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
};
// This function takes the topology leaf, an info pointer to store the levels
// detected, and writable descriptors for the total topology.
// Returns whether total types, depth, or description were modified.
static bool __kmp_x2apicid_get_levels(int leaf, cpuid_proc_info_t *info,
                                      kmp_hw_t total_types[KMP_HW_LAST],
                                      int *total_depth,
                                      cpuid_topo_desc_t *total_description) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  kmp_cpuid buf;
  cpuid_level_info_t(&levels)[INTEL_LEVEL_TYPE_LAST] = info->levels;
  bool retval = false;

  // New algorithm has known topology layers act as highest unknown topology
  // layers when unknown topology layers exist.
  // e.g., Suppose layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> <Z>
  // are unknown topology layers, Then SMT will take the characteristics of
  // (SMT x <X>) and CORE will take the characteristics of (CORE x <Y> x <Z>).
  // This eliminates unknown portions of the topology while still keeping the
  // correct structure.
  level = levels_index = 0;
  do {
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0) {
      info->depth = 0;
      return retval;
    }

    if (KMP_LEAF_1F_KNOWN_LEVELS & (1u << level_type)) {
      // Add a new level to the topology
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      levels_index++;
    } else {
      // If it is an unknown level, then logically move the previous layer up
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
      }
    }
    level++;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);
  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
  info->description.clear();
  info->depth = levels_index;

  // If types, depth, and total_description are uninitialized,
  // then initialize them now
  if (*total_depth == 0) {
    *total_depth = info->depth;
    total_description->clear();
    for (int i = *total_depth - 1, j = 0; i >= 0; --i, ++j) {
      total_types[j] =
          __kmp_intel_type_2_topology_type(info->levels[i].level_type);
      total_description->add(info->levels[i].level_type);
    }
    retval = true;
  }

  // Ensure the INTEL_LEVEL_TYPE_INVALID (Socket) layer isn't first
  if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID)
    return retval;

  // Set the masks to & with apicid
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      levels[i].cache_mask = (-1) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      KMP_DEBUG_ASSERT(i > 0);
      levels[i].mask = (-1) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    }
    info->description.add(info->levels[i].level_type);
  }

  // If this processor has level type not on other processors, then make
  // sure to include it in total types, depth, and description.
  // One assumption here is that the first type, i.e. socket, is known.
  // Another assumption is that types array is always large enough to fit any
  // new layers since its length is KMP_HW_LAST.
  if (!total_description->contains(info->description)) {
    for (int i = info->depth - 1, j = 0; i >= 0; --i, ++j) {
      // If this level is known already, then skip it.
      if (total_description->contains(levels[i].level_type))
        continue;
      // Unknown level, insert before last known level
      kmp_hw_t curr_type =
          __kmp_intel_type_2_topology_type(levels[i].level_type);
      KMP_ASSERT(j != 0 && "Bad APIC Id information");
      // Move over all known levels to make room for new level
      for (int k = info->depth - 1; k >= j; --k) {
        KMP_DEBUG_ASSERT(k + 1 < KMP_HW_LAST);
        total_types[k + 1] = total_types[k];
      }
      // Insert new level
      total_types[j] = curr_type;
      (*total_depth)++;
    }
    total_description->add(info->description);
    retval = true;
  }
  return retval;
}
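// Worked example of the level masks (hypothetical leaf 0xb readout): SMT with
// mask_width = 1 and CORE with mask_width = 5 yield levels[0].mask = 0x1 and
// levels[1].mask = 0x1f ^ 0x1 = 0x1e, and the package id is apic_id >> 5.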
static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
  kmp_cpuid buf;
  int topology_leaf, highest_leaf;
  int num_leaves;
  int depth = 0;
  cpuid_topo_desc_t total_description;
  static int leaves[] = {0, 0};

  // If affinity is disabled, __kmp_avail_proc may be zero
  int ninfos = (__kmp_avail_proc > 0 ? __kmp_avail_proc : 1);
  cpuid_proc_info_t *proc_info = (cpuid_proc_info_t *)__kmp_allocate(
      (sizeof(cpuid_proc_info_t) + sizeof(cpuid_cache_info_t)) * ninfos);
  cpuid_cache_info_t *cache_info = (cpuid_cache_info_t *)(proc_info + ninfos);

  kmp_i18n_id_t leaf_message_id;

  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
  }

  // Get the highest cpuid leaf supported
  __kmp_x86_cpuid(0, 0, &buf);
  highest_leaf = buf.eax;

  // If a specific topology method was requested, only allow that specific leaf
  // otherwise, try both leaves 31 and 11 in that order
  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
    num_leaves = 1;
    leaves[0] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    num_leaves = 1;
    leaves[0] = 31;
    leaf_message_id = kmp_i18n_str_NoLeaf31Support;
  } else {
    num_leaves = 2;
    leaves[0] = 31;
    leaves[1] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  }

  // Check to see if cpuid leaf 31 or 11 is supported.
  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
  topology_leaf = -1;
  for (int i = 0; i < num_leaves; ++i) {
    int leaf = leaves[i];
    if (highest_leaf < leaf)
      continue;
    __kmp_x86_cpuid(leaf, 0, &buf);
    if (buf.ebx == 0)
      continue;
    topology_leaf = leaf;
    __kmp_x2apicid_get_levels(leaf, &proc_info[0], types, &depth,
                              &total_description);
    if (depth == 0)
      continue;
    break;
  }
  if (topology_leaf == -1 || depth == 0) {
    *msg_id = leaf_message_id;
    __kmp_free(proc_info);
    return false;
  }
  KMP_ASSERT(depth <= INTEL_LEVEL_TYPE_LAST);
  // The algorithm used starts by setting the affinity to each available thread
  // and retrieving info from the cpuid instruction, so if we are not capable of
  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then
  // we need to do something else - use the defaults that we calculated from
  // issuing cpuid without binding to each proc.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    for (int i = 0; i < depth; ++i) {
      if (proc_info[0].levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
        __kmp_nThreadsPerCore = proc_info[0].levels[i].nitems;
      } else if (proc_info[0].levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
        nCoresPerPkg = proc_info[0].levels[i].nitems;
      }
    }
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_free(proc_info);
    return true;
  }
  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity.type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  int proc;
  int hw_thread_index = 0;
  bool uniform_caches = true;

  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);

    // Gather topology information
    __kmp_affinity_dispatch->bind_thread(proc);
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    proc_info[hw_thread_index].os_id = proc;
    proc_info[hw_thread_index].apic_id = buf.edx;
    __kmp_x2apicid_get_levels(topology_leaf, &proc_info[hw_thread_index], types,
                              &depth, &total_description);
    if (proc_info[hw_thread_index].depth == 0) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      __kmp_free(proc_info);
      return false;
    }
    // Gather cache information and insert afterwards
    cache_info[hw_thread_index].get_leaf4_levels();
    if (uniform_caches && hw_thread_index > 0)
      if (cache_info[0] != cache_info[hw_thread_index])
        uniform_caches = false;
    // Hybrid information
    if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
      __kmp_get_hybrid_info(&proc_info[hw_thread_index].type,
                            &proc_info[hw_thread_index].efficiency,
                            &proc_info[hw_thread_index].native_model_id);
    }
    hw_thread_index++;
  }
  KMP_ASSERT(hw_thread_index > 0);
  previous_affinity.restore();
  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  // Create topology Ids and hybrid types in __kmp_topology
  for (int i = 0; i < __kmp_topology->get_num_hw_threads(); ++i) {
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();
    hw_thread.os_id = proc_info[i].os_id;
    hw_thread.original_idx = i;
    unsigned apic_id = proc_info[i].apic_id;
    // Put in topology information
    for (int j = 0, idx = depth - 1; j < depth; ++j, --idx) {
      if (!(proc_info[i].description.contains_topology_type(
              __kmp_topology->get_type(j)))) {
        hw_thread.ids[idx] = kmp_hw_thread_t::UNKNOWN_ID;
      } else {
        hw_thread.ids[idx] = apic_id & proc_info[i].levels[j].mask;
        if (j > 0)
          hw_thread.ids[idx] >>= proc_info[i].levels[j - 1].mask_width;
      }
    }
    // Hybrid information
    hw_thread.attrs.set_core_type(proc_info[i].type);
    hw_thread.attrs.set_core_eff(proc_info[i].efficiency);
  }
  __kmp_topology->sort_ids();
  // Change Ids to logical Ids
  for (int j = 0; j < depth - 1; ++j) {
    int new_id = 0;
    int prev_id = __kmp_topology->at(0).ids[j];
    int curr_id = __kmp_topology->at(0).ids[j + 1];
    __kmp_topology->at(0).ids[j + 1] = new_id;
    for (int i = 1; i < __kmp_topology->get_num_hw_threads(); ++i) {
      kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
      if (hw_thread.ids[j] == prev_id && hw_thread.ids[j + 1] == curr_id) {
        hw_thread.ids[j + 1] = new_id;
      } else if (hw_thread.ids[j] == prev_id &&
                 hw_thread.ids[j + 1] != curr_id) {
        curr_id = hw_thread.ids[j + 1];
        hw_thread.ids[j + 1] = ++new_id;
      } else {
        prev_id = hw_thread.ids[j];
        curr_id = hw_thread.ids[j + 1];
        hw_thread.ids[j + 1] = ++new_id;
      }
    }
  }
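  // Sketch of the renumbering above (hypothetical ids): per-package core ids
  // {0, 1, 0, 1} under package ids {0, 0, 1, 1} become logical ids
  // {0, 1, 2, 3}, i.e. unique across the whole level rather than per parent.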
  // First check for easy cache placement. This occurs when caches are
  // equivalent to a layer in the CPUID leaf 0xb or 0x1f topology.
  if (uniform_caches) {
    for (size_t i = 0; i < cache_info[0].get_depth(); ++i) {
      unsigned cache_mask = cache_info[0][i].mask;
      unsigned cache_level = cache_info[0][i].level;
      KMP_ASSERT(cache_level <= cpuid_cache_info_t::MAX_CACHE_LEVEL);
      kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(cache_level);
      __kmp_topology->set_equivalent_type(cache_type, cache_type);
      for (int j = 0; j < depth; ++j) {
        unsigned hw_cache_mask = proc_info[0].levels[j].cache_mask;
        if (hw_cache_mask == cache_mask && j < depth - 1) {
          kmp_hw_t type = __kmp_intel_type_2_topology_type(
              proc_info[0].levels[j + 1].level_type);
          __kmp_topology->set_equivalent_type(cache_type, type);
        }
      }
    }
  } else {
    // If caches are non-uniform, then record which caches exist.
    for (int i = 0; i < __kmp_topology->get_num_hw_threads(); ++i) {
      for (size_t j = 0; j < cache_info[i].get_depth(); ++j) {
        unsigned cache_level = cache_info[i][j].level;
        kmp_hw_t cache_type =
            cpuid_cache_info_t::get_topology_type(cache_level);
        if (__kmp_topology->get_equivalent_type(cache_type) == KMP_HW_UNKNOWN)
          __kmp_topology->set_equivalent_type(cache_type, cache_type);
      }
    }
  }
  // See if any cache level needs to be added manually through cache Ids
  bool unresolved_cache_levels = false;
  for (unsigned level = 1; level <= cpuid_cache_info_t::MAX_CACHE_LEVEL;
       ++level) {
    kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(level);
    // This also filters out caches which may not be in the topology
    // since the equivalent type might be KMP_HW_UNKNOWN.
    if (__kmp_topology->get_equivalent_type(cache_type) == cache_type) {
      unresolved_cache_levels = true;
      break;
    }
  }
  // Insert unresolved cache layers into machine topology using cache Ids
  if (unresolved_cache_levels) {
    int num_hw_threads = __kmp_topology->get_num_hw_threads();
    int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
    for (unsigned l = 1; l <= cpuid_cache_info_t::MAX_CACHE_LEVEL; ++l) {
      kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(l);
      if (__kmp_topology->get_equivalent_type(cache_type) != cache_type)
        continue;
      for (int i = 0; i < num_hw_threads; ++i) {
        int original_idx = __kmp_topology->at(i).original_idx;
        ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
        const cpuid_cache_info_t::info_t &info =
            cache_info[original_idx].get_level(l);
        // if cache level not in topology for this processor, then skip
        if (info.level == 0)
          continue;
        ids[i] = info.mask & proc_info[original_idx].apic_id;
      }
      __kmp_topology->insert_layer(cache_type, ids);
    }
  }

  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
    __kmp_free(proc_info);
    return false;
  }
  __kmp_free(proc_info);
  return true;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
                                                  const void *b) {
  unsigned i;
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
    if (i == osIdIndex)
      break;
  }
  return 0;
}
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers
static void __kmp_dispatch_set_hierarchy_values() {
  // Set the maximum number of L1's to number of cores
  // Set the maximum number of L2's to either number of cores / 2 for
  // Intel(R) Xeon Phi(TM) coprocessor formally codenamed Knights Landing
  // Or the number of cores for Intel(R) Xeon(R) processors
  // Set the maximum number of NUMA nodes and L3's to number of packages
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 &&                                                         \
    (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
     KMP_OS_WINDOWS) &&                                                        \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
  else
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
  // Set the number of threads per unit
  // Number of hardware threads per L1/L2/L3/NUMA/LOOP
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
      __kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 &&                                                         \
    (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
     KMP_OS_WINDOWS) &&                                                        \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        2 * __kmp_nThreadsPerCore;
  else
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}
// Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
// i.e., this thread's L1 or this thread's L2, etc.
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
  int index = type + 1;
  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
  if (type == kmp_hier_layer_e::LAYER_THREAD)
    return tid;
  else if (type == kmp_hier_layer_e::LAYER_LOOP)
    return 0;
  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}
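// Usage sketch (hypothetical machine with 2 threads per core): the call
// __kmp_dispatch_get_index(5, kmp_hier_layer_e::LAYER_L1) returns
// (5 / 2) % __kmp_ncores == 2, so hardware threads 4 and 5 share L1 unit 2.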
// Return the number of t1's per t2
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
  int i1 = t1 + 1;
  int i2 = t2 + 1;
  KMP_DEBUG_ASSERT(i1 <= i2);
  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (nthreads/t2) / (nthreads/t1) = t1 / t2
  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
#endif // KMP_USE_HIER_SCHED
static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
  else
    filename = "/proc/cpuinfo";
  return filename;
}

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
  return envvar;
}
static bool __kmp_package_id_from_core_siblings_list(unsigned **threadInfo,
                                                     unsigned num_avail,
                                                     unsigned idx) {
  if (!KMP_AFFINITY_CAPABLE())
    return false;

  char path[256];
  KMP_SNPRINTF(path, sizeof(path),
               "/sys/devices/system/cpu/cpu%u/topology/core_siblings_list",
               threadInfo[idx][osIdIndex]);
  kmp_affin_mask_t *siblings = __kmp_parse_cpu_list(path);
  for (unsigned i = 0; i < num_avail; ++i) {
    unsigned cpu_id = threadInfo[i][osIdIndex];
    KMP_ASSERT(cpu_id < __kmp_affin_mask_size * CHAR_BIT);
    if (!KMP_CPU_ISSET(cpu_id, siblings))
      continue;
    if (threadInfo[i][pkgIdIndex] == UINT_MAX) {
      // Arbitrarily pick the first index we encounter, it only matters that
      // the value is the same for all siblings.
      threadInfo[i][pkgIdIndex] = idx;
    } else if (threadInfo[i][pkgIdIndex] != idx) {
      // Contradictory sibling lists.
      KMP_CPU_FREE(siblings);
      return false;
    }
  }
  KMP_ASSERT(threadInfo[idx][pkgIdIndex] != UINT_MAX);
  KMP_CPU_FREE(siblings);
  return true;
}
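// For example (hypothetical sysfs contents): if cpu0's core_siblings_list
// reads "0-3,8-11", each listed cpu whose pkgIdIndex is still UINT_MAX gets
// package id 0 (the probing row's index), while a conflicting earlier
// assignment makes the reconstruction fail instead.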
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
// affinity map. On AIX, the map is obtained through system SRAD (Scheduler
// Resource Allocation Domain).
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;

#if KMP_OS_AIX
  unsigned num_records = __kmp_xproc;
#else
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  }

  kmp_safe_raii_file_t f(filename, "r", envvar);

  // Scan of the file, and count the number of "processor" (osId) fields,
  // and find the highest value of <n> for a node_<n> field.
  char buf[256];
  unsigned num_records = 0;
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors presumably because of EOF
      break;
    }

    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      num_records++;
      continue;
    }

    // FIXME - this will match "node_<n> <garbage>"
    unsigned level;
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // validate the input first:
      if (level > (unsigned)__kmp_xproc) { // level is too big
        level = __kmp_xproc;
      }
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
      }
    }
  }

  // Check for empty file / no valid processor records, or too many. The number
  // of records can't exceed the number of valid bits in the affinity mask.
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    return false;
  }
  if (num_records > (unsigned)__kmp_xproc) {
    *msg_id = kmp_i18n_str_TooManyProcRecords;
    return false;
  }

  // Set the file pointer back to the beginning, so that we can scan the file
  // again, this time performing a full parse of the data. Allocate a vector of
  // ProcCpuInfo object, where we will place the data. Adding an extra element
  // at the end allows us to remove a lot of extra checks for termination
  // conditions.
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
    return false;
  }
#endif // KMP_OS_AIX
  // Allocate the array of records to store the proc info in. The dummy
  // element at the end makes the logic in filling them out easier to code.
  unsigned i;
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  for (i = 0; i <= num_records; i++) {
    threadInfo[i] =
        (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  }

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                         \
    __kmp_free(threadInfo[i]);                                                 \
  }                                                                            \
  __kmp_free(threadInfo);

  // A value of UINT_MAX means that we didn't find the field
  unsigned __index;

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                          \
    (p)[__index] = UINT_MAX;                                                   \
  }

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  }
#if KMP_OS_AIX
  int smt_threads;
  lpar_info_format1_t cpuinfo;
  unsigned num_avail = __kmp_xproc;

  if (__kmp_affinity.flags.verbose)
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "system info for topology");

  // Get the number of SMT threads per core.
  smt_threads = syssmt(GET_NUMBER_SMT_SETS, 0, 0, NULL);

  // Allocate a resource set containing available system resourses.
  rsethandle_t sys_rset = rs_alloc(RS_SYSTEM);
  if (sys_rset == NULL) {
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }
  // Allocate a resource set for the SRAD info.
  rsethandle_t srad = rs_alloc(RS_EMPTY);
  if (srad == NULL) {
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }

  // Get the SRAD system detail level.
  int sradsdl = rs_getinfo(NULL, R_SRADSDL, 0);
  if (sradsdl < 0) {
    rs_free(sys_rset);
    rs_free(srad);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }
  // Get the number of RADs at that SRAD SDL.
  int num_rads = rs_numrads(sys_rset, sradsdl, 0);
  if (num_rads < 0) {
    rs_free(sys_rset);
    rs_free(srad);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }

  // Get the maximum number of procs that may be contained in a resource set.
  int max_procs = rs_getinfo(NULL, R_MAXPROCS, 0);
  if (max_procs < 0) {
    rs_free(sys_rset);
    rs_free(srad);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }

  int cur_rad = 0;
  int num_set = 0;
  for (int srad_idx = 0; cur_rad < num_rads && srad_idx < VMI_MAXRADS;
       ++srad_idx) {
    // Check if the SRAD is available in the RSET.
    if (rs_getrad(sys_rset, srad, sradsdl, srad_idx, 0) < 0)
      continue;

    for (int cpu = 0; cpu < max_procs; cpu++) {
      // Set the info for the cpu if it is in the SRAD.
      if (rs_op(RS_TESTRESOURCE, srad, NULL, R_PROCS, cpu)) {
        threadInfo[cpu][osIdIndex] = cpu;
        threadInfo[cpu][pkgIdIndex] = cur_rad;
        threadInfo[cpu][coreIdIndex] = cpu / smt_threads;
        ++num_set;
        if (num_set >= num_avail) {
          // Done if all available CPUs have been set.
          break;
        }
      }
    }
    ++cur_rad;
  }
  rs_free(sys_rset);
  rs_free(srad);

  // The topology is already sorted.

#else // !KMP_OS_AIX
  unsigned num_avail = 0;
  *line = 0;
#if KMP_ARCH_S390X
  bool reading_s390x_sys_info = true;
#endif
  while (!feof(f)) {
    // Create an inner scoping level, so that all the goto targets at the end of
    // the loop appear in an outer scoping level. This avoids warnings about
    // jumping past an initialization to a target in the same block.
    {
      buf[sizeof(buf) - 1] = 1;
      bool long_line = false;
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors presumably because of EOF
        // If there is valid data in threadInfo[num_avail], then fake
        // a blank line to ensure that the last address gets parsed.
        bool valid = false;
        for (i = 0; i <= maxIndex; i++) {
          if (threadInfo[num_avail][i] != UINT_MAX) {
            valid = true;
          }
        }
        if (!valid) {
          break;
        }
        buf[0] = 0;
      } else if (!buf[sizeof(buf) - 1]) {
        // The line is longer than the buffer. Set a flag and don't
        // emit an error if we were going to ignore the line, anyway.
        long_line = true;

#define CHECK_LINE                                                             \
  if (long_line) {                                                             \
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
    return false;                                                              \
  }
      }
      (*line)++;

#if KMP_ARCH_LOONGARCH64
      // The parsing logic of /proc/cpuinfo in this function highly depends on
      // the blank lines between each processor info block. But on LoongArch a
      // blank line exists before the first processor info block (i.e. after the
      // "system type" line). This blank line was added because the "system
      // type" line is unrelated to any of the CPUs. We must skip this line so
      // that the original logic works on LoongArch.
      if (*buf == '\n' && *line == 2)
        continue;
#endif
#if KMP_ARCH_S390X
      // s390x /proc/cpuinfo starts with a variable number of lines containing
      // the overall system information. Skip them.
      if (reading_s390x_sys_info) {
        if (*buf == '\n')
          reading_s390x_sys_info = false;
        continue;
      }
#endif

#if KMP_ARCH_S390X
      char s1[] = "cpu number";
#else
      char s1[] = "processor";
#endif
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
#if KMP_ARCH_AARCH64
          // Handle the old AArch64 /proc/cpuinfo layout differently,
          // it contains all of the 'processor' entries listed in a
          // single 'Processor' section, therefore the normal looking
          // for duplicates in that section will always fail.
          num_avail++;
#else
          goto dup_field;
#endif
        threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
        char path[256];
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

#if KMP_ARCH_S390X
        // Disambiguate physical_package_id.
        unsigned book_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/book_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &book_id);
        threadInfo[num_avail][pkgIdIndex] |= (book_id << 8);

        unsigned drawer_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/drawer_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &drawer_id);
        threadInfo[num_avail][pkgIdIndex] |= (drawer_id << 16);
#endif

        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
        continue;
#else
      }
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][pkgIdIndex] = val;
        continue;
      }
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][coreIdIndex] = val;
        continue;
#endif // KMP_OS_LINUX && USE_SYSFS_INFO
      }
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][threadIdIndex] = val;
        continue;
      }
      unsigned level;
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        // validate the input before using level:
        if (level > (unsigned)__kmp_xproc) { // level is too big
          level = __kmp_xproc;
        }
        if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][nodeIdIndex + level] = val;
        continue;
      }

      // We didn't recognize the leading token on the line. There are lots of
      // leading tokens that we don't recognize - if the line isn't empty, go on
      // to the next line.
      if ((*buf != 0) && (*buf != '\n')) {
        // If the line is longer than the buffer, read characters
        // until we find a newline.
        if (long_line) {
          int ch;
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
            ;
        }
        continue;
      }
      // A newline has signalled the end of the processor record.
      // Check that there aren't too many procs specified.
      if ((int)num_avail == __kmp_xproc) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_TooManyEntries;
        return false;
      }

      // Check for missing fields. The osId field must be there. The physical
      // id field will be checked later.
      if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingProcField;
        return false;
      }

      // Skip this proc if it is not included in the machine model.
      if (KMP_AFFINITY_CAPABLE() &&
          !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
                         __kmp_affin_fullMask)) {
        INIT_PROC_INFO(threadInfo[num_avail]);
        continue;
      }

      // We have a successful parse of this proc's info.
      // Increment the counter, and prepare for the next proc.
      num_avail++;
      KMP_ASSERT(num_avail <= num_records);
      INIT_PROC_INFO(threadInfo[num_avail]);
    }
    continue;

  no_val:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_MissingValCpuinfo;
    return false;

  dup_field:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
    return false;
  }
  *line = 0;
  // At least on powerpc, Linux may return -1 for physical_package_id. Try
  // to reconstruct topology from core_siblings_list in that case.
  for (i = 0; i < num_avail; ++i) {
    if (threadInfo[i][pkgIdIndex] == UINT_MAX) {
      if (!__kmp_package_id_from_core_siblings_list(threadInfo, num_avail, i)) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingPhysicalIDField;
        return false;
      }
    }
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  // check for num_records == __kmp_xproc ???

  // If it is configured to omit the package level when there is only a single
  // package, the logic at the end of this routine won't work if there is only a
  // single thread
  KMP_ASSERT(num_avail > 0);
  KMP_ASSERT(num_avail <= num_records);

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);

#endif // KMP_OS_AIX
  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields. pkgId's may be sparsely assigned among
  // the chips on a system. Although coreId's are usually assigned
  // [0 .. coresPerPkg-1] and threadId's are usually assigned
  // [0..threadsPerCore-1], we don't want to make any such assumptions.
  //
  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
  // total # packages) are at this point - we want to determine that now. We
  // only have an upper bound on the first two figures.
  unsigned *counts =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *maxCt =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *totals =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *lastId =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));

  bool assign_thread_ids = false;
  unsigned threadIdCt;
  unsigned index;

restart_radix_check:
  threadIdCt = 0;
3680 if (assign_thread_ids
) {
3681 if (threadInfo
[0][threadIdIndex
] == UINT_MAX
) {
3682 threadInfo
[0][threadIdIndex
] = threadIdCt
++;
3683 } else if (threadIdCt
<= threadInfo
[0][threadIdIndex
]) {
3684 threadIdCt
= threadInfo
[0][threadIdIndex
] + 1;
3687 for (index
= 0; index
<= maxIndex
; index
++) {
3691 lastId
[index
] = threadInfo
[0][index
];
  // Run through the rest of the OS procs.
  for (i = 1; i < num_avail; i++) {
    // Find the most significant index whose id differs from the id for the
    // previous OS proc.
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        // Auto-assign the thread id field if it wasn't specified.
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        }
        // Apparently the thread id field was specified for some entries and
        // not others. Start the thread id counter off at the next higher
        // thread id.
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
        }
      }
      if (threadInfo[i][index] != lastId[index]) {
        // Run through all indices which are less significant, and reset the
        // counts to 1. At all levels up to and including index, we need to
        // increment the totals and record the last id.
        unsigned index2;
        for (index2 = threadIdIndex; index2 < index; index2++) {
          totals[index2]++;
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          }
          counts[index2] = 1;
          lastId[index2] = threadInfo[i][index2];
        }
        counts[index]++;
        totals[index]++;
        lastId[index] = threadInfo[i][index];
        if (assign_thread_ids && (index > threadIdIndex)) {

#if KMP_MIC && REDUCE_TEAM_SIZE
          // The default team size is the total #threads in the machine
          // minus 1 thread for every core that has 3 or more threads.
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

          // Restart the thread counter, as we are on a new core.
          threadIdCt = 0;

          // Auto-assign the thread id field if it wasn't specified.
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          }
          // Apparently the thread id field was specified for some entries and
          // not others. Start the thread id counter off at the next higher
          // thread id.
          else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
          }
        }
        break;
      }
    }
    if (index < threadIdIndex) {
      // If thread ids were specified, it is an error if they are not unique.
      // Also, check that we haven't already restarted the loop (to be safe -
      // shouldn't need to).
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        __kmp_free(lastId);
        __kmp_free(totals);
        __kmp_free(maxCt);
        __kmp_free(counts);
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
        return false;
      }

      // If the thread ids were not specified and we see entries that
      // are duplicates, start the loop over and assign the thread ids
      // manually.
      assign_thread_ids = true;
      goto restart_radix_check;
    }
  }
#if KMP_MIC && REDUCE_TEAM_SIZE
  // The default team size is the total #threads in the machine
  // minus 1 thread for every core that has 3 or more threads.
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE
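
// Illustrative arithmetic for the REDUCE_TEAM_SIZE formula above: a core
// carrying 1 or 2 hw threads contributes all of them to teamSize, while a
// core carrying 4 hw threads contributes only 3, so the default team size
// leaves one thread idle on each heavily multi-threaded core.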
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
    }
  }
  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
  nCoresPerPkg = maxCt[coreIdIndex];
  nPackages = totals[pkgIdIndex];
  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly, and return now if affinity is
  // not enabled.
  __kmp_ncores = totals[coreIdIndex];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    return true;
  }
#if KMP_MIC && REDUCE_TEAM_SIZE
  // Set the default team size.
  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
    __kmp_dflt_team_nth = teamSize;
    KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                  "__kmp_dflt_team_nth = %d\n",
                  __kmp_dflt_team_nth));
  }
#endif // KMP_MIC && REDUCE_TEAM_SIZE
  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
  // Count the number of levels which have more nodes at that level than at the
  // parent's level (with there being an implicit root node of the top level).
  // This is equivalent to saying that there is at least one node at this level
  // which has a sibling. These levels are in the map, and the package level is
  // always in the map.
  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  }
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;
  int depth = 0;
  int idx = 0;
  kmp_hw_t types[KMP_HW_LAST];
  int pkgLevel = -1;
  int coreLevel = -1;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (inMap[index]) {
      depth++;
    }
  }
  if (inMap[pkgIdIndex]) {
    pkgLevel = idx;
    types[idx++] = KMP_HW_SOCKET;
  }
  if (inMap[coreIdIndex]) {
    coreLevel = idx;
    types[idx++] = KMP_HW_CORE;
  }
  if (inMap[threadIdIndex]) {
    threadLevel = idx;
    types[idx++] = KMP_HW_THREAD;
  }
  KMP_ASSERT(depth > 0);
  // Construct the data structure that is to be returned.
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);
  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    int src_index;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();
    hw_thread.os_id = os;
    hw_thread.original_idx = i;

    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
        continue;
      }
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
      }
    }
  }
  __kmp_free(inMap);
  __kmp_free(lastId);
  __kmp_free(totals);
  __kmp_free(maxCt);
  __kmp_free(counts);
  CLEANUP_THREAD_INFO;
  __kmp_topology->sort_ids();
  int tlevel = __kmp_topology->get_level(KMP_HW_THREAD);
  if (tlevel > 0) {
    // If the thread level does not have ids, then put them in.
    if (__kmp_topology->at(0).ids[tlevel] == kmp_hw_thread_t::UNKNOWN_ID) {
      __kmp_topology->at(0).ids[tlevel] = 0;
    }
    for (int i = 1; i < __kmp_topology->get_num_hw_threads(); ++i) {
      kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
      if (hw_thread.ids[tlevel] != kmp_hw_thread_t::UNKNOWN_ID)
        continue;
      kmp_hw_thread_t &prev_hw_thread = __kmp_topology->at(i - 1);
      // Check if socket, core, anything above thread level changed.
      // If the ids did change, then restart thread id at 0
      // Otherwise, set thread id to prev thread's id + 1
      for (int j = 0; j < tlevel; ++j) {
        if (hw_thread.ids[j] != prev_hw_thread.ids[j]) {
          hw_thread.ids[tlevel] = 0;
          break;
        }
      }
      if (hw_thread.ids[tlevel] == kmp_hw_thread_t::UNKNOWN_ID)
        hw_thread.ids[tlevel] = prev_hw_thread.ids[tlevel] + 1;
    }
  }
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
    return false;
  }
  return true;
}
// Create and return a table of affinity masks, indexed by OS thread ID.
// This routine handles OR'ing together all the affinity masks of threads
// that are sufficiently close, if granularity > fine.
template <typename FindNextFunctionType>
static void __kmp_create_os_id_masks(unsigned *numUnique,
                                     kmp_affinity_t &affinity,
                                     FindNextFunctionType find_next) {
  // First form a table of affinity masks in order of OS thread id.
  int maxOsId = 0;
  int i;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  int depth = __kmp_topology->get_depth();
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  KMP_ASSERT(numAddrs);
  // If could not find HW thread location that satisfies find_next conditions,
  // then return and fallback to increment find_next.
  i = find_next(-1);
  if (i >= numAddrs)
    return;

  // Find the maximum OS Id.
  for (i = numAddrs - 1;; --i) {
    int osId = __kmp_topology->at(i).os_id;
    if (osId > maxOsId) {
      maxOsId = osId;
    }
    if (i == 0)
      break;
  }
  affinity.num_os_id_masks = maxOsId + 1;
  KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks);
  KMP_ASSERT(affinity.gran_levels >= 0);
  if (affinity.flags.verbose && (affinity.gran_levels > 0)) {
    KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels);
  }
  if (affinity.gran_levels >= (int)depth) {
    KMP_AFF_WARNING(affinity, AffThreadsMayMigrate);
  }
  // Run through the table, forming the masks for all threads on each core.
  // Threads on the same core will have identical kmp_hw_thread_t objects, not
  // considering the last level, which must be the thread id. All threads on a
  // core will appear consecutively.
  int unique = 0;
  int j = 0; // index of 1st thread on core
  int leader = 0;
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  KMP_CPU_ZERO(sum);
  i = j = leader = find_next(-1);
  KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  kmp_full_mask_modifier_t full_mask;
  for (i = find_next(i); i < numAddrs; i = find_next(i)) {
    // If this thread is sufficiently close to the leader (within the
    // granularity setting), then set the bit for this os thread in the
    // affinity mask for this group, and go on to the next thread.
    if (__kmp_topology->is_close(leader, i, affinity)) {
      KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
      continue;
    }

    // For every thread in this group, copy the mask to the thread's entry in
    // the OS Id mask table. Mark the first address as a leader.
    for (; j < i; j = find_next(j)) {
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
    }
    unique++;

    // Start a new mask.
    leader = i;
    full_mask.include(sum);
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  }
  // For every thread in the last group, copy the mask to the thread's
  // entry in the OS Id mask table.
  for (; j < i; j = find_next(j)) {
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
  }
  unique++;
  full_mask.include(sum);
  KMP_CPU_FREE_FROM_STACK(sum);
  // See if the OS Id mask table further restricts or changes the full mask
  if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
    __kmp_topology->print(env_var);
  }

  *numUnique = unique;
}
// Stuff for the affinity proclist parsers. It's easier to declare these vars
// as file-static than to try and pass them through the calling sequence of
// the recursive-descent OMP_PLACES parser.
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;
#define ADD_MASK(_mask)                                                        \
  {                                                                            \
    if (nextNewMask >= numNewMasks) {                                          \
      int i;                                                                   \
      numNewMasks *= 2;                                                        \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      }                                                                        \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
      newMasks = temp;                                                         \
    }                                                                          \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
    nextNewMask++;                                                             \
  }
#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
  {                                                                            \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId);                \
    } else {                                                                   \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
    }                                                                          \
  }
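
// Growth policy of ADD_MASK above, worked through: the parsers below start
// with numNewMasks == 2 and double on demand (2 -> 4 -> 8 -> ...); each
// doubling copies the old numNewMasks / 2 masks into the new array and frees
// the old one, so appending N masks costs O(N) copies in total.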
// Re-parse the proclist (for the explicit affinity type), and form the list
// of affinity newMasks indexed by gtid.
static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) {
  int i;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *proclist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = proclist;
  const char *next = proclist;

  // We use malloc() for the temporary mask vector, so that we can use
  // realloc() to extend it.
  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
  int setSize = 0;
  for (;;) {
    int start, end, stride;

    SKIP_WS(scan);
    next = scan;
    if (*next == '\0') {
      break;
    }
    if (*next == '{') {
      int num;
      setSize = 0;
      next++; // skip '{'
      SKIP_WS(next);
      scan = next;

      // Read the first integer in the set.
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");

      // Copy the mask for that osId to the sum (union) mask.
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        KMP_CPU_ZERO(sumMask);
      } else {
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        setSize = 1;
      }
4121 // Skip optional comma.
4127 // Read the next integer in the set.
4129 KMP_ASSERT2((*next
>= '0') && (*next
<= '9'), "bad explicit proc list");
4132 num
= __kmp_str_to_int(scan
, *next
);
4133 KMP_ASSERT2(num
>= 0, "bad explicit proc list");
4135 // Add the mask for that osId to the sum mask.
4136 if ((num
> maxOsId
) ||
4137 (!KMP_CPU_ISSET(num
, KMP_CPU_INDEX(osId2Mask
, num
)))) {
4138 KMP_AFF_WARNING(affinity
, AffIgnoreInvalidProcID
, num
);
4140 KMP_CPU_UNION(sumMask
, KMP_CPU_INDEX(osId2Mask
, num
));
      // Read the first integer.
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      SKIP_DIGITS(next);
      start = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(start >= 0, "bad explicit proc list");
      SKIP_WS(next);

      // If this isn't a range, then add a mask to the list and go on.
      if (*next != '-') {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);

        // Skip optional comma.
        if (*next == ',') {
          next++;
        }
        scan = next;
      } else {
        // This is a range. Skip over the '-' and read in the 2nd int.
        next++; // skip '-'
        SKIP_WS(next);
        scan = next;
        KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                    "bad explicit proc list");
        SKIP_DIGITS(next);
        end = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(end >= 0, "bad explicit proc list");
        // Check for a stride parameter.
        stride = 1;
        SKIP_WS(next);
        if (*next == ':') {
          // A stride is specified. Skip over the ':' and read the 3rd int.
          int sign = +1;
          next++; // skip ':'
          SKIP_WS(next);
          scan = next;
          if (*next == '-') {
            sign = -1;
            next++;
            SKIP_WS(next);
            scan = next;
          }
          KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                      "bad explicit proc list");
          SKIP_DIGITS(next);
          stride = __kmp_str_to_int(scan, *next);
          KMP_ASSERT2(stride >= 0, "bad explicit proc list");
          stride *= sign;
        }
        // Do some range checks.
        KMP_ASSERT2(stride != 0, "bad explicit proc list");
        if (stride > 0) {
          KMP_ASSERT2(start <= end, "bad explicit proc list");
        } else {
          KMP_ASSERT2(start >= end, "bad explicit proc list");
        }
        KMP_ASSERT2((end - start) / stride <= 65536,
                    "bad explicit proc list");
        // Add the mask for each OS proc # to the list.
        if (stride > 0) {
          do {
            ADD_MASK_OSID(start, osId2Mask, maxOsId);
            start += stride;
          } while (start <= end);
        } else {
          do {
            ADD_MASK_OSID(start, osId2Mask, maxOsId);
            start += stride;
          } while (start >= end);
        }
        // Skip optional comma.
        SKIP_WS(next);
        if (*next == ',') {
          next++;
        }
        scan = next;
      }
    }
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    KMP_CPU_FREE(sumMask);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
}
/*-----------------------------------------------------------------------------
Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
places. Again, here is the grammar:

place_list := place
place_list := place , place_list
place := num
place := place : num
place := place : num : signed
place := { subplace_list }
place := ! place // (lowest priority)
subplace_list := subplace
subplace_list := subplace , subplace_list
subplace := num
subplace := num : num
subplace := num : num : signed
signed := num
signed := + signed
signed := - signed
-----------------------------------------------------------------------------*/
static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affinity_t &affinity, int maxOsId,
                                        kmp_affin_mask_t *tempMask,
                                        int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  for (;;) {
    int start, count, stride, i;

    // Read in the starting proc id
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(start >= 0);
    *scan = next;
    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
      } else {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
        (*setSize)++;
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read count parameter
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(count >= 0);
    *scan = next;
    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start++;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read stride parameter
    int sign = +1;
    for (;;) {
      SKIP_WS(*scan);
      if (**scan == '+') {
        (*scan)++; // skip '+'
        continue;
      }
      if (**scan == '-') {
        sign *= -1;
        (*scan)++; // skip '-'
        continue;
      }
      break;
    }
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(stride >= 0);
    *scan = next;
    stride *= sign;
    // valid follow sets are ',' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start += stride;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity,
                                int maxOsId, kmp_affin_mask_t *tempMask,
                                int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  // valid follow sets are '{' '!' and num
  SKIP_WS(*scan);
  if (**scan == '{') {
    (*scan)++; // skip '{'
    __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    (*scan)++; // skip '}'
  } else if (**scan == '!') {
    (*scan)++; // skip '!'
    __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask);
  } else if ((**scan >= '0') && (**scan <= '9')) {
    next = *scan;
    SKIP_DIGITS(next);
    int num = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(num >= 0);
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
      (*setSize)++;
    }
    *scan = next; // skip num
  } else {
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) {
  int i, j, count, stride, sign;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *placelist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = placelist;
  const char *next = placelist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;

  // tempMask is modified based on the previous or initial
  // place to form the current place
  // previousMask contains the previous place
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  int setSize = 0;

  for (;;) {
    __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      if (setSize > 0) {
        ADD_MASK(tempMask);
      }
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      if (*scan == '\0') {
        break;
      }
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    scan++; // skip ':'
    // Read count parameter
    SKIP_WS(scan);
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
    KMP_ASSERT(count >= 0);
    scan = next;
    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      stride = +1;
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      scan++; // skip ':'

      // Read stride parameter
      sign = +1;
      for (;;) {
        SKIP_WS(scan);
        if (*scan == '+') {
          scan++; // skip '+'
          continue;
        }
        if (*scan == '-') {
          sign *= -1;
          scan++; // skip '-'
          continue;
        }
        break;
      }
      SKIP_WS(scan);
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
                  "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_DEBUG_ASSERT(stride >= 0);
      scan = next;
      stride *= sign;
    }
    // Add places determined by initial_place : count : stride
    for (i = 0; i < count; i++) {
      if (setSize == 0) {
        break;
      }
      // Add the current place, then build the next place (tempMask) from that
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          if (i < count - 1) {
            KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride);
          }
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
        setSize++;
      }
    }
    KMP_CPU_ZERO(tempMask);
    setSize = 0;
    // valid follow sets are ',' and EOL
    SKIP_WS(scan);
    if (*scan == '\0') {
      break;
    }
    if (*scan == ',') {
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}

#undef ADD_MASK
#undef ADD_MASK_OSID
// This function figures out the deepest level at which there is at least one
// cluster/core with more than one processing unit bound to it.
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  int core_level = 0;

  for (int i = 0; i < nprocs; i++) {
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          core_level = j - 1;
        }
      }
    }
  }
  return core_level;
}
// This function counts number of clusters/cores at given level.
static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
}
// This function finds to which cluster/core given processing unit is bound.
static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  int core = 0;
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
          core++;
          break;
        }
      }
    }
  }
  return core;
}
// This function finds maximal number of processing units bound to a
// cluster/core at given level.
static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
}
static int *procarr = NULL;
static int __kmp_aff_depth = 0;
static int *__kmp_osid_to_hwthread_map = NULL;
static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask,
                                                  kmp_affinity_ids_t &ids,
                                                  kmp_affinity_attrs_t &attrs) {
  if (!KMP_AFFINITY_CAPABLE())
    return;

  // Initialize ids and attrs thread data
  for (int i = 0; i < KMP_HW_LAST; ++i)
    ids.ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
  attrs = KMP_AFFINITY_ATTRS_UNKNOWN;
  // Iterate through each os id within the mask and determine
  // the topology id and attribute information
  int cpu;
  int depth = __kmp_topology->get_depth();
  KMP_CPU_SET_ITERATE(cpu, mask) {
    int osid_idx = __kmp_osid_to_hwthread_map[cpu];
    ids.os_id = cpu;
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(osid_idx);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = __kmp_topology->get_type(level);
      int id = hw_thread.sub_ids[level];
      if (ids.ids[type] == kmp_hw_thread_t::UNKNOWN_ID ||
          ids.ids[type] == id) {
        ids.ids[type] = id;
      } else {
        // This mask spans across multiple topology units, set it as such
        // and mark every level below as such as well.
        ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        for (; level < depth; ++level) {
          kmp_hw_t type = __kmp_topology->get_type(level);
          ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        }
      }
    }
    if (!attrs.valid) {
      attrs.core_type = hw_thread.attrs.get_core_type();
      attrs.core_eff = hw_thread.attrs.get_core_eff();
      attrs.valid = 1;
    } else {
      // This mask spans across multiple attributes, set it as such
      if (attrs.core_type != hw_thread.attrs.get_core_type())
        attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      if (attrs.core_eff != hw_thread.attrs.get_core_eff())
        attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF;
    }
  }
}
static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  const kmp_affin_mask_t *mask = th->th.th_affin_mask;
  kmp_affinity_ids_t &ids = th->th.th_topology_ids;
  kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs;
  __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
}
// Assign the topology information to each place in the place list.
// A thread can then grab not only its affinity mask, but the topology
// information associated with that mask. e.g., Which socket is a thread on
static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  if (affinity.type != affinity_none) {
    KMP_ASSERT(affinity.num_os_id_masks);
    KMP_ASSERT(affinity.os_id_masks);
  }
  KMP_ASSERT(affinity.num_masks);
  KMP_ASSERT(affinity.masks);
  KMP_ASSERT(__kmp_affin_fullMask);
  int max_cpu = __kmp_affin_fullMask->get_max_cpu();
  int num_hw_threads = __kmp_topology->get_num_hw_threads();

  // Allocate thread topology information
  if (!affinity.ids) {
    affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(
        sizeof(kmp_affinity_ids_t) * affinity.num_masks);
  }
  if (!affinity.attrs) {
    affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate(
        sizeof(kmp_affinity_attrs_t) * affinity.num_masks);
  }
  if (!__kmp_osid_to_hwthread_map) {
    // Want the +1 because max_cpu should be valid index into map
    __kmp_osid_to_hwthread_map =
        (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1));
  }
  // Create the OS proc to hardware thread map
  for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread) {
    int os_id = __kmp_topology->at(hw_thread).os_id;
    if (KMP_CPU_ISSET(os_id, __kmp_affin_fullMask))
      __kmp_osid_to_hwthread_map[os_id] = hw_thread;
  }
  for (unsigned i = 0; i < affinity.num_masks; ++i) {
    kmp_affinity_ids_t &ids = affinity.ids[i];
    kmp_affinity_attrs_t &attrs = affinity.attrs[i];
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i);
    __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
  }
}
// Called when __kmp_topology is ready
static void __kmp_aux_affinity_initialize_other_data(kmp_affinity_t &affinity) {
  // Initialize other data structures which depend on the topology
  if (__kmp_topology && __kmp_topology->get_num_hw_threads()) {
    machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
    __kmp_affinity_get_topology_info(affinity);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
    __kmp_first_osid_with_ecore = __kmp_get_first_osid_with_ecore();
#endif
  }
}
// Create a one element mask array (set of places) which only contains the
// initial process's affinity mask
static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(affinity.type == affinity_none);
  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
  affinity.num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
  __kmp_aux_affinity_initialize_other_data(affinity);
}
static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) {
  // Create the "full" mask - this defines all of the processors that we
  // consider to be in the machine model. If respect is set, then it is the
  // initialization thread's affinity mask. Otherwise, it is all processors
  // that we know about on the machine.
  int verbose = affinity.flags.verbose;
  const char *env_var = affinity.env_var;
  // Already initialized
  if (__kmp_affin_fullMask && __kmp_affin_origMask)
    return;

  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (__kmp_affin_origMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_origMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    // Make a copy before possible expanding to the entire machine mask
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
    if (affinity.flags.respect) {
      // Count the number of available processors.
      unsigned i;
      __kmp_avail_proc = 0;
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        __kmp_avail_proc++;
      }
      if (__kmp_avail_proc > __kmp_xproc) {
        KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
        affinity.type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }
      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetRespect, env_var, buf);
      }
    } else {
      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetNotRespect, env_var, buf);
      }
      __kmp_avail_proc =
          __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups <= 1) {
        // Copy expanded full mask if topology has single processor group
        __kmp_affin_origMask->copy(__kmp_affin_fullMask);
      }
      // Set the process affinity mask since threads' affinity
      // masks must be subset of process mask in Windows* OS
      __kmp_affin_fullMask->set_process_affinity(true);
#endif
    }
  }
}
static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) {
  bool success = false;
  const char *env_var = affinity.env_var;
  kmp_i18n_id_t msg_id = kmp_i18n_null;
  int verbose = affinity.flags.verbose;
  // For backward compatibility, setting KMP_CPUINFO_FILE =>
  // KMP_TOPOLOGY_METHOD=cpuinfo
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }
  if (__kmp_affinity_top_method == affinity_top_method_all) {
    // In the default code path, errors are not fatal - we just try using
    // another method. We only emit a warning message if affinity is on, or
    // the verbose flag is set, and the nowarnings flag was not set.
#if KMP_USE_HWLOC
    if (!success &&
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && verbose) {
          KMP_INFORM(AffIgnoringHwloc, env_var);
        }
      } else if (verbose) {
        KMP_INFORM(AffIgnoringHwloc, env_var);
      }
    }
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
#if KMP_OS_LINUX || KMP_OS_AIX
    if (!success) {
      int line = 0;
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_OS_LINUX */
#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_GROUP_AFFINITY */
    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
      KMP_ASSERT(success);
    }
  }
  // If the user has specified that a particular topology discovery method is
  // to be used, then we abort if that method fails. The exception is group
  // affinity, which might have been implicitly set.
#if KMP_USE_HWLOC
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif // KMP_USE_HWLOC
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    int line = 0;
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      const char *filename = __kmp_cpuinfo_get_filename();
      if (line > 0) {
        KMP_FATAL(FileLineMsgExiting, filename, line,
                  __kmp_i18n_catgets(msg_id));
      } else {
        KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
      }
    }
  }
#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    KMP_ASSERT(success);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_GROUP_AFFINITY */
  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // should not fail
    KMP_ASSERT(success);
  }
  // Early exit if topology could not be created
  if (!__kmp_topology) {
    if (KMP_AFFINITY_CAPABLE()) {
      KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
    }
    if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
        __kmp_ncores > 0) {
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
      if (verbose) {
        __kmp_topology->print(env_var);
      }
    }
    return false;
  }
  // Canonicalize, print (if requested), apply KMP_HW_SUBSET
  __kmp_topology->canonicalize();
  if (verbose)
    __kmp_topology->print(env_var);
  bool filtered = __kmp_topology->filter_hw_subset();
  if (filtered && verbose)
    __kmp_topology->print("KMP_HW_SUBSET");
  return success;
}
static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) {
  bool is_regular_affinity = (&affinity == &__kmp_affinity);
  bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity);
  const char *env_var = __kmp_get_affinity_env_var(affinity);

  if (affinity.flags.initialized) {
    KMP_ASSERT(__kmp_affin_fullMask != NULL);
    return;
  }
  if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask))
    __kmp_aux_affinity_initialize_masks(affinity);

  if (is_regular_affinity && !__kmp_topology) {
    bool success = __kmp_aux_affinity_initialize_topology(affinity);
    if (success) {
      KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
    } else {
      affinity.type = affinity_none;
      KMP_AFFINITY_DISABLE();
    }
  }
  // If KMP_AFFINITY=none, then only create the single "none" place
  // which is the process's initial affinity mask or the number of
  // hardware threads depending on respect,norespect
  if (affinity.type == affinity_none) {
    __kmp_create_affinity_none_places(affinity);
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    affinity.flags.initialized = TRUE;
    return;
  }
  __kmp_topology->set_granularity(affinity);
  int depth = __kmp_topology->get_depth();

  // Create the table of masks, indexed by thread Id.
  unsigned numUnique = 0;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  // If OMP_PLACES=cores:<attribute> is specified, then attempt
  // to make the OS Id mask table using those attributes.
  if (affinity.core_attr_gran.valid) {
    __kmp_create_os_id_masks(&numUnique, affinity, [&](int idx) {
      KMP_ASSERT(idx >= -1);
      for (int i = idx + 1; i < numAddrs; ++i)
        if (__kmp_topology->at(i).attrs.contains(affinity.core_attr_gran))
          return i;
      return numAddrs;
    });
    if (!affinity.os_id_masks) {
      const char *core_attribute;
      if (affinity.core_attr_gran.core_eff != kmp_hw_attr_t::UNKNOWN_CORE_EFF)
        core_attribute = "core_efficiency";
      else
        core_attribute = "core_type";
      KMP_AFF_WARNING(affinity, AffIgnoringNotAvailable, env_var,
                      core_attribute,
                      __kmp_hw_get_catalog_string(KMP_HW_CORE,
                                                  /*plural=*/true));
    }
  }
  // If core attributes did not work, or none were specified,
  // then make OS Id mask table using typical incremental way with
  // checking for validity of each id at granularity level specified.
  if (!affinity.os_id_masks) {
    int gran = affinity.gran_levels;
    int gran_level = depth - 1 - affinity.gran_levels;
    if (gran >= 0 && gran_level >= 0 && gran_level < depth) {
      __kmp_create_os_id_masks(
          &numUnique, affinity, [depth, numAddrs, &affinity](int idx) {
            KMP_ASSERT(idx >= -1);
            int gran = affinity.gran_levels;
            int gran_level = depth - 1 - affinity.gran_levels;
            for (int i = idx + 1; i < numAddrs; ++i)
              if ((gran >= depth) ||
                  (gran < depth && __kmp_topology->at(i).ids[gran_level] !=
                                       kmp_hw_thread_t::UNKNOWN_ID))
                return i;
            return numAddrs;
          });
    }
  }
  // Final attempt to make OS Id mask table using typical incremental way.
  if (!affinity.os_id_masks) {
    __kmp_create_os_id_masks(&numUnique, affinity, [](int idx) {
      KMP_ASSERT(idx >= -1);
      return idx + 1;
    });
  }
  switch (affinity.type) {

  case affinity_explicit:
    KMP_DEBUG_ASSERT(affinity.proclist != NULL);
    if (is_hidden_helper_affinity ||
        __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
      __kmp_affinity_process_proclist(affinity);
    } else {
      __kmp_affinity_process_placelist(affinity);
    }
    if (affinity.num_masks == 0) {
      KMP_AFF_WARNING(affinity, AffNoValidProcID);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    }
    break;
  // The other affinity types rely on sorting the hardware threads according
  // to some permutation of the machine topology tree. Set affinity.compact
  // and affinity.offset appropriately, then jump to a common code
  // fragment to do the sort and create the array of affinity masks.
  case affinity_logical:
    affinity.compact = 0;
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;
  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      affinity.compact = 1;
      if (affinity.compact >= depth) {
        affinity.compact = 0;
      }
    } else {
      affinity.compact = 0;
    }
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;
  case affinity_scatter:
    if (affinity.compact >= depth) {
      affinity.compact = 0;
    } else {
      affinity.compact = depth - 1 - affinity.compact;
    }
    goto sortTopology;
  case affinity_compact:
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }
    goto sortTopology;
:
5146 if (depth
<= 1 || is_hidden_helper_affinity
) {
5147 KMP_AFF_WARNING(affinity
, AffBalancedNotAvail
, env_var
);
5148 affinity
.type
= affinity_none
;
5149 __kmp_create_affinity_none_places(affinity
);
5150 affinity
.flags
.initialized
= TRUE
;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further usage
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
        affinity.type = affinity_none;
        __kmp_create_affinity_none_places(affinity);
        affinity.flags.initialized = TRUE;
        return;
      }
      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
    }
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }

  sortTopology:
    // Allocate the gtid->affinity mask table.
    if (affinity.flags.dups) {
      affinity.num_masks = __kmp_avail_proc;
    } else {
      affinity.num_masks = numUnique;
    }

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < affinity.num_masks) &&
        !is_hidden_helper_affinity) {
      affinity.num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
    // Sort the topology table according to the current setting of
    // affinity.compact, then fill out affinity.masks.
    __kmp_topology->sort_compact(affinity);
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      kmp_full_mask_modifier_t full_mask;
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId);
        if (KMP_CPU_ISEMPTY(src))
          continue;
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        full_mask.include(src);
        if (++j >= affinity.num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == affinity.num_masks);
      // See if the places list further restricts or changes the full mask
      if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
        __kmp_topology->print(env_var);
      }
    }
    // Sort the topology back using ids
    __kmp_topology->sort_ids();
    break;
  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }

  __kmp_aux_affinity_initialize_other_data(affinity);
  affinity.flags.initialized = TRUE;
}
void __kmp_affinity_initialize(kmp_affinity_t &affinity) {
  // Much of the code above was written assuming that if a machine was not
  // affinity capable, then affinity type == affinity_none.
  // We now explicitly represent this as affinity type == affinity_disabled.
  // There are too many checks for affinity type == affinity_none in this
  // code. Instead of trying to change them all, check if
  // affinity type == affinity_disabled, and if so, slam it with affinity_none,
  // call the real initialization routine, then restore affinity type to
  // affinity_disabled.
  int disabled = (affinity.type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE())
    KMP_ASSERT(disabled);
  if (disabled)
    affinity.type = affinity_none;
  __kmp_aux_affinity_initialize(affinity);
  if (disabled)
    affinity.type = affinity_disabled;
}
void __kmp_affinity_uninitialize(void) {
  for (kmp_affinity_t *affinity : __kmp_affinities) {
    if (affinity->masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks);
    if (affinity->os_id_masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks);
    if (affinity->proclist != NULL)
      __kmp_free(affinity->proclist);
    if (affinity->ids != NULL)
      __kmp_free(affinity->ids);
    if (affinity->attrs != NULL)
      __kmp_free(affinity->attrs);
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  }
  if (__kmp_affin_origMask != NULL) {
    if (KMP_AFFINITY_CAPABLE()) {
#if KMP_OS_AIX
      // Uninitialize by unbinding the thread.
      bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
#else
      __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
#endif
    }
    KMP_CPU_FREE(__kmp_affin_origMask);
    __kmp_affin_origMask = NULL;
  }
  __kmp_affinity_num_places = 0;
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
  if (__kmp_osid_to_hwthread_map) {
    __kmp_free(__kmp_osid_to_hwthread_map);
    __kmp_osid_to_hwthread_map = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}
static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity,
                                      int *place, kmp_affin_mask_t **mask) {
  int mask_idx;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
  if (is_hidden_helper)
    // The first gtid is the regular primary thread, the second gtid is the
    // main thread of hidden team which does not participate in task execution.
    mask_idx = gtid - 2;
  else
    mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
  KMP_DEBUG_ASSERT(affinity->num_masks > 0);
  *place = (mask_idx + affinity->offset) % affinity->num_masks;
  *mask = KMP_CPU_INDEX(affinity->masks, *place);
}
// This function initializes the per-thread data concerning affinity including
// the mask and topology information
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  // Set the thread topology information to default of unknown
  for (int id = 0; id < KMP_HW_LAST; ++id)
    th->th.th_topology_ids.ids[id] = kmp_hw_thread_t::UNKNOWN_ID;
  th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }
  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }
  // Copy the thread mask to the kmp_info_t structure. If
  // __kmp_affinity.type == affinity_none, copy the "full" mask, i.e.
  // one that has all of the OS proc ids set, or if
  // __kmp_affinity.flags.respect is set, then the full mask is the
  // same as the mask of the initialization thread.
  kmp_affin_mask_t *mask;
  int i;
  const kmp_affinity_t *affinity;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;
  if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) {
    if ((affinity->type == affinity_none) ||
        (affinity->type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  } else {
    if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  }
  th->th.th_current_place = i;
  if (isa_root && !is_hidden_helper) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a Non-OMP_PROC_BIND affinity method,
    // set all threads' place-partition-var to the entire place list
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  }
  // Copy topology information associated with the place
  if (i >= 0) {
    th->th.th_topology_ids = __kmp_affinity.ids[i];
    th->th.th_topology_attrs = __kmp_affinity.attrs[i];
  }
  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to all places\n",
                   gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);
}
void __kmp_affinity_bind_init_mask(int gtid) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }
  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
  const kmp_affinity_t *affinity;
  const char *env_var;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;
  env_var = __kmp_get_affinity_env_var(*affinity, /*for_binding=*/true);
  /* to avoid duplicate printing (will be correctly printed on barrier) */
  if (affinity->flags.verbose && (affinity->type == affinity_none ||
                                  (th->th.th_current_place != KMP_PLACE_ALL &&
                                   affinity->type != affinity_balanced)) &&
      !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
               gtid, buf);
  }
#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed. If the user
  // didn't request affinity and this call fails, just continue silently.
  if (affinity->type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
#ifndef KMP_OS_AIX
    // Do not set the full mask as the init mask on AIX.
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
#endif
}
void __kmp_affinity_bind_place(int gtid) {
  // Hidden helper threads should not be affected by OMP_PLACES/OMP_PROC_BIND
  if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_bind_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));
  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }
  // Copy the thread mask to the kmp_info_t structure,
  // and set this thread's affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;
  if (__kmp_affinity.flags.verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(1000, (""); {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_debug_printf(
        "kmp_set_affinity: setting affinity mask for thread %d = %s\n", gtid,
        buf);
  });
  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }
  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity.num_masks - 1;

  // Turn off 4.0 affinity for the current thread at this parallel level.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}
int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
  kmp_info_t *th;
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
  th = __kmp_threads[gtid];
#else
  (void)gtid; // unused variable
#endif
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KA_TRACE(1000, (""); {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    __kmp_debug_printf(
        "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
        buf);
  });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }
#if !KMP_OS_WINDOWS && !KMP_OS_AIX

  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(1000, (""); {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_debug_printf(
        "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
        buf);
  });
  return retval;

#else
  (void)retval;

  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;

#endif /* !KMP_OS_WINDOWS && !KMP_OS_AIX */
}
int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}
int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(1000, (""); {
    int gtid = __kmp_entry_gtid();
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                       "affinity mask for thread %d = %s\n",
                       proc, gtid, buf);
  });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}

int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }

  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}

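/* Usage sketch (illustrative) for the three per-proc mask editors above,
   exposed as kmp_{set,unset,get}_affinity_mask_proc() in omp.h:

     kmp_affinity_mask_t mask;
     kmp_create_affinity_mask(&mask);
     kmp_set_affinity_mask_proc(2, &mask);         // proc id 2 is an example
     kmp_unset_affinity_mask_proc(2, &mask);
     int r = kmp_get_affinity_mask_proc(2, &mask); // 0 now that bit is clear
     // negative returns signal an out-of-range proc or invalid mask
     kmp_destroy_affinity_mask(&mask);
*/
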
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
// Returns first os proc id with ATOM core
int __kmp_get_first_osid_with_ecore(void) {
  int low = 0;
  int high = __kmp_topology->get_num_hw_threads() - 1;
  int mid = 0;
  while (high - low > 1) {
    mid = (high + low) / 2;
    if (__kmp_topology->at(mid).attrs.get_core_type() ==
        KMP_HW_CORE_TYPE_CORE) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  if (__kmp_topology->at(mid).attrs.get_core_type() == KMP_HW_CORE_TYPE_ATOM) {
    return mid;
  }
  return -1;
}
#endif

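// The binary search above relies on the hardware threads being sorted so
// that all KMP_HW_CORE_TYPE_CORE entries precede the KMP_HW_CORE_TYPE_ATOM
// ones, e.g. (assumed layout) [C C C C A A A A]: the loop converges on the
// boundary, and the final check returns -1 on an all-core machine.
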
// Dynamic affinity settings - Affinity balanced
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;
  const char *env_var = "KMP_AFFINITY";

  // Do not perform balanced affinity for the hidden helper threads
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity.gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1) {
      fine_gran = false;
    }
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1) {
      fine_gran = false;
    }
    break;
  default:
    fine_gran = false;
  }

  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyper threads per core in HT machine
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core
    int chunk = nthreads / ncores;
    // How many cores will have an additional thread bound to it - "big cores"
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
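    // Worked example (assumed figures): nthreads = 10 on ncores = 4 gives
    // chunk = 2, big_cores = 2, big_nth = 6: tids 0-5 map to the two
    // 3-thread cores (coreID = tid / 3), tids 6-9 to the two 2-thread cores.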
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID;
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
                 tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology
    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);

    // For performance gain consider the special case nthreads ==
    // __kmp_avail_proc
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      // Each thread owns the tid-th non-empty core, in procarr[] order
      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check if this core from procarr[] is in the mask
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (in_mask) {
          if (tid == core) {
            for (int j = 0; j < nth_per_core; j++) {
              int osID = procarr[i * nth_per_core + j];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
                // For fine granularity it is enough to set the first available
                // osID for this core
                if (fine_gran) {
                  break;
                }
              }
            }
            break;
          } else {
            core++;
          }
        }
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with "x" available processors;
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with # procs from x to nth_per_core
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }
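      // Worked example (assumed figures): three cores exposing 2, 1 and 2
      // procs (nth_per_core = 2) give ncores_with_x_procs = {0, 1, 2} and
      // suffix sums ncores_with_x_to_max_procs = {3, 3, 2}: all three cores
      // can take a 1st thread, two of them a 2nd.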
      // Max number of processors
      int nproc = nth_per_core * ncores;
      // An array to keep number of threads per each context
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      // Distribute the threads: the first sweep gives each free context one
      // thread; later sweeps (flag != 0) stack extra threads onto contexts.
      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip the core with 0 processors
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else {
                  if (flag != 0) {
                    newarr[i * nth_per_core + k]++;
                    cnt--;
                    nth--;
                    break;
                  }
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }
      // Locate this tid's slot: the prefix sums of newarr[] assign tids to
      // contexts in order
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
      __kmp_free(newarr);
    }
    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
                 tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  }
}

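// Config sketch (illustrative, values assumed): balanced affinity is chosen
// via KMP_AFFINITY, e.g.
//   KMP_AFFINITY=balanced,granularity=fine OMP_NUM_THREADS=10 ./app
// granularity=fine pins each thread to a single hw thread (fine_gran above);
// granularity=core widens each mask to every hw thread of the core.
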
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
    KMP_OS_AIX
// We don't need this entry for Windows because
// there is GetProcessAffinityMask() api
//
// The intended usage is indicated by these steps:
// 1) The user gets the current affinity mask
// 2) Then sets the affinity by calling this function
// 3) Error check the return value
// 4) Use non-OpenMP parallelization
// 5) Reset the affinity to what was stored in step 1)
#ifdef __cplusplus
extern "C"
#endif
    int
    kmp_set_thread_affinity_mask_initial()
// the function returns 0 on success,
//   -1 if we cannot bind thread
//   >0 (errno) if an error happened during binding
{
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-omp threads
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
#if KMP_OS_AIX
  return bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
#else
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
#endif
}
#endif

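/* Usage sketch (illustrative) of the 5-step sequence above; the
   pthread_*affinity_np calls are one assumed way to save/restore on Linux,
   and run_non_openmp_parallel_work() / handle_error() are hypothetical
   user functions:

     cpu_set_t saved;
     pthread_getaffinity_np(pthread_self(), sizeof(saved), &saved); // 1) save
     int rc = kmp_set_thread_affinity_mask_initial();               // 2) widen
     if (rc != 0)                                                   // 3) check
       handle_error(rc);
     run_non_openmp_parallel_work();                                // 4) work
     pthread_setaffinity_np(pthread_self(), sizeof(saved), &saved); // 5) restore
*/
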
#endif // KMP_AFFINITY_SUPPORTED