// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/units.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hw_pressure.h>
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}
bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}
static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}
void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
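
/*
 * Usage sketch (illustrative only): an architecture that exposes per-CPU
 * activity counters can register them as the frequency-invariance source.
 * The callback and variable names below are hypothetical, not part of this
 * file's API.
 *
 *	static void my_arch_set_freq_scale(void)
 *	{
 *		// Read delivered/reference cycle counters for this CPU and
 *		// store their ratio, scaled to SCHED_CAPACITY_SCALE, in
 *		// this CPU's arch_freq_scale.
 *	}
 *
 *	static struct scale_freq_data my_arch_sfd = {
 *		.source = SCALE_FREQ_SOURCE_ARCH,
 *		.set_freq_scale = my_arch_set_freq_scale,
 *	};
 *
 *	// Once the counters are usable on @cpus:
 *	topology_set_scale_freq_source(&my_arch_sfd, cpus);
 */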
void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}
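
/*
 * Worked example (illustrative numbers only): with SCHED_CAPACITY_SHIFT = 10,
 * cur_freq = 1200000 kHz and max_freq = 2000000 kHz give
 * scale = (1200000 << 10) / 2000000 = 614, i.e. roughly 60% of
 * SCHED_CAPACITY_SCALE (1024).
 */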
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}
DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
 * topology_update_hw_pressure() - Update HW pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of HW pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed max CPUs frequency due to
 * HW capping. It might be also a boost frequency value, which is bigger
 * than the internal 'capacity_freq_ref' max frequency. In such case the
 * pressure value should simply be removed, since this is an indication that
 * there is no HW throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_hw_pressure(const struct cpumask *cpus,
				 unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = arch_scale_freq_ref(cpu);

	/*
	 * Handle properly the boost frequencies, which should simply clean
	 * the HW pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	pressure = max_capacity - capacity;

	trace_hw_pressure_update(cpu, pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
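
/*
 * Usage sketch (illustrative only): a cooling or cpufreq driver that caps
 * the maximum frequency of a policy could report the resulting capacity
 * loss like this (the policy and frequency variables are hypothetical):
 *
 *	// @capped_khz: new max frequency in kHz enforced by HW/firmware
 *	topology_update_hw_pressure(policy->related_cpus, capped_khz);
 *
 * Passing a frequency at or above the CPUs' capacity_freq_ref clears the
 * pressure again, since it indicates no throttling is in effect.
 */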
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);
static int cpu_capacity_sysctl_add(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_create_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int cpu_capacity_sysctl_remove(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int register_cpu_capacity_sysctl(void)
{
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
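
/*
 * The attribute registered above is exposed per CPU; from userspace the
 * normalized capacity can be read with, e.g.:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cpu_capacity
 *	1024
 *
 * (the value shown is illustrative only).
 */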
static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}
/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}
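
/*
 * Worked example (illustrative numbers only): a big CPU with
 * capacity-dmips-mhz = 1024 at capacity_freq_ref = 2000000 kHz and a
 * little CPU with 512 at 1500000 kHz give raw products of 2048000000
 * and 768000000. The big CPU defines capacity_scale, so after
 * normalization the big CPU gets 1024 and the little one gets
 * (768000000 << 10) / 2048000000 = 384.
 */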
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update capacity_freq_ref for calculating early boot CPU capacities.
		 * For non-clk CPU DVFS mechanism, there's no way to get the
		 * frequency value now, assuming they are running at the same
		 * frequency (by keeping the initial capacity_freq_ref value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(capacity_freq_ref, cpu) =
				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
				cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}
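
/*
 * Example (illustrative only) of the device-tree property parsed above,
 * attached to a CPU node; the compatible string and phandle are just for
 * illustration:
 *
 *	cpu0: cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0>;
 *		capacity-dmips-mhz = <512>;
 *	};
 */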
void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
}
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

static inline void topology_init_cpu_capacity_cppc(void)
{
	u64 capacity, capacity_scale = 0;
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);

			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);

			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	for_each_possible_cpu(cpu) {
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);

		capacity = raw_capacity[cpu];
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}

	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}

void acpi_processor_init_invariance_cppc(void)
{
	topology_init_cpu_capacity_cppc();
}
#endif
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
	}

	if (cpumask_empty(cpus_to_visit)) {
		if (raw_capacity) {
			topology_normalize_cpu_scale();
			schedule_work(&update_topology_flags_work);
			free_raw_capacity();
		}
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}
static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};
static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);
static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 * there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	int cpu;
	struct device_node *cpu_node __free(device_node) =
		of_parse_phandle(node, "cpu", 0);

	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	return cpu;
}
static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		struct device_node *t __free(device_node) =
			of_get_child_by_name(core, name);

		if (!t)
			break;

		leaf = false;
		cpu = get_cpu_for_node(t);
		if (cpu >= 0) {
			cpu_topology[cpu].package_id = package_id;
			cpu_topology[cpu].cluster_id = cluster_id;
			cpu_topology[cpu].core_id = core_id;
			cpu_topology[cpu].thread_id = i;
		} else if (cpu != -ENODEV) {
			pr_err("%pOF: Can't get CPU for thread\n", t);
			return -EINVAL;
		}
		i++;
	} while (1);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}
static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(cluster, name);

		if (!c)
			break;

		leaf = false;
		ret = parse_cluster(c, package_id, i, depth + 1);
		if (depth > 0)
			pr_warn("Topology for clusters of clusters not yet supported\n");
		if (ret != 0)
			return ret;
		i++;
	} while (1);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(cluster, name);

		if (!c)
			break;

		has_cores = true;

		if (depth == 0) {
			pr_err("%pOF: cpu-map children should be clusters\n", c);
			return -EINVAL;
		}

		if (leaf) {
			ret = parse_core(c, package_id, cluster_id, core_id++);
			if (ret != 0)
				return ret;
		} else {
			pr_err("%pOF: Non-leaf cluster with core %s\n",
			       cluster, name);
			return -EINVAL;
		}

		i++;
	} while (1);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}
static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(socket, name);

		if (!c)
			break;

		has_socket = true;
		ret = parse_cluster(c, package_id, -1, 0);
		if (ret != 0)
			return ret;

		package_id++;
	} while (1);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}
static int __init parse_dt_topology(void)
{
	int ret;
	int cpu;
	struct device_node *cn __free(device_node) =
		of_find_node_by_path("/cpus");

	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return -EINVAL;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	struct device_node *map __free(device_node) =
		of_get_child_by_name(cn, "cpu-map");

	if (!map)
		return -ENODEV;

	ret = parse_socket(map);
	if (ret != 0)
		return ret;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0) {
			return -EINVAL;
		}

	return ret;
}
#endif
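
/*
 * Example (illustrative only) of the /cpus/cpu-map layout parsed above,
 * for one socket with two clusters of two cores each; the cpu phandles are
 * hypothetical:
 *
 *	cpu-map {
 *		socket0 {
 *			cluster0 {
 *				core0 { cpu = <&cpu0>; };
 *				core1 { cpu = <&cpu1>; };
 *			};
 *			cluster1 {
 *				core0 { cpu = <&cpu2>; };
 *				core1 { cpu = <&cpu3>; };
 *			};
 *		};
 *	};
 */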
/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not numa in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}
const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Forbid cpu_clustergroup_mask() to span more or the same CPUs as
	 * cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}
static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}
void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}
void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}
__weak int __init parse_acpi_topology(void)
{
	return 0;
}
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int cpu, ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information. But do not return yet to give
		 * arch-specific early cache level detection a chance to run.
		 */
		reset_cpu_topology();
	}

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);
		if (!ret)
			continue;
		else if (ret != -ENOENT)
			pr_err("Early cacheinfo failed, ret = %d\n", ret);
		return;
	}
}
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif