drivers/base/arch_topology.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/units.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hw_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);

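/*
 * CPUs in scale_freq_counters_mask have a counter-based scale_freq_data
 * source registered, so frequency invariance can be driven from counters
 * for them.
 */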
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

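/*
 * Let the registered counter-based source, if any, update this CPU's
 * frequency scale factor. Typically wired up as arch_scale_freq_tick()
 * by the architecture.
 */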
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

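/* Record the normalized capacity of @cpu; read back via topology_get_cpu_scale(). */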
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, hw_pressure);

/**
 * topology_update_hw_pressure() - Update HW pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of HW pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed max CPUs frequency due to
 * HW capping. It might also be a boost frequency value, which is bigger
 * than the internal 'capacity_freq_ref' max frequency. In such a case the
 * pressure value should simply be removed, since this is an indication that
 * there is no HW throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_hw_pressure(const struct cpumask *cpus,
				 unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = arch_scale_freq_ref(cpu);

	/*
	 * Handle properly the boost frequencies, which should simply clean
	 * the HW pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	pressure = max_capacity - capacity;

	trace_hw_pressure_update(cpu, pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);

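/* sysfs show handler backing the per-CPU cpu_capacity attribute. */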
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int cpu_capacity_sysctl_add(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_create_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int cpu_capacity_sysctl_remove(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int register_cpu_capacity_sysctl(void)
{
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

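/*
 * update_topology is non-zero only while update_topology_flags_workfn()
 * rebuilds the scheduling domains, so that the rebuild picks up updated
 * topology flags; topology_update_cpu_topology() reports it to callers.
 */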
static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

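/*
 * Normalize the raw (capacity-dmips-mhz * reference frequency) values so
 * that the biggest CPU ends up at SCHED_CAPACITY_SCALE and every other CPU
 * is scaled relative to it.
 */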
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update capacity_freq_ref for calculating early boot CPU capacities.
		 * For non-clk CPU DVFS mechanisms there is no way to get the
		 * frequency value now, so assume those CPUs are running at the
		 * same frequency (by keeping the initial capacity_freq_ref value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(capacity_freq_ref, cpu) =
				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
				cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

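/*
 * Weak stub: architectures that implement counter-driven frequency
 * invariance are expected to override this to record the CPU's maximum
 * frequency for later ratio calculations.
 */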
void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

static inline void topology_init_cpu_capacity_cppc(void)
{
	u64 capacity, capacity_scale = 0;
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);

			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);

			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	for_each_possible_cpu(cpu) {
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);

		capacity = raw_capacity[cpu];
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}

	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}

void acpi_processor_init_invariance_cppc(void)
{
	topology_init_cpu_capacity_cppc();
}
#endif

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
	}

	if (cpumask_empty(cpus_to_visit)) {
		if (raw_capacity) {
			topology_normalize_cpu_scale();
			schedule_work(&update_topology_flags_work);
			free_raw_capacity();
		}
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number which is >= 0.
 * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but
 * there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	int cpu;
	struct device_node *cpu_node __free(device_node) =
		of_parse_phandle(node, "cpu", 0);

	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is :%*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		struct device_node *t __free(device_node) =
			of_get_child_by_name(core, name);

		if (!t)
			break;

		leaf = false;
		cpu = get_cpu_for_node(t);
		if (cpu >= 0) {
			cpu_topology[cpu].package_id = package_id;
			cpu_topology[cpu].cluster_id = cluster_id;
			cpu_topology[cpu].core_id = core_id;
			cpu_topology[cpu].thread_id = i;
		} else if (cpu != -ENODEV) {
			pr_err("%pOF: Can't get CPU for thread\n", t);
			return -EINVAL;
		}
		i++;
	} while (1);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(cluster, name);

		if (!c)
			break;

		leaf = false;
		ret = parse_cluster(c, package_id, i, depth + 1);
		if (depth > 0)
			pr_warn("Topology for clusters of clusters not yet supported\n");
		if (ret != 0)
			return ret;
		i++;
	} while (1);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(cluster, name);

		if (!c)
			break;

		has_cores = true;

		if (depth == 0) {
			pr_err("%pOF: cpu-map children should be clusters\n", c);
			return -EINVAL;
		}

		if (leaf) {
			ret = parse_core(c, package_id, cluster_id, core_id++);
			if (ret != 0)
				return ret;
		} else {
			pr_err("%pOF: Non-leaf cluster with core %s\n",
			       cluster, name);
			return -EINVAL;
		}

		i++;
	} while (1);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}

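/*
 * Look for socketN children of cpu-map; if none are present, treat the
 * cpu-map node itself as a single socket (package 0).
 */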
static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(socket, name);

		if (!c)
			break;

		has_socket = true;
		ret = parse_cluster(c, package_id, -1, 0);
		if (ret != 0)
			return ret;

		package_id++;
	} while (1);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}

static int __init parse_dt_topology(void)
{
	int ret = 0;
	int cpu;
	struct device_node *cn __free(device_node) =
		of_find_node_by_path("/cpus");

	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	struct device_node *map __free(device_node) =
		of_get_child_by_name(cn, "cpu-map");

	if (!map)
		return ret;

	ret = parse_socket(map);
	if (ret != 0)
		return ret;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0) {
			return -EINVAL;
		}

	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not numa in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Forbid cpu_clustergroup_mask() to span more or the same CPUs as
	 * cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}

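/*
 * Fold @cpuid into the core/thread/cluster/LLC sibling masks of every
 * online CPU (and vice versa), based on the IDs stored in cpu_topology[].
 */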
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

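/* Reset all sibling masks of @cpu so that each contains only @cpu itself. */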
static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

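/*
 * Weak default for architectures without ACPI topology parsing; returning 0
 * means "nothing parsed", so DT parsing can be attempted instead.
 */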
__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int cpu, ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information. But do not return yet to give
		 * arch-specific early cache level detection a chance to run.
		 */
		reset_cpu_topology();
	}

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);
		if (!ret)
			continue;
		else if (ret != -ENOENT)
			pr_err("Early cacheinfo failed, ret = %d\n", ret);
		return;
	}
}

void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif