/*
 * arch/parisc/kernel/topology.c
 *
 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
 *
 * based on arch/arm/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
13 #include <linux/percpu.h>
14 #include <linux/sched.h>
15 #include <linux/sched/topology.h>
17 #include <asm/topology.h>
22 struct cputopo_parisc cpu_topology
[NR_CPUS
] __read_mostly
;
23 EXPORT_SYMBOL_GPL(cpu_topology
);
25 const struct cpumask
*cpu_coregroup_mask(int cpu
)
27 return &cpu_topology
[cpu
].core_sibling
;
30 static void update_siblings_masks(unsigned int cpuid
)
32 struct cputopo_parisc
*cpu_topo
, *cpuid_topo
= &cpu_topology
[cpuid
];
35 /* update core and thread sibling masks */
36 for_each_possible_cpu(cpu
) {
37 cpu_topo
= &cpu_topology
[cpu
];
39 if (cpuid_topo
->socket_id
!= cpu_topo
->socket_id
)
42 cpumask_set_cpu(cpuid
, &cpu_topo
->core_sibling
);
44 cpumask_set_cpu(cpu
, &cpuid_topo
->core_sibling
);
46 if (cpuid_topo
->core_id
!= cpu_topo
->core_id
)
49 cpumask_set_cpu(cpuid
, &cpu_topo
->thread_sibling
);
51 cpumask_set_cpu(cpu
, &cpuid_topo
->thread_sibling
);
56 static int dualcores_found __initdata
;
/*
 * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
 * which prevents simultaneous write access to cpu_topology array
 */
63 void __init
store_cpu_topology(unsigned int cpuid
)
65 struct cputopo_parisc
*cpuid_topo
= &cpu_topology
[cpuid
];
66 struct cpuinfo_parisc
*p
;
70 /* If the cpu topology has been already set, just return */
71 if (cpuid_topo
->core_id
!= -1)
74 /* create cpu topology mapping */
75 cpuid_topo
->thread_id
= -1;
76 cpuid_topo
->core_id
= 0;
78 p
= &per_cpu(cpu_data
, cpuid
);
79 for_each_online_cpu(cpu
) {
80 const struct cpuinfo_parisc
*cpuinfo
= &per_cpu(cpu_data
, cpu
);
82 if (cpu
== cpuid
) /* ignore current cpu */
85 if (cpuinfo
->cpu_loc
== p
->cpu_loc
) {
86 cpuid_topo
->core_id
= cpu_topology
[cpu
].core_id
;
88 cpuid_topo
->core_id
++;
89 cpuid_topo
->socket_id
= cpu_topology
[cpu
].socket_id
;
95 if (cpuid_topo
->socket_id
== -1)
96 max_socket
= max(max_socket
, cpu_topology
[cpu
].socket_id
);
99 if (cpuid_topo
->socket_id
== -1)
100 cpuid_topo
->socket_id
= max_socket
+ 1;
102 update_siblings_masks(cpuid
);
104 pr_info("CPU%u: thread %d, cpu %d, socket %d\n",
105 cpuid
, cpu_topology
[cpuid
].thread_id
,
106 cpu_topology
[cpuid
].core_id
,
107 cpu_topology
[cpuid
].socket_id
);
110 static struct sched_domain_topology_level parisc_mc_topology
[] = {
111 #ifdef CONFIG_SCHED_MC
112 { cpu_coregroup_mask
, cpu_core_flags
, SD_INIT_NAME(MC
) },
115 { cpu_cpu_mask
, SD_INIT_NAME(DIE
) },
/*
 * init_cpu_topology is called at boot when only one cpu is running
 * which prevents simultaneous write access to cpu_topology array
 */
123 void __init
init_cpu_topology(void)
127 /* init core mask and capacity */
128 for_each_possible_cpu(cpu
) {
129 struct cputopo_parisc
*cpu_topo
= &(cpu_topology
[cpu
]);
131 cpu_topo
->thread_id
= -1;
132 cpu_topo
->core_id
= -1;
133 cpu_topo
->socket_id
= -1;
134 cpumask_clear(&cpu_topo
->core_sibling
);
135 cpumask_clear(&cpu_topo
->thread_sibling
);
139 /* Set scheduler topology descriptor */
141 set_sched_topology(parisc_mc_topology
);