#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
#include <asm/firmware.h>

#include "cacheinfo.h"

#ifdef CONFIG_PPC64
#include <asm/lppaca.h>
#endif
static DEFINE_PER_CPU(struct cpu, cpu_devices);
/*
 * SMT snooze delay stuff, 64-bit only for now
 */

#ifdef CONFIG_PPC64

/* Time in microseconds we delay before sleeping in the idle loop */
DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
static ssize_t store_smt_snooze_delay(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t ret;
	long snooze;

	ret = sscanf(buf, "%ld", &snooze);
	if (ret != 1)
		return -EINVAL;

	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
	update_smt_snooze_delay(cpu->dev.id, snooze);

	return count;
}
static ssize_t show_smt_snooze_delay(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
}

static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);
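/*
 * Note (not from the original file): the attribute above is created per
 * CPU by register_cpu_online() below, so it typically appears as
 * /sys/devices/system/cpu/cpuN/smt_snooze_delay and holds the idle-loop
 * snooze delay in microseconds for that CPU.
 */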
static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	long snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	snooze = simple_strtol(str, NULL, 10);
	for_each_possible_cpu(cpu)
		per_cpu(smt_snooze_delay, cpu) = snooze;

	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);

#endif /* CONFIG_PPC64 */
/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */

static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__get_cpu_var(pmcs_enabled))
		return;

	__get_cpu_var(pmcs_enabled) = 1;

	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS); \
} \
static void write_##NAME(void *val) \
{ \
	ppc_enable_pmcs(); \
	mtspr(ADDRESS, *(unsigned long *)val); \
} \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}
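/*
 * Sketch of what SYSFS_PMCSETUP() expands to (illustrative note, not part
 * of the original file): SYSFS_PMCSETUP(pmc1, SPRN_PMC1) below generates
 * read_pmc1()/write_pmc1(), which access the SPR on the owning CPU via
 * smp_call_function_single(), plus show_pmc1()/store_pmc1(), the sysfs
 * accessors later hooked up through __ATTR()/DEVICE_ATTR().
 */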
/* Let's define all possible registers, we'll only hook up the ones
 * that are implemented on the current processor
 */

#if defined(CONFIG_PPC64)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_PA6T	1
#elif defined(CONFIG_6xx)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_G4		1
#endif
#ifdef HAS_PPC_PMC_CLASSIC
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif
#ifdef CONFIG_PPC64
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);

SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(purr, SPRN_PURR);
SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);
SYSFS_PMCSETUP(pir, SPRN_PIR);
/*
 * Let's only enable read for phyp resources and
 * enable write when needed with a separate function.
 * Let's be conservative and default to pseries.
 */
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
unsigned long dscr_default = 0;
EXPORT_SYMBOL(dscr_default);

static void add_write_permission_dev_attr(struct device_attribute *attr)
{
	attr->attr.mode |= 0200;
}
static ssize_t show_dscr_default(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", dscr_default);
}
static void update_dscr(void *dummy)
{
	if (!current->thread.dscr_inherit) {
		current->thread.dscr = dscr_default;
		mtspr(SPRN_DSCR, dscr_default);
	}
}
static ssize_t __used store_dscr_default(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	unsigned long val;
	int ret;

	ret = sscanf(buf, "%lx", &val);
	if (ret != 1)
		return -EINVAL;
	dscr_default = val;

	on_each_cpu(update_dscr, NULL, 1);

	return count;
}

static DEVICE_ATTR(dscr_default, 0600,
		show_dscr_default, store_dscr_default);
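/*
 * Note (not from the original file): dscr_default is registered system-wide
 * by sysfs_create_dscr_default() below, typically showing up as
 * /sys/devices/system/cpu/dscr_default. Writing it pushes the new value to
 * every online CPU via on_each_cpu(update_dscr, ...), skipping tasks that
 * inherited an explicit DSCR of their own.
 */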
static void sysfs_create_dscr_default(void)
{
	int err = 0;

	if (cpu_has_feature(CPU_FTR_DSCR))
		err = device_create_file(cpu_subsys.dev_root,
					 &dev_attr_dscr_default);
}
#endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
#ifdef CONFIG_DEBUG_KERNEL
SYSFS_PMCSETUP(hid0, SPRN_HID0);
SYSFS_PMCSETUP(hid1, SPRN_HID1);
SYSFS_PMCSETUP(hid4, SPRN_HID4);
SYSFS_PMCSETUP(hid5, SPRN_HID5);
SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_PMCSETUP(der, SPRN_PA6T_DER);
SYSFS_PMCSETUP(mer, SPRN_PA6T_MER);
SYSFS_PMCSETUP(ber, SPRN_PA6T_BER);
SYSFS_PMCSETUP(ier, SPRN_PA6T_IER);
SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER);
SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */
#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */
static struct device_attribute classic_pmc_attrs[] = {
	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef CONFIG_PPC64
	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
#ifdef HAS_PPC_PMC_PA6T
static struct device_attribute pa6t_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#ifdef CONFIG_DEBUG_KERNEL
	__ATTR(hid0, 0600, show_hid0, store_hid0),
	__ATTR(hid1, 0600, show_hid1, store_hid1),
	__ATTR(hid4, 0600, show_hid4, store_hid4),
	__ATTR(hid5, 0600, show_hid5, store_hid5),
	__ATTR(ima0, 0600, show_ima0, store_ima0),
	__ATTR(ima1, 0600, show_ima1, store_ima1),
	__ATTR(ima2, 0600, show_ima2, store_ima2),
	__ATTR(ima3, 0600, show_ima3, store_ima3),
	__ATTR(ima4, 0600, show_ima4, store_ima4),
	__ATTR(ima5, 0600, show_ima5, store_ima5),
	__ATTR(ima6, 0600, show_ima6, store_ima6),
	__ATTR(ima7, 0600, show_ima7, store_ima7),
	__ATTR(ima8, 0600, show_ima8, store_ima8),
	__ATTR(ima9, 0600, show_ima9, store_ima9),
	__ATTR(imaat, 0600, show_imaat, store_imaat),
	__ATTR(btcr, 0600, show_btcr, store_btcr),
	__ATTR(pccr, 0600, show_pccr, store_pccr),
	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
	__ATTR(der, 0600, show_der, store_der),
	__ATTR(mer, 0600, show_mer, store_mer),
	__ATTR(ber, 0600, show_ber, store_ber),
	__ATTR(ier, 0600, show_ier, store_ier),
	__ATTR(sier, 0600, show_sier, store_sier),
	__ATTR(siar, 0600, show_siar, store_siar),
	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
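/*
 * Note (not from the original file): the attribute tables above are only
 * definitions. register_cpu_online() below selects one of them at runtime
 * from cur_cpu_spec->pmc_type and creates the files on a CPU's sysfs
 * device as that CPU comes online; unregister_cpu_online() removes them
 * again on hotplug-remove.
 */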
static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR)) {
		if (!firmware_has_feature(FW_FEATURE_LPAR))
			add_write_permission_dev_attr(&dev_attr_purr);
		device_create_file(s, &dev_attr_purr);
	}

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_create_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_create_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */

	cacheinfo_cpu_online(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */

	cacheinfo_cpu_offline(cpu);
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t arch_cpu_probe(const char *buf, size_t count)
{
	if (ppc_md.cpu_probe)
		return ppc_md.cpu_probe(buf, count);

	return -EINVAL;
}

ssize_t arch_cpu_release(const char *buf, size_t count)
{
	if (ppc_md.cpu_release)
		return ppc_md.cpu_release(buf, count);

	return -EINVAL;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#endif /* CONFIG_HOTPLUG_CPU */
static int sysfs_cpu_notify(struct notifier_block *self,
			    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block sysfs_cpu_nb = {
	.notifier_call	= sysfs_cpu_notify,
};

static DEFINE_MUTEX(cpu_mutex);
int cpu_add_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_create_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;
	int ret;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		ret = sysfs_create_group(&dev->kobj, attrs);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
void cpu_remove_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_remove_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
void cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		sysfs_remove_group(&dev->kobj, attrs);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
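/*
 * Illustrative usage of the helpers above (a sketch, not part of the
 * original file; "foo" and show_foo() are hypothetical):
 *
 *	static DEVICE_ATTR(foo, 0444, show_foo, NULL);
 *
 *	cpu_add_dev_attr(&dev_attr_foo);	adds foo under every CPU device
 *	...
 *	cpu_remove_dev_attr(&dev_attr_foo);	removes it again
 */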
/* NUMA stuff */

#ifdef CONFIG_NUMA
static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
}
int sysfs_add_device_to_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
			kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
void sysfs_remove_device_from_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);

#else
static void register_nodes(void)
{
	return;
}
#endif /* CONFIG_NUMA */
/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
}
static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug. But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU. For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	return 0;
}
subsys_initcall(topology_init);