#include <linux/config.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/systemcfg.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/machdep.h>
#include <asm/smp.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
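/*
 * Per-CPU SMT snooze delay, exposed to userspace through the sysfs
 * attribute created in register_cpu_online() below.
 */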
#ifdef CONFIG_PPC_MULTIPLATFORM
/* default to snooze disabled */
DEFINE_PER_CPU(unsigned long, smt_snooze_delay);
static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
				      size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret;
	unsigned long snooze;

	ret = sscanf(buf, "%lu", &snooze);
	if (ret != 1)
		return -EINVAL;

	per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze;

	return count;
}
static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);

	return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
}
static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);
/* Only parse OF options if the matching cmdline option was not specified */
static int smt_snooze_cmdline;
static int __init smt_setup(void)
{
	struct device_node *options;
	unsigned int *val;
	unsigned int cpu;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	options = find_path_device("/options");
	if (!options)
		return 1;

	val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay",
					   NULL);
	if (!smt_snooze_cmdline && val) {
		for_each_cpu(cpu)
			per_cpu(smt_snooze_delay, cpu) = *val;
	}

	return 1;
}
__initcall(smt_setup);
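/*
 * "smt-snooze-delay=" on the kernel command line overrides the
 * Open Firmware "ibm,smt-snooze-delay" property handled above.
 */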
static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	int snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	smt_snooze_cmdline = 1;

	if (get_option(&str, &snooze)) {
		for_each_cpu(cpu)
			per_cpu(smt_snooze_delay, cpu) = snooze;
	}

	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */
static DEFINE_PER_CPU(char, pmcs_enabled);
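/*
 * Turn on the performance monitor facility for this CPU: directly via
 * HID0 on bare-metal pSeries/PowerMac, or via the H_PERFMON hcall when
 * running in an LPAR under the hypervisor.
 */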
void ppc64_enable_pmcs(void)
{
	unsigned long hid0;
#ifdef CONFIG_PPC_PSERIES
	unsigned long set, reset;
	int ret;
#endif /* CONFIG_PPC_PSERIES */

	/* Only need to enable them once */
	if (__get_cpu_var(pmcs_enabled))
		return;

	__get_cpu_var(pmcs_enabled) = 1;

	switch (systemcfg->platform) {
	case PLATFORM_PSERIES:
	case PLATFORM_POWERMAC:
		hid0 = mfspr(HID0);
		hid0 |= 1UL << (63 - 20);

		/* POWER4 requires the following sequence */
		asm volatile(
			     "sync\n"
			     "mtspr	%1, %0\n"
			     "mfspr	%0, %1\n"
			     "mfspr	%0, %1\n"
			     "mfspr	%0, %1\n"
			     "mfspr	%0, %1\n"
			     "mfspr	%0, %1\n"
			     "mfspr	%0, %1\n"
			     "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
			     "memory");
		break;

#ifdef CONFIG_PPC_PSERIES
	case PLATFORM_PSERIES_LPAR:
		set = 1UL << 63;
		reset = 0;
		ret = plpar_hcall_norets(H_PERFMON, set, reset);
		if (ret)
			printk(KERN_ERR "H_PERFMON call on cpu %u "
			       "returned %d\n",
			       smp_processor_id(), ret);
		break;
#endif /* CONFIG_PPC_PSERIES */

	default:
		break;
	}

#ifdef CONFIG_PPC_PSERIES
	/* instruct hypervisor to maintain PMCs */
	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
		get_paca()->lppaca.pmcregs_in_use = 1;

	/*
	 * On SMT machines we have to set the run latch in the ctrl register
	 * in order to make PMC6 spin.
	 */
	if (cpu_has_feature(CPU_FTR_SMT))
		ppc64_runlatch_on();
#endif /* CONFIG_PPC_PSERIES */
}
#else

void ppc64_enable_pmcs(void)
{
	/* XXX Implement for iseries */
}
#endif /* CONFIG_PPC_MULTIPLATFORM */
EXPORT_SYMBOL(ppc64_enable_pmcs);
/* XXX convert to rusty's on_one_cpu */
static unsigned long run_on_cpu(unsigned long cpu,
				unsigned long (*func)(unsigned long),
				unsigned long arg)
{
	cpumask_t old_affinity = current->cpus_allowed;
	unsigned long ret;

	/* should return -EINVAL to userspace */
	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
		return 0;

	ret = func(arg);

	set_cpus_allowed(current, old_affinity);

	return ret;
}
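/*
 * SYSFS_PMCSETUP(NAME, ADDRESS) generates show_NAME/store_NAME sysfs
 * handlers that read and write the given SPR on the attribute's CPU,
 * using run_on_cpu() to migrate there first.
 */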
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
static unsigned long read_##NAME(unsigned long junk) \
{ \
	return mfspr(ADDRESS); \
} \
static unsigned long write_##NAME(unsigned long val) \
{ \
	ppc64_enable_pmcs(); \
	mtspr(ADDRESS, val); \
	return 0; \
} \
static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
	unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __attribute_used__ \
	store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	run_on_cpu(cpu->sysdev.id, write_##NAME, val); \
	return count; \
}
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(purr, SPRN_PURR);
static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1);
static SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2);
static SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3);
static SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4);
static SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5);
static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
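/*
 * Create the per-CPU sysfs attributes when a CPU comes online and tear
 * them down again when it goes away; only the features the CPU actually
 * has (SMT, MMCRA, PMC7/8, PURR) get their files.
 */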
static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

#ifndef CONFIG_PPC_ISERIES
	if (cpu_has_feature(CPU_FTR_SMT))
		sysdev_create_file(s, &attr_smt_snooze_delay);
#endif

	sysdev_create_file(s, &attr_mmcr0);
	sysdev_create_file(s, &attr_mmcr1);

	if (cpu_has_feature(CPU_FTR_MMCRA))
		sysdev_create_file(s, &attr_mmcra);

	sysdev_create_file(s, &attr_pmc1);
	sysdev_create_file(s, &attr_pmc2);
	sysdev_create_file(s, &attr_pmc3);
	sysdev_create_file(s, &attr_pmc4);
	sysdev_create_file(s, &attr_pmc5);
	sysdev_create_file(s, &attr_pmc6);

	if (cpu_has_feature(CPU_FTR_PMC8)) {
		sysdev_create_file(s, &attr_pmc7);
		sysdev_create_file(s, &attr_pmc8);
	}

	if (cpu_has_feature(CPU_FTR_SMT))
		sysdev_create_file(s, &attr_purr);
}
#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

	BUG_ON(c->no_control);

#ifndef CONFIG_PPC_ISERIES
	if (cpu_has_feature(CPU_FTR_SMT))
		sysdev_remove_file(s, &attr_smt_snooze_delay);
#endif

	sysdev_remove_file(s, &attr_mmcr0);
	sysdev_remove_file(s, &attr_mmcr1);

	if (cpu_has_feature(CPU_FTR_MMCRA))
		sysdev_remove_file(s, &attr_mmcra);

	sysdev_remove_file(s, &attr_pmc1);
	sysdev_remove_file(s, &attr_pmc2);
	sysdev_remove_file(s, &attr_pmc3);
	sysdev_remove_file(s, &attr_pmc4);
	sysdev_remove_file(s, &attr_pmc5);
	sysdev_remove_file(s, &attr_pmc6);

	if (cpu_has_feature(CPU_FTR_PMC8)) {
		sysdev_remove_file(s, &attr_pmc7);
		sysdev_remove_file(s, &attr_pmc8);
	}

	if (cpu_has_feature(CPU_FTR_SMT))
		sysdev_remove_file(s, &attr_purr);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit sysfs_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata sysfs_cpu_nb = {
	.notifier_call	= sysfs_cpu_notify,
};
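/*
 * Register a sysfs node device for every online NUMA node, attaching it
 * to its parent node's device when the parent differs.
 */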
#ifdef CONFIG_NUMA
static struct node node_devices[MAX_NUMNODES];

static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_online(i)) {
			int p_node = parent_node(i);
			struct node *parent = NULL;

			if (p_node != i)
				parent = &node_devices[p_node];

			register_node(&node_devices[i], i, parent);
		}
	}
}
#else
static void register_nodes(void)
{
	return;
}
#endif
/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct sys_device *dev, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);

	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id));
}
static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
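/*
 * Boot-time setup: register node and cpu sysfs devices, hook the CPU
 * hotplug notifier, and create the attributes for CPUs that are already
 * online.
 */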
static int __init topology_init(void)
{
	int cpu;
	struct node *parent = NULL;

	register_nodes();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_NUMA
		/* The node to which a cpu belongs can't be known
		 * until the cpu is made present.
		 */
		parent = NULL;
		if (cpu_present(cpu))
			parent = &node_devices[cpu_to_node(cpu)];
#endif
		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug. But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU. For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (!ppc_md.cpu_die)
			c->no_control = 1;

		if (cpu_online(cpu) || (c->no_control == 0)) {
			register_cpu(c, cpu, parent);

			sysdev_create_file(&c->sysdev, &attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}
__initcall(topology_init);