/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

struct pcpu {
	struct cpu cpu;
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	u32 status;			/* last status received via sigp */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
{
	register unsigned int reg1 asm ("1") = parm;
	int cc;

	asm volatile(
		"	sigp	%1,%2,0(%3)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
	if (status && cc == 1)
		*status = reg1;
	return cc;
}

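/*
 * The sigp instruction reports one of four condition codes, matching
 * the SIGP_CC_* constants from <asm/sigp.h>: 0 order accepted,
 * 1 status stored, 2 busy, 3 not operational. A status word is only
 * delivered for cc 1, via register 1 (the "+d" (reg1) operand), which
 * is why *status is written just in that case.
 */
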
static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, status);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &pcpu->status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(pcpu->status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, &pcpu->status) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	set_bit(ec_bit, &pcpu->ec_mask);
	order = pcpu_running(pcpu) ?
		SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu_sigp_retry(pcpu, order, 0);
}

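/*
 * Both orders arrive at the target as external interrupts: code
 * 0x1202 for the external call, 0x1201 for the emergency signal (see
 * do_ext_call_interrupt() and smp_prepare_cpus() below). The request
 * itself is encoded in pcpu->ec_mask; the interrupt only prompts the
 * target to inspect that mask.
 */
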
static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
			goto out;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
	lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
			goto out;
	}
#else
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

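/*
 * The first 512 bytes of the lowcore hold the architected entries
 * (old/new PSWs and interrupt parameters) and are cloned from the
 * boot cpu so the new cpu has working exception vectors; the per-cpu
 * remainder is cleared and filled in explicitly. SIGP set-prefix then
 * points the target cpu's prefix register at this page.
 */
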
#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	}
#else
	vdso_free_per_cpu(pcpu->lowcore);
#endif
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(psw_kernel_bits);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

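/*
 * The restart fields are written with mem_assign_absolute() because
 * lc may be the lowcore at absolute address zero (the boot cpu), and
 * a plain store to such a low address would be redirected by the
 * current cpu's prefixing. The closing sigp restart + sigp stop pair
 * sits in a single asm block so that no C code, and in particular no
 * stack access, runs between restarting the target and stopping
 * ourselves.
 */
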
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->panic_stack + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

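/*
 * Both diagnoses are yield hints to the hypervisor: diag 0x44 gives
 * up the remainder of the time slice unconditionally, while diag 0x9c
 * is a directed yield in favor of the named physical cpu, which is
 * why it takes the target's cpu address as operand.
 */
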
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_clock() < end)
			cpu_relax();
	}
	while (get_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

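/*
 * Timeout arithmetic: bit 51 of the TOD clock ticks once per
 * microsecond, i.e. one microsecond is 1 << 12 TOD units, so
 * (1000000UL << 12) is one second. Cpus that are not stopped by then
 * simply remain in the mask for the caller to deal with.
 */
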
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * Stop the current cpu.
 */
void smp_stop_cpu(void)
{
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	unsigned long bits;
	int cpu;

	cpu = smp_processor_id();
	if (ext_code.code == 0x1202)
		kstat_cpu(cpu).irqs[EXTINT_EXC]++;
	else
		kstat_cpu(cpu).irqs[EXTINT_EMS]++;
	/*
	 * handle bit signal external calls
	 */
	bits = xchg(&pcpu_devices[cpu].ec_mask, 0);

	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();

	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

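/*
 * The xchg() fetches and clears ec_mask in one atomic step: a sender
 * racing with this handler either gets its bit into this invocation
 * or leaves it set for the next interrupt, so requests may coalesce
 * but cannot be lost. ec_stop_cpu is tested first since it does not
 * return.
 */
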
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

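/*
 * Example, illustrative values only: smp_ctl_set_bit(0, 17) builds
 * parms = { .orval = 1UL << 17, .andval = -1UL, .cr = 0 } and runs
 * smp_ctl_bit_callback() on every online cpu, OR-ing the bit into
 * control register 0 everywhere. smp_ctl_clear_bit() does the same
 * with orval == 0 and the bit masked out of andval.
 */
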
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

static void __init smp_get_save_area(int cpu, u16 address)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area *save_area;

	if (is_kdump_kernel())
		return;
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded "
			   "from the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	if (!save_area)
		panic("could not allocate memory for save area\n");
	zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		return;
	}
#endif
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
}

int smp_store_status(int cpu)
{
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->cpu[info->configured].address = address;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

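/*
 * Once an SCLP read-cpu-info call has failed, use_sigp_detection
 * stays set and every later scan falls back to probing the whole
 * physical cpu address range with SIGP_SENSE; any address that does
 * not answer "not operational" is recorded as a configured cpu.
 */
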
static int __devinit smp_add_present_cpu(int cpu);

static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
				       int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = info->cpu[i].address;
		pcpu->state = (cpu >= info->configured) ?
			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpu = cpumask_next(cpu, &avail);
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address != boot_cpu_address)
				continue;
			/* The boot cpu dictates the cpu type. */
			boot_cpu_type = info->cpu[cpu].type;
			break;
		}
	}
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured) {
			smp_get_save_area(c_cpus, info->cpu[cpu].address);
			c_cpus++;
		} else
			s_cpus++;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 *	Activate a secondary processor.
 */
static void __cpuinit smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	init_cpu_vtimer();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
}

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

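/*
 * Bring-up hand-off, in the order the code above runs it: the target
 * is put through an initial cpu reset, gets a fresh lowcore, stacks
 * and the idle task attached, and pcpu_start_fn() then aims the
 * lowcore restart fields at smp_start_secondary() and issues sigp
 * restart. The new cpu marks itself online from smp_start_secondary()
 * and the busy-wait loop here observes that and returns.
 */
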
static int __init setup_possible_cpus(char *s)
{
	int max, cpu;

	if (kstrtoint(s, 0, &max) < 0)
		return 0;
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

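/*
 * Example, illustrative value: booting with "possible_cpus=4" on the
 * kernel command line caps cpu_possible_mask at cpus 0-3, so at most
 * three secondary cpus can ever be brought up or rescanned into the
 * system.
 */
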
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

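/*
 * Usage sketch, assuming the standard cpu sysfs layout: with cpu N
 * offline, "echo 0 > /sys/devices/system/cpu/cpuN/configure" asks the
 * SCLP to deconfigure the cpu (CONFIGURED -> STANDBY) and "echo 1"
 * configures it again; both refuse online cpus and cpu 0, as enforced
 * above.
 */
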
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int sequence;

	do {
		sequence = ACCESS_ONCE(idle->sequence);
		idle_count = ACCESS_ONCE(idle->idle_count);
		if (ACCESS_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while ((sequence & 1) || (idle->sequence != sequence));
	return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

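/*
 * The retry loop is a hand-rolled seqcount read side: idle->sequence
 * is odd while the idle code is updating, so the reader retries until
 * it sees the same even value before and after the reads. A non-zero
 * clock_idle_enter means the cpu is idle right now, so the count is
 * bumped by one to include the in-progress idle period.
 */
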
static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

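/*
 * idle_time is accumulated in TOD clock units; shifting right by 12
 * converts it to microseconds (bit 51 of the TOD clock ticks once per
 * microsecond), matching the _us suffix of the attribute. The
 * "idle_exit ? : now" gcc extension charges a still-running idle
 * period up to the current clock value.
 */
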
static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

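/*
 * Usage sketch, assuming the standard cpu sysfs layout: writing
 * anything to /sys/devices/system/cpu/rescan re-queries the SCLP cpu
 * list and registers newly found standby cpus as present.
 */
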
static int __init s390_smp_init(void)
{
	int cpu, rc;

	register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(s390_smp_init);