arch/powerpc/kernel/rtas.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for interfacing to the RTAS on CHRP machines.
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 */

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/delay.h>
#include <linux/uaccess.h>
#include <asm/udbg.h>
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>

/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);

struct rtas_t rtas = {
        .lock = __ARCH_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);

DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL(rtas_data_buf_lock);

char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
EXPORT_SYMBOL(rtas_data_buf);

unsigned long rtas_rmo_buf;

/*
 * If non-NULL, this gets called when the kernel terminates.
 * This is done like this so rtas_flash can be a module.
 */
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);

/* RTAS uses home-made raw locking instead of spin_lock_irqsave()
 * because it can be called from within really nasty contexts,
 * such as with the timebase stopped, which would lock up with
 * normal locks and spinlock debugging enabled.
 */
static unsigned long lock_rtas(void)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        arch_spin_lock(&rtas.lock);
        return flags;
}

static void unlock_rtas(unsigned long flags)
{
        arch_spin_unlock(&rtas.lock);
        local_irq_restore(flags);
        preempt_enable();
}

/*
 * call_rtas_display_status and call_rtas_display_status_delay
 * are designed only for very early low-level debugging, which
 * is why the token is hard-coded to 10.
 */
static void call_rtas_display_status(unsigned char c)
{
        unsigned long s;

        if (!rtas.base)
                return;

        s = lock_rtas();
        rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
        unlock_rtas(s);
}

static void call_rtas_display_status_delay(char c)
{
        static int pending_newline = 0;  /* did last write end with unprinted newline? */
        static int width = 16;

        if (c == '\n') {
                while (width-- > 0)
                        call_rtas_display_status(' ');
                width = 16;
                mdelay(500);
                pending_newline = 1;
        } else {
                if (pending_newline) {
                        call_rtas_display_status('\r');
                        call_rtas_display_status('\n');
                }
                pending_newline = 0;
                if (width--) {
                        call_rtas_display_status(c);
                        udelay(10000);
                }
        }
}

void __init udbg_init_rtas_panel(void)
{
        udbg_putc = call_rtas_display_status_delay;
}

#ifdef CONFIG_UDBG_RTAS_CONSOLE

/* If you think you're dying before early_init_dt_scan_rtas() does its
 * work, you can hard code the token values for your firmware here and
 * hard code rtas.base/entry etc.
 */
static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;

static void udbg_rtascon_putc(char c)
{
        int tries;

        if (!rtas.base)
                return;

        /* Add CRs before LFs */
        if (c == '\n')
                udbg_rtascon_putc('\r');

        /* if there is more than one character to be displayed, wait a bit */
        for (tries = 0; tries < 16; tries++) {
                if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
                        break;
                udelay(1000);
        }
}

static int udbg_rtascon_getc_poll(void)
{
        int c;

        if (!rtas.base)
                return -1;

        if (rtas_call(rtas_getchar_token, 0, 2, &c))
                return -1;

        return c;
}

static int udbg_rtascon_getc(void)
{
        int c;

        while ((c = udbg_rtascon_getc_poll()) == -1)
                ;

        return c;
}

void __init udbg_init_rtas_console(void)
{
        udbg_putc = udbg_rtascon_putc;
        udbg_getc = udbg_rtascon_getc;
        udbg_getc_poll = udbg_rtascon_getc_poll;
}

#endif /* CONFIG_UDBG_RTAS_CONSOLE */

void rtas_progress(char *s, unsigned short hex)
{
        struct device_node *root;
        int width;
        const __be32 *p;
        char *os;
        static int display_character, set_indicator;
        static int display_width, display_lines, form_feed;
        static const int *row_width;
        static DEFINE_SPINLOCK(progress_lock);
        static int current_line;
        static int pending_newline = 0;  /* did last write end with unprinted newline? */

        if (!rtas.base)
                return;

        if (display_width == 0) {
                display_width = 0x10;
                if ((root = of_find_node_by_path("/rtas"))) {
                        if ((p = of_get_property(root,
                                        "ibm,display-line-length", NULL)))
                                display_width = be32_to_cpu(*p);
                        if ((p = of_get_property(root,
                                        "ibm,form-feed", NULL)))
                                form_feed = be32_to_cpu(*p);
                        if ((p = of_get_property(root,
                                        "ibm,display-number-of-lines", NULL)))
                                display_lines = be32_to_cpu(*p);
                        row_width = of_get_property(root,
                                        "ibm,display-truncation-length", NULL);
                        of_node_put(root);
                }
                display_character = rtas_token("display-character");
                set_indicator = rtas_token("set-indicator");
        }

        if (display_character == RTAS_UNKNOWN_SERVICE) {
                /* use hex display if available */
                if (set_indicator != RTAS_UNKNOWN_SERVICE)
                        rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
                return;
        }

        spin_lock(&progress_lock);

        /*
         * Last write ended with newline, but we didn't print it since
         * it would just clear the bottom line of output. Print it now
         * instead.
         *
         * If no newline is pending and form feed is supported, clear the
         * display with a form feed; otherwise, print a CR to start output
         * at the beginning of the line.
         */
        if (pending_newline) {
                rtas_call(display_character, 1, 1, NULL, '\r');
                rtas_call(display_character, 1, 1, NULL, '\n');
                pending_newline = 0;
        } else {
                current_line = 0;
                if (form_feed)
                        rtas_call(display_character, 1, 1, NULL,
                                  (char)form_feed);
                else
                        rtas_call(display_character, 1, 1, NULL, '\r');
        }

        if (row_width)
                width = row_width[current_line];
        else
                width = display_width;
        os = s;
        while (*os) {
                if (*os == '\n' || *os == '\r') {
                        /* If newline is the last character, save it
                         * until next call to avoid bumping up the
                         * display output.
                         */
                        if (*os == '\n' && !os[1]) {
                                pending_newline = 1;
                                current_line++;
                                if (current_line > display_lines-1)
                                        current_line = display_lines-1;
                                spin_unlock(&progress_lock);
                                return;
                        }

                        /* RTAS wants CR-LF, not just LF */
                        if (*os == '\n') {
                                rtas_call(display_character, 1, 1, NULL, '\r');
                                rtas_call(display_character, 1, 1, NULL, '\n');
                        } else {
                                /* CR might be used to re-draw a line, so we'll
                                 * leave it alone and not add LF.
                                 */
                                rtas_call(display_character, 1, 1, NULL, *os);
                        }

                        if (row_width)
                                width = row_width[current_line];
                        else
                                width = display_width;
                } else {
                        width--;
                        rtas_call(display_character, 1, 1, NULL, *os);
                }

                os++;

                /* if we overrun the screen length, skip to end of line */
                if (width <= 0)
                        while ((*os != 0) && (*os != '\n') && (*os != '\r'))
                                os++;
        }

        spin_unlock(&progress_lock);
}
EXPORT_SYMBOL(rtas_progress);		/* needed by rtas_flash module */

int rtas_token(const char *service)
{
        const __be32 *tokp;
        if (rtas.dev == NULL)
                return RTAS_UNKNOWN_SERVICE;
        tokp = of_get_property(rtas.dev, service, NULL);
        return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_token);

int rtas_service_present(const char *service)
{
        return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_service_present);

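/*
 * Illustrative pattern (drawn from the wrappers further down in this file):
 * callers look a token up once with rtas_token() and bail out when the
 * firmware does not offer the service, e.g.
 *
 *	int token = rtas_token("set-indicator");
 *
 *	if (token == RTAS_UNKNOWN_SERVICE)
 *		return -ENOENT;
 */
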
#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
 * Return the firmware-specified size of the error log buffer
 * for all rtas calls that require an error buffer argument.
 * This includes 'check-exception' and 'rtas-last-error'.
 */
int rtas_get_error_log_max(void)
{
        static int rtas_error_log_max;
        if (rtas_error_log_max)
                return rtas_error_log_max;

        rtas_error_log_max = rtas_token("rtas-error-log-max");
        if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
            (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
                printk(KERN_WARNING "RTAS: bad log buffer size %d\n",
                       rtas_error_log_max);
                rtas_error_log_max = RTAS_ERROR_LOG_MAX;
        }
        return rtas_error_log_max;
}
EXPORT_SYMBOL(rtas_get_error_log_max);

static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
static int rtas_last_error_token;

/** Return a copy of the detailed error text associated with the
 * most recent failed call to rtas.  Because the error text
 * might go stale if there are any other intervening rtas calls,
 * this routine must be called atomically with whatever produced
 * the error (i.e. with rtas.lock still held from the previous call).
 */
static char *__fetch_rtas_last_error(char *altbuf)
{
        struct rtas_args err_args, save_args;
        u32 bufsz;
        char *buf = NULL;

        if (rtas_last_error_token == -1)
                return NULL;

        bufsz = rtas_get_error_log_max();

        err_args.token = cpu_to_be32(rtas_last_error_token);
        err_args.nargs = cpu_to_be32(2);
        err_args.nret = cpu_to_be32(1);
        err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
        err_args.args[1] = cpu_to_be32(bufsz);
        err_args.args[2] = 0;

        save_args = rtas.args;
        rtas.args = err_args;

        enter_rtas(__pa(&rtas.args));

        err_args = rtas.args;
        rtas.args = save_args;

        /* Log the error in the unlikely case that there was one. */
        if (unlikely(err_args.args[2] == 0)) {
                if (altbuf) {
                        buf = altbuf;
                } else {
                        buf = rtas_err_buf;
                        if (slab_is_available())
                                buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
                }
                if (buf)
                        memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
        }

        return buf;
}

#define get_errorlog_buffer()	kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)

#else /* CONFIG_RTAS_ERROR_LOGGING */
#define __fetch_rtas_last_error(x)	NULL
#define get_errorlog_buffer()		NULL
#endif

static void
va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
                      va_list list)
{
        int i;

        args->token = cpu_to_be32(token);
        args->nargs = cpu_to_be32(nargs);
        args->nret  = cpu_to_be32(nret);
        args->rets  = &(args->args[nargs]);

        for (i = 0; i < nargs; ++i)
                args->args[i] = cpu_to_be32(va_arg(list, __u32));

        for (i = 0; i < nret; ++i)
                args->rets[i] = 0;

        enter_rtas(__pa(args));
}

void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
{
        va_list list;

        va_start(list, nret);
        va_rtas_call_unlocked(args, token, nargs, nret, list);
        va_end(list);
}

int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
        va_list list;
        int i;
        unsigned long s;
        struct rtas_args *rtas_args;
        char *buff_copy = NULL;
        int ret;

        if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
                return -1;

        s = lock_rtas();

        /* We use the global rtas args buffer */
        rtas_args = &rtas.args;

        va_start(list, outputs);
        va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
        va_end(list);

        /* A -1 return code indicates that the last command couldn't
           be completed due to a hardware error. */
        if (be32_to_cpu(rtas_args->rets[0]) == -1)
                buff_copy = __fetch_rtas_last_error(NULL);

        if (nret > 1 && outputs != NULL)
                for (i = 0; i < nret-1; ++i)
                        outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
        ret = (nret > 0) ? be32_to_cpu(rtas_args->rets[0]) : 0;

        unlock_rtas(s);

        if (buff_copy) {
                log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
                if (slab_is_available())
                        kfree(buff_copy);
        }
        return ret;
}
EXPORT_SYMBOL(rtas_call);

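/*
 * Illustrative use of rtas_call(), mirroring rtas_get_sensor() below: the
 * "get-sensor-state" call takes two inputs and produces two returns.  The
 * first return (the status word) comes back as rtas_call()'s return value;
 * the remaining returns are stored through @outputs:
 *
 *	int state;
 *	int rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &state,
 *			   sensor, index);
 */
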
/* For RTAS_BUSY (-2), delay for 1 millisecond.  For an extended busy status
 * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
 */
unsigned int rtas_busy_delay_time(int status)
{
        int order;
        unsigned int ms = 0;

        if (status == RTAS_BUSY) {
                ms = 1;
        } else if (status >= RTAS_EXTENDED_DELAY_MIN &&
                   status <= RTAS_EXTENDED_DELAY_MAX) {
                order = status - RTAS_EXTENDED_DELAY_MIN;
                for (ms = 1; order > 0; order--)
                        ms *= 10;
        }

        return ms;
}
EXPORT_SYMBOL(rtas_busy_delay_time);

/* For an RTAS busy status code, perform the hinted delay. */
unsigned int rtas_busy_delay(int status)
{
        unsigned int ms;

        might_sleep();
        ms = rtas_busy_delay_time(status);
        if (ms && need_resched())
                msleep(ms);

        return ms;
}
EXPORT_SYMBOL(rtas_busy_delay);

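/*
 * Illustrative retry loop, as used by rtas_set_indicator() and friends
 * below: reissue the call while the returned status indicates a busy
 * condition, sleeping for the firmware-hinted interval each time:
 *
 *	do {
 *		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
 *	} while (rtas_busy_delay(rc));
 */
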
static int rtas_error_rc(int rtas_rc)
{
        int rc;

        switch (rtas_rc) {
        case -1:		/* Hardware Error */
                rc = -EIO;
                break;
        case -3:		/* Bad indicator/domain/etc */
                rc = -EINVAL;
                break;
        case -9000:		/* Isolation error */
                rc = -EFAULT;
                break;
        case -9001:		/* Outstanding TCE/PTE */
                rc = -EEXIST;
                break;
        case -9002:		/* No usable slot */
                rc = -ENODEV;
                break;
        default:
                printk(KERN_ERR "%s: unexpected RTAS error %d\n",
                       __func__, rtas_rc);
                rc = -ERANGE;
                break;
        }
        return rc;
}

int rtas_get_power_level(int powerdomain, int *level)
{
        int token = rtas_token("get-power-level");
        int rc;

        if (token == RTAS_UNKNOWN_SERVICE)
                return -ENOENT;

        while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
                udelay(1);

        if (rc < 0)
                return rtas_error_rc(rc);
        return rc;
}
EXPORT_SYMBOL(rtas_get_power_level);

int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
        int token = rtas_token("set-power-level");
        int rc;

        if (token == RTAS_UNKNOWN_SERVICE)
                return -ENOENT;

        do {
                rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
        } while (rtas_busy_delay(rc));

        if (rc < 0)
                return rtas_error_rc(rc);
        return rc;
}
EXPORT_SYMBOL(rtas_set_power_level);

int rtas_get_sensor(int sensor, int index, int *state)
{
        int token = rtas_token("get-sensor-state");
        int rc;

        if (token == RTAS_UNKNOWN_SERVICE)
                return -ENOENT;

        do {
                rc = rtas_call(token, 2, 2, state, sensor, index);
        } while (rtas_busy_delay(rc));

        if (rc < 0)
                return rtas_error_rc(rc);
        return rc;
}
EXPORT_SYMBOL(rtas_get_sensor);

int rtas_get_sensor_fast(int sensor, int index, int *state)
{
        int token = rtas_token("get-sensor-state");
        int rc;

        if (token == RTAS_UNKNOWN_SERVICE)
                return -ENOENT;

        rc = rtas_call(token, 2, 2, state, sensor, index);
        WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
                                    rc <= RTAS_EXTENDED_DELAY_MAX));

        if (rc < 0)
                return rtas_error_rc(rc);
        return rc;
}

bool rtas_indicator_present(int token, int *maxindex)
{
        int proplen, count, i;
        const struct indicator_elem {
                __be32 token;
                __be32 maxindex;
        } *indicators;

        indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
        if (!indicators)
                return false;

        count = proplen / sizeof(struct indicator_elem);

        for (i = 0; i < count; i++) {
                if (__be32_to_cpu(indicators[i].token) != token)
                        continue;
                if (maxindex)
                        *maxindex = __be32_to_cpu(indicators[i].maxindex);
                return true;
        }

        return false;
}
EXPORT_SYMBOL(rtas_indicator_present);

int rtas_set_indicator(int indicator, int index, int new_value)
{
        int token = rtas_token("set-indicator");
        int rc;

        if (token == RTAS_UNKNOWN_SERVICE)
                return -ENOENT;

        do {
                rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
        } while (rtas_busy_delay(rc));

        if (rc < 0)
                return rtas_error_rc(rc);
        return rc;
}
EXPORT_SYMBOL(rtas_set_indicator);

/*
 * Ignoring RTAS extended delay
 */
int rtas_set_indicator_fast(int indicator, int index, int new_value)
{
        int rc;
        int token = rtas_token("set-indicator");

        if (token == RTAS_UNKNOWN_SERVICE)
                return -ENOENT;

        rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);

        WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
                                    rc <= RTAS_EXTENDED_DELAY_MAX));

        if (rc < 0)
                return rtas_error_rc(rc);

        return rc;
}

void __noreturn rtas_restart(char *cmd)
{
        if (rtas_flash_term_hook)
                rtas_flash_term_hook(SYS_RESTART);
        printk("RTAS system-reboot returned %d\n",
               rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
        for (;;);
}

void rtas_power_off(void)
{
        if (rtas_flash_term_hook)
                rtas_flash_term_hook(SYS_POWER_OFF);
        /* allow power on only with power button press */
        printk("RTAS power-off returned %d\n",
               rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
        for (;;);
}

void __noreturn rtas_halt(void)
{
        if (rtas_flash_term_hook)
                rtas_flash_term_hook(SYS_HALT);
        /* allow power on only with power button press */
        printk("RTAS power-off returned %d\n",
               rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
        for (;;);
}

/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];

void rtas_os_term(char *str)
{
        int status;

        /*
         * Firmware with the ibm,extended-os-term property is guaranteed
         * to always return from an ibm,os-term call. Earlier versions without
         * this property may terminate the partition which we want to avoid
         * since it interferes with panic_timeout.
         */
        if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
            RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
                return;

        snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);

        do {
                status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
                                   __pa(rtas_os_term_buf));
        } while (rtas_busy_delay(status));

        if (status != 0)
                printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}

static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;

#ifdef CONFIG_PPC_PSERIES
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
        u16 slb_size = mmu_slb_size;
        int rc = H_MULTI_THREADS_ACTIVE;
        int cpu;

        slb_set_size(SLB_MIN_SIZE);
        printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

        while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
               !atomic_read(&data->error))
                rc = rtas_call(data->token, 0, 1, NULL);

        if (rc || atomic_read(&data->error)) {
                printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
                slb_set_size(slb_size);
        }

        if (atomic_read(&data->error))
                rc = atomic_read(&data->error);

        atomic_set(&data->error, rc);
        pSeries_coalesce_init();

        if (wake_when_done) {
                atomic_set(&data->done, 1);

                for_each_online_cpu(cpu)
                        plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
        }

        if (atomic_dec_return(&data->working) == 0)
                complete(data->complete);

        return rc;
}

int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
{
        atomic_inc(&data->working);
        return __rtas_suspend_last_cpu(data, 0);
}

static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
        long rc = H_SUCCESS;
        unsigned long msr_save;
        int cpu;

        atomic_inc(&data->working);

        /* really need to ensure MSR.EE is off for H_JOIN */
        msr_save = mfmsr();
        mtmsr(msr_save & ~(MSR_EE));

        while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
                rc = plpar_hcall_norets(H_JOIN);

        mtmsr(msr_save);

        if (rc == H_SUCCESS) {
                /* This cpu was prodded and the suspend is complete. */
                goto out;
        } else if (rc == H_CONTINUE) {
                /* All other cpus are in H_JOIN, this cpu does
                 * the suspend.
                 */
                return __rtas_suspend_last_cpu(data, wake_when_done);
        } else {
                printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
                       smp_processor_id(), rc);
                atomic_set(&data->error, rc);
        }

        if (wake_when_done) {
                atomic_set(&data->done, 1);

                /* This cpu did the suspend or got an error; in either case,
                 * we need to prod all other cpus out of join state.
                 * Extra prods are harmless.
                 */
                for_each_online_cpu(cpu)
                        plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
        }
out:
        if (atomic_dec_return(&data->working) == 0)
                complete(data->complete);
        return rc;
}

int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
{
        return __rtas_suspend_cpu(data, 0);
}

static void rtas_percpu_suspend_me(void *info)
{
        __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}

enum rtas_cpu_state {
        DOWN,
        UP,
};

#ifndef CONFIG_SMP
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
                                cpumask_var_t cpus)
{
        if (!cpumask_empty(cpus)) {
                cpumask_clear(cpus);
                return -EINVAL;
        } else
                return 0;
}
#else
/* On return cpumask will be altered to indicate CPUs changed.
 * CPUs with states changed will be set in the mask,
 * CPUs with status unchanged will be unset in the mask.
 */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
                                cpumask_var_t cpus)
{
        int cpu;
        int cpuret = 0;
        int ret = 0;

        if (cpumask_empty(cpus))
                return 0;

        for_each_cpu(cpu, cpus) {
                struct device *dev = get_cpu_device(cpu);

                switch (state) {
                case DOWN:
                        cpuret = device_offline(dev);
                        break;
                case UP:
                        cpuret = device_online(dev);
                        break;
                }
                if (cpuret < 0) {
                        pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
                                 __func__,
                                 ((state == UP) ? "up" : "down"),
                                 cpu, cpuret);
                        if (!ret)
                                ret = cpuret;
                        if (state == UP) {
                                /* clear bits for unchanged cpus, return */
                                cpumask_shift_right(cpus, cpus, cpu);
                                cpumask_shift_left(cpus, cpus, cpu);
                                break;
                        } else {
                                /* clear bit for unchanged cpu, continue */
                                cpumask_clear_cpu(cpu, cpus);
                        }
                }
                cond_resched();
        }

        return ret;
}
#endif

int rtas_online_cpus_mask(cpumask_var_t cpus)
{
        int ret;

        ret = rtas_cpu_state_change_mask(UP, cpus);

        if (ret) {
                cpumask_var_t tmp_mask;

                if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
                        return ret;

                /* Use tmp_mask to preserve cpus mask from first failure */
                cpumask_copy(tmp_mask, cpus);
                rtas_offline_cpus_mask(tmp_mask);
                free_cpumask_var(tmp_mask);
        }

        return ret;
}

int rtas_offline_cpus_mask(cpumask_var_t cpus)
{
        return rtas_cpu_state_change_mask(DOWN, cpus);
}

int rtas_ibm_suspend_me(u64 handle)
{
        long state;
        long rc;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        struct rtas_suspend_me_data data;
        DECLARE_COMPLETION_ONSTACK(done);
        cpumask_var_t offline_mask;
        int cpuret;

        if (!rtas_service_present("ibm,suspend-me"))
                return -ENOSYS;

        /* Make sure the state is valid */
        rc = plpar_hcall(H_VASI_STATE, retbuf, handle);

        state = retbuf[0];

        if (rc) {
                printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n", rc);
                return rc;
        } else if (state == H_VASI_ENABLED) {
                return -EAGAIN;
        } else if (state != H_VASI_SUSPENDING) {
                printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
                       state);
                return -EIO;
        }

        if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
                return -ENOMEM;

        atomic_set(&data.working, 0);
        atomic_set(&data.done, 0);
        atomic_set(&data.error, 0);
        data.token = rtas_token("ibm,suspend-me");
        data.complete = &done;

        lock_device_hotplug();

        /* All present CPUs must be online */
        cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
        cpuret = rtas_online_cpus_mask(offline_mask);
        if (cpuret) {
                pr_err("%s: Could not bring present CPUs online.\n", __func__);
                atomic_set(&data.error, cpuret);
                goto out;
        }

        cpu_hotplug_disable();

        /* Check if we raced with a CPU-Offline Operation */
        if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
                pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
                atomic_set(&data.error, -EAGAIN);
                goto out_hotplug_enable;
        }

        /* Call function on all CPUs.  One of us will make the
         * rtas call.
         */
        on_each_cpu(rtas_percpu_suspend_me, &data, 0);

        wait_for_completion(&done);

        if (atomic_read(&data.error) != 0)
                printk(KERN_ERR "Error doing global join\n");

out_hotplug_enable:
        cpu_hotplug_enable();

        /* Take down CPUs not online prior to suspend */
        cpuret = rtas_offline_cpus_mask(offline_mask);
        if (cpuret)
                pr_warn("%s: Could not restore CPUs to offline state.\n",
                        __func__);

out:
        unlock_device_hotplug();
        free_cpumask_var(offline_mask);
        return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
int rtas_ibm_suspend_me(u64 handle)
{
        return -ENOSYS;
}
#endif

/**
 * Find a specific pseries error log in an RTAS extended event log.
 * @log: RTAS error/event log
 * @section_id: two character section identifier
 *
 * Returns a pointer to the specified errorlog or NULL if not found.
 */
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
                                              uint16_t section_id)
{
        struct rtas_ext_event_log_v6 *ext_log =
                (struct rtas_ext_event_log_v6 *)log->buffer;
        struct pseries_errorlog *sect;
        unsigned char *p, *log_end;
        uint32_t ext_log_length = rtas_error_extended_log_length(log);
        uint8_t log_format = rtas_ext_event_log_format(ext_log);
        uint32_t company_id = rtas_ext_event_company_id(ext_log);

        /* Check that we understand the format */
        if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
            log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
            company_id != RTAS_V6EXT_COMPANY_ID_IBM)
                return NULL;

        log_end = log->buffer + ext_log_length;
        p = ext_log->vendor_log;

        while (p < log_end) {
                sect = (struct pseries_errorlog *)p;
                if (pseries_errorlog_id(sect) == section_id)
                        return sect;
                p += pseries_errorlog_length(sect);
        }

        return NULL;
}

/* We assume to be passed big endian arguments */
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
        struct rtas_args args;
        unsigned long flags;
        char *buff_copy, *errbuf = NULL;
        int nargs, nret, token;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!rtas.entry)
                return -EINVAL;

        if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
                return -EFAULT;

        nargs = be32_to_cpu(args.nargs);
        nret  = be32_to_cpu(args.nret);
        token = be32_to_cpu(args.token);

        if (nargs >= ARRAY_SIZE(args.args)
            || nret > ARRAY_SIZE(args.args)
            || nargs + nret > ARRAY_SIZE(args.args))
                return -EINVAL;

        /* Copy in args. */
        if (copy_from_user(args.args, uargs->args,
                           nargs * sizeof(rtas_arg_t)) != 0)
                return -EFAULT;

        if (token == RTAS_UNKNOWN_SERVICE)
                return -EINVAL;

        args.rets = &args.args[nargs];
        memset(args.rets, 0, nret * sizeof(rtas_arg_t));

        /* Need to handle ibm,suspend_me call specially */
        if (token == ibm_suspend_me_token) {

                /*
                 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
                 * endian, or at least the hcall within it requires it.
                 */
                int rc = 0;
                u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
                              | be32_to_cpu(args.args[1]);
                rc = rtas_ibm_suspend_me(handle);
                if (rc == -EAGAIN)
                        args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
                else if (rc == -EIO)
                        args.rets[0] = cpu_to_be32(-1);
                else if (rc)
                        return rc;
                goto copy_return;
        }

        buff_copy = get_errorlog_buffer();

        flags = lock_rtas();

        rtas.args = args;
        enter_rtas(__pa(&rtas.args));
        args = rtas.args;

        /* A -1 return code indicates that the last command couldn't
           be completed due to a hardware error. */
        if (be32_to_cpu(args.rets[0]) == -1)
                errbuf = __fetch_rtas_last_error(buff_copy);

        unlock_rtas(flags);

        if (buff_copy) {
                if (errbuf)
                        log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
                kfree(buff_copy);
        }

copy_return:
        /* Copy out args. */
        if (copy_to_user(uargs->args + nargs,
                         args.args + nargs,
                         nret * sizeof(rtas_arg_t)) != 0)
                return -EFAULT;

        return 0;
}

/*
 * Call early during boot, before mem init, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
        unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
        u32 base, size, entry;
        int no_base, no_size, no_entry;

        /* Get RTAS dev node and fill up our "rtas" structure with infos
         * about it.
         */
        rtas.dev = of_find_node_by_name(NULL, "rtas");
        if (!rtas.dev)
                return;

        no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
        no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
        if (no_base || no_size) {
                of_node_put(rtas.dev);
                rtas.dev = NULL;
                return;
        }

        rtas.base = base;
        rtas.size = size;
        no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
        rtas.entry = no_entry ? rtas.base : entry;

        /* If RTAS was found, allocate the RMO buffer for it and look for
         * the stop-self token if any
         */
#ifdef CONFIG_PPC64
        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
                ibm_suspend_me_token = rtas_token("ibm,suspend-me");
        }
#endif
        rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE,
                                                 0, rtas_region);
        if (!rtas_rmo_buf)
                panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
                      PAGE_SIZE, &rtas_region);

#ifdef CONFIG_RTAS_ERROR_LOGGING
        rtas_last_error_token = rtas_token("rtas-last-error");
#endif
}

int __init early_init_dt_scan_rtas(unsigned long node,
                const char *uname, int depth, void *data)
{
        const u32 *basep, *entryp, *sizep;

        if (depth != 1 || strcmp(uname, "rtas") != 0)
                return 0;

        basep  = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
        entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
        sizep  = of_get_flat_dt_prop(node, "rtas-size", NULL);

        if (basep && entryp && sizep) {
                rtas.base = *basep;
                rtas.entry = *entryp;
                rtas.size = *sizep;
        }

#ifdef CONFIG_UDBG_RTAS_CONSOLE
        basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
        if (basep)
                rtas_putchar_token = *basep;

        basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
        if (basep)
                rtas_getchar_token = *basep;

        if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
            rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
                udbg_init_rtas_console();
#endif

        /* break now */
        return 1;
}

static arch_spinlock_t timebase_lock;
static u64 timebase = 0;

void rtas_give_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);
        hard_irq_disable();
        arch_spin_lock(&timebase_lock);
        rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
        timebase = get_tb();
        arch_spin_unlock(&timebase_lock);

        while (timebase)
                barrier();
        rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
        local_irq_restore(flags);
}

void rtas_take_timebase(void)
{
        while (!timebase)
                barrier();
        arch_spin_lock(&timebase_lock);
        set_tb(timebase >> 32, timebase & 0xffffffff);
        timebase = 0;
        arch_spin_unlock(&timebase_lock);
}