// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for interfacing to Open Firmware.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 */
/* we cannot use FORTIFY as it brings in new symbols */

#include <linux/stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/pgtable.h>
#include <linux/printk.h>
#include <linux/of_fdt.h>
#include <asm/processor.h>
#include <asm/interrupt.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor-api.h>

#include <linux/linux_logo.h>

/* All of prom_init bss lives here */
#define __prombss __section(".bss.prominit")
/*
 * Eventually bump that one up
 */
#define DEVTREE_CHUNK_SIZE	0x100000

/*
 * This is the size of the local memory reserve map that gets copied
 * into the boot params passed to the kernel. That size is totally
 * flexible as the kernel just reads the list until it encounters an
 * entry with size 0, so it can be changed without breaking binary
 * compatibility.
 */
#define MEM_RESERVE_MAP_SIZE	8
/*
 * prom_init() is called very early on, before the kernel text
 * and data have been mapped to KERNELBASE. At this point the code
 * is running at whatever address it has been loaded at.
 * On ppc32 we compile with -mrelocatable, which means that references
 * to extern and static variables get relocated automatically.
 * ppc64 objects are always relocatable, we just need to relocate the
 * TOC.
 *
 * Because OF may have mapped I/O devices into the area starting at
 * KERNELBASE, particularly on CHRP machines, we can't safely call
 * OF once the kernel has been mapped to KERNELBASE. Therefore all
 * OF calls must be done within prom_init().
 *
 * ADDR is used in calls to call_prom. The 4th and following
 * arguments to call_prom should be 32-bit values.
 * On ppc64, 64 bit values are truncated to 32 bits (and
 * fortunately don't get interpreted as two arguments).
 */
#define ADDR(x)		(u32)(unsigned long)(x)
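/*
 * For example, pointer arguments are wrapped with ADDR() so that they are
 * passed as 32-bit cells, as prom_getprop() does further down:
 *
 *	call_prom("getprop", 4, 1, node, ADDR(pname),
 *		  (u32)(unsigned long) value, (u32) valuelen);
 */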
#ifdef CONFIG_PPC64
#define OF_WORKAROUNDS	0
#else
#define OF_WORKAROUNDS	of_workarounds
static int of_workarounds __prombss;
#endif

#define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
#define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */

#ifdef DEBUG_PROM
#define prom_debug(x...)	prom_printf(x)
#else
#define prom_debug(x...)	do { } while (0)
#endif

typedef u32 prom_arg_t;
struct mem_map_entry {
	__be64	base;
	__be64	size;
};

typedef __be32 cell_t;

extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
		    unsigned long r6, unsigned long r7, unsigned long r8,
		    unsigned long r9);

#ifdef CONFIG_PPC64
extern int enter_prom(struct prom_args *args, unsigned long entry);
#else
static inline int enter_prom(struct prom_args *args, unsigned long entry)
{
	return ((int (*)(struct prom_args *))entry)(args);
}
#endif

extern void copy_and_flush(unsigned long dest, unsigned long src,
			   unsigned long size, unsigned long offset);
static struct prom_t __prombss prom;

static unsigned long __prombss prom_entry;

static char __prombss of_stdout_device[256];
static char __prombss prom_scratch[256];

static unsigned long __prombss dt_header_start;
static unsigned long __prombss dt_struct_start, dt_struct_end;
static unsigned long __prombss dt_string_start, dt_string_end;

static unsigned long __prombss prom_initrd_start, prom_initrd_end;

static int __prombss prom_iommu_force_on;
static int __prombss prom_iommu_off;
static unsigned long __prombss prom_tce_alloc_start;
static unsigned long __prombss prom_tce_alloc_end;

#ifdef CONFIG_PPC_PSERIES
static bool __prombss prom_radix_disable;
static bool __prombss prom_radix_gtse_disable;
static bool __prombss prom_xive_disable;
#endif

#ifdef CONFIG_PPC_SVM
static bool __prombss prom_svm_enable;
#endif

struct platform_support {
	bool hash_mmu;
	bool radix_mmu;
	bool radix_gtse;
	bool xive;
};
/* Platform codes are now obsolete in the kernel. Now only used within this
 * file and ultimately gone too. Feel free to change them if you need, they
 * are not shared with anything outside of this file anymore.
 */
#define PLATFORM_PSERIES	0x0100
#define PLATFORM_PSERIES_LPAR	0x0101
#define PLATFORM_LPAR		0x0001
#define PLATFORM_POWERMAC	0x0400
#define PLATFORM_GENERIC	0x0500

static int __prombss of_platform;

static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];

static unsigned long __prombss prom_memory_limit;

static unsigned long __prombss alloc_top;
static unsigned long __prombss alloc_top_high;
static unsigned long __prombss alloc_bottom;
static unsigned long __prombss rmo_top;
static unsigned long __prombss ram_top;

static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __prombss mem_reserve_cnt;

static cell_t __prombss regbuf[1024];

static bool __prombss rtas_has_query_cpu_stopped;

/*
 * Error results ... some OF calls will return "-1" on error, some
 * will return 0, some will return either. To simplify, here are
 * macros to use with any ihandle or phandle return value to check if
 * it is valid.
 */
#define PROM_ERROR		(-1u)
#define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
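/*
 * Typical use, as in prom_instantiate_rtas() below: "finddevice" may return
 * 0 or -1 on failure depending on the firmware, so results are checked with
 * the macros above before being used:
 *
 *	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
 *	if (!PHANDLE_VALID(rtas_node))
 *		return;
 */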
/* Copied from lib/string.c and lib/kstrtox.c */

static int __init prom_strcmp(const char *cs, const char *ct)
	unsigned char c1, c2;
			return c1 < c2 ? -1 : 1;

static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
	if (n == 0 || n > INT_MAX)

	// Copy up to n bytes
	for (i = 0; i < n && src[i] != '\0'; i++)

	// If we copied all n then we have run out of space for the nul
	// Rewind by one character to ensure nul termination

static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
	unsigned char c1, c2;
			return c1 < c2 ? -1 : 1;

static size_t __init prom_strlen(const char *s)
	for (sc = s; *sc != '\0'; ++sc)

static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
	const unsigned char *su1, *su2;

	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
		if ((res = *su1 - *su2) != 0)

static char __init *prom_strstr(const char *s1, const char *s2)
	l2 = prom_strlen(s2);
	l1 = prom_strlen(s1);
		if (!prom_memcmp(s1, s2, l2))

static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
	size_t dsize = prom_strlen(dest);
	size_t len = prom_strlen(src);
	size_t res = dsize + len;

	/* This would be a bug */
	memcpy(dest, src, len);

#ifdef CONFIG_PPC_PSERIES
static int __init prom_strtobool(const char *s, bool *res)
/* This is the one and *ONLY* place where we actually call open
 * firmware.
 */
static int __init call_prom(const char *service, int nargs, int nret, ...)
	struct prom_args args;

	args.service = cpu_to_be32(ADDR(service));
	args.nargs = cpu_to_be32(nargs);
	args.nret = cpu_to_be32(nret);

	va_start(list, nret);
	for (i = 0; i < nargs; i++)
		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));

	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;

	if (enter_prom(&args, prom_entry) < 0)

	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;

static int __init call_prom_ret(const char *service, int nargs, int nret,
				prom_arg_t *rets, ...)
	struct prom_args args;

	args.service = cpu_to_be32(ADDR(service));
	args.nargs = cpu_to_be32(nargs);
	args.nret = cpu_to_be32(nret);

	va_start(list, rets);
	for (i = 0; i < nargs; i++)
		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));

	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;

	if (enter_prom(&args, prom_entry) < 0)

	for (i = 1; i < nret; ++i)
		rets[i-1] = be32_to_cpu(args.args[nargs+i]);

	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
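/*
 * call_prom() only returns the first result cell; call_prom_ret() also
 * copies any additional result cells into "rets". Instantiating RTAS later
 * in this file uses the latter form to get the entry point back:
 *
 *	if (call_prom_ret("call-method", 3, 2, &entry,
 *			  ADDR("instantiate-rtas"), rtas_inst, base) != 0)
 */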
static void __init prom_print(const char *msg)
	if (prom.stdout == 0)

	for (p = msg; *p != 0; p = q) {
		for (q = p; *q != 0 && *q != '\n'; ++q)
		call_prom("write", 3, 1, prom.stdout, p, q - p);
		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);

/*
 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
 * we do not need __udivdi3 or __umoddi3 on 32bits.
 */
static void __init prom_print_hex(unsigned long val)
	int i, nibbles = sizeof(val)*2;
	char buf[sizeof(val)*2+1];

	for (i = nibbles-1; i >= 0; i--) {
		buf[i] = (val & 0xf) + '0';
			buf[i] += ('a'-'0'-10);
	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
/* max number of decimal digits in an unsigned long */
#define UL_DIGITS 21
static void __init prom_print_dec(unsigned long val)
	char buf[UL_DIGITS+1];

	for (i = UL_DIGITS-1; i >= 0; i--) {
		buf[i] = (val % 10) + '0';

	/* shift stuff down */
	size = UL_DIGITS - i;
	call_prom("write", 3, 1, prom.stdout, buf+i, size);

static void __init prom_printf(const char *format, ...)
	const char *p, *q, *s;

	va_start(args, format);
	for (p = format; *p != 0; p = q) {
		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
		call_prom("write", 3, 1, prom.stdout, p, q - p);
		call_prom("write", 3, 1, prom.stdout,
			s = va_arg(args, const char *);
			v = va_arg(args, unsigned int);
			v = va_arg(args, unsigned long);
			v = va_arg(args, unsigned long long);
			v = va_arg(args, unsigned int);
			v = va_arg(args, unsigned long);
			v = va_arg(args, unsigned long long);
			vs = va_arg(args, int);
			vs = va_arg(args, long);
			vs = va_arg(args, long long);
static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
				      unsigned long align)
	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
		/*
		 * Old OF requires we claim physical and virtual separately
		 * and then map explicitly (assuming virtual mode)
		 */
		ret = call_prom_ret("call-method", 5, 2, &result,
				    ADDR("claim"), prom.memory,
		if (ret != 0 || result == -1)
		ret = call_prom_ret("call-method", 5, 2, &result,
				    ADDR("claim"), prom.mmumap,
			call_prom("call-method", 4, 1, ADDR("release"),
				  prom.memory, size, virt);
		/* the 0x12 is M (coherence) + PP == read/write */
		call_prom("call-method", 6, 1,
			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
	/* Do not call exit because it clears the screen on pmac
	 * it also causes some sort of double-fault on early pmacs */
	if (of_platform == PLATFORM_POWERMAC)

	/* ToDo: should put up an SRC here on pSeries */
	call_prom("exit", 0, 0);

	for (;;)			/* should never get here */

static int __init prom_next_node(phandle *nodep)
	if ((node = *nodep) != 0
	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
	if ((node = call_prom("parent", 1, 1, node)) == 0)
	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)

static inline int __init prom_getprop(phandle node, const char *pname,
				      void *value, size_t valuelen)
{
	return call_prom("getprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}

static inline int __init prom_getproplen(phandle node, const char *pname)
{
	return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
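/*
 * Property values come back in firmware (big-endian) byte order, so callers
 * convert explicitly, e.g. as prom_init_mem() does below:
 *
 *	val = cpu_to_be32(2);
 *	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
 *	rac = be32_to_cpu(val);
 */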
static void __init add_string(char **str, const char *q)

static char *__init tohex(unsigned int x)
	static const char digits[] __initconst = "0123456789abcdef";
	static char result[9] __prombss;

		result[i] = digits[x & 0xf];
	} while (x != 0 && i > 0);

static int __init prom_setprop(phandle node, const char *nodename,
			       const char *pname, void *value, size_t valuelen)
	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
		return call_prom("setprop", 4, 1, node, ADDR(pname),
				 (u32)(unsigned long) value, (u32) valuelen);

	/* gah... setprop doesn't work on longtrail, have to use interpret */
	add_string(&p, "dev");
	add_string(&p, nodename);
	add_string(&p, tohex((u32)(unsigned long) value));
	add_string(&p, tohex(valuelen));
	add_string(&p, tohex(ADDR(pname)));
	add_string(&p, tohex(prom_strlen(pname)));
	add_string(&p, "property");
	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);

/* We can't use the standard versions because of relocation headaches. */
#define prom_isxdigit(c) \
	(('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
#define prom_isdigit(c)	('0' <= (c) && (c) <= '9')
#define prom_islower(c)	('a' <= (c) && (c) <= 'z')
#define prom_toupper(c)	(prom_islower(c) ? ((c) - 'a' + 'A') : (c))
static unsigned long __init prom_strtoul(const char *cp, const char **endp)
	unsigned long result = 0, base = 10, value;

	if (prom_toupper(*cp) == 'X') {

	while (prom_isxdigit(*cp) &&
	       (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
		result = result * base + value;

static unsigned long __init prom_memparse(const char *ptr, const char **retptr)
	unsigned long ret = prom_strtoul(ptr, retptr);

	/*
	 * We can't use a switch here because GCC *may* generate a
	 * jump table which won't work, because we're not running at
	 * the address we're linked at.
	 */
	if ('G' == **retptr || 'g' == **retptr)
	if ('M' == **retptr || 'm' == **retptr)
	if ('K' == **retptr || 'k' == **retptr)
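/*
 * Illustrative use (see early_cmdline_parse() below): a "mem=512M" option is
 * parsed with
 *
 *	prom_memory_limit = prom_memparse(opt, (const char **)&opt);
 *
 * which here would yield 0x20000000, with *retptr advanced past the suffix.
 */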
/*
 * Early parsing of the command line passed to the kernel, used for
 * "mem=x" and the options that affect the iommu
 */
static void __init early_cmdline_parse(void)
	prom_cmd_line[0] = 0;
	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);

	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
		prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
			     sizeof(prom_cmd_line));

	prom_printf("command line: %s\n", prom_cmd_line);

	opt = prom_strstr(prom_cmd_line, "iommu=");
		prom_printf("iommu opt is: %s\n", opt);
		while (*opt && *opt == ' ')
		if (!prom_strncmp(opt, "off", 3))
		else if (!prom_strncmp(opt, "force", 5))
			prom_iommu_force_on = 1;

	opt = prom_strstr(prom_cmd_line, "mem=");
		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
		/* Align down to 16 MB which is large page size with hash page translation */
		prom_memory_limit = ALIGN_DOWN(prom_memory_limit, SZ_16M);

#ifdef CONFIG_PPC_PSERIES
	prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
	opt = prom_strstr(prom_cmd_line, "disable_radix");
		if (*opt && *opt == '=') {
			if (prom_strtobool(++opt, &val))
				prom_radix_disable = false;
				prom_radix_disable = val;
			prom_radix_disable = true;
	if (prom_radix_disable)
		prom_debug("Radix disabled from cmdline\n");

	opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
		prom_radix_gtse_disable = true;
		prom_debug("Radix GTSE disabled from cmdline\n");

	opt = prom_strstr(prom_cmd_line, "xive=off");
		prom_xive_disable = true;
		prom_debug("XIVE disabled from cmdline\n");
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_SVM
	opt = prom_strstr(prom_cmd_line, "svm=");
		opt += sizeof("svm=") - 1;
		if (!prom_strtobool(opt, &val))
			prom_svm_enable = val;
#endif /* CONFIG_PPC_SVM */
#ifdef CONFIG_PPC_PSERIES
/*
 * The architecture vector has an array of PVR mask/value pairs,
 * followed by # option vectors - 1, followed by the option vectors.
 *
 * See prom.h for the definition of the bits specified in the
 * architecture vector.
 */

/* Firmware expects the value to be n - 1, where n is the # of vectors */
#define NUM_VECTORS(n)		((n) - 1)

/*
 * Firmware expects 1 + n - 2, where n is the length of the option vector in
 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
 */
#define VECTOR_LENGTH(n)	(1 + (n) - 2)
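/*
 * Worked example: an option vector struct of 24 bytes gives
 * VECTOR_LENGTH(24) == 23, i.e. the length byte plus the remaining payload
 * bytes, which is the value the firmware expects in the length field.
 */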
struct option_vector1 {
struct option_vector2 {
struct option_vector3 {
struct option_vector4 {
struct option_vector5 {
	u8 platform_facilities;
struct option_vector6 {
struct option_vector7 {

struct ibm_arch_vec {
	struct { __be32 mask, val; } pvrs[16];
	struct option_vector1 vec1;
	struct option_vector2 vec2;
	struct option_vector3 vec3;
	struct option_vector4 vec4;
	struct option_vector5 vec5;
	struct option_vector6 vec6;
	struct option_vector7 vec7;
static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
	.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
	.val  = cpu_to_be32(0x003a0000),
	.mask = cpu_to_be32(0xffff0000), /* POWER6 */
	.val  = cpu_to_be32(0x003e0000),
	.mask = cpu_to_be32(0xffff0000), /* POWER7 */
	.val  = cpu_to_be32(0x003f0000),
	.mask = cpu_to_be32(0xffff0000), /* POWER8E */
	.val  = cpu_to_be32(0x004b0000),
	.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
	.val  = cpu_to_be32(0x004c0000),
	.mask = cpu_to_be32(0xffff0000), /* POWER8 */
	.val  = cpu_to_be32(0x004d0000),
	.mask = cpu_to_be32(0xffff0000), /* POWER9 */
	.val  = cpu_to_be32(0x004e0000),
	.mask = cpu_to_be32(0xffff0000), /* POWER10 */
	.val  = cpu_to_be32(0x00800000),
	.mask = cpu_to_be32(0xffff0000), /* POWER11 */
	.val  = cpu_to_be32(0x00820000),
	.mask = cpu_to_be32(0xffffffff), /* P11 compliant */
	.val  = cpu_to_be32(0x0f000007),
	.mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
	.val  = cpu_to_be32(0x0f000006),
	.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
	.val  = cpu_to_be32(0x0f000005),
	.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
	.val  = cpu_to_be32(0x0f000004),
	.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
	.val  = cpu_to_be32(0x0f000003),
	.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
	.val  = cpu_to_be32(0x0f000002),
	.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
	.val  = cpu_to_be32(0x0f000001),

	.num_vectors = NUM_VECTORS(6),

	.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
	.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
			 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
	.arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,

	.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
	/* option vector 2: Open Firmware options supported */
	.byte1 = OV2_REAL_MODE,
	.real_base = cpu_to_be32(0xffffffff),
	.real_size = cpu_to_be32(0xffffffff),
	.virt_base = cpu_to_be32(0xffffffff),
	.virt_size = cpu_to_be32(0xffffffff),
	.load_base = cpu_to_be32(0xffffffff),
	.min_rma = cpu_to_be32(512),		/* 512MB min RMA */
	.min_load = cpu_to_be32(0xffffffff),	/* full client load */
	.min_rma_percent = 0,	/* min RMA percentage of total RAM */
	.max_pft_size = 48,	/* max log_2(hash table size) */

	.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
	/* option vector 3: processor options supported */
	.byte1 = 0,			/* don't ignore, don't halt */
	.byte2 = OV3_FP | OV3_VMX | OV3_DFP,

	.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
	/* option vector 4: IBM PAPR implementation */
	.byte1 = 0,			/* don't halt */
	.min_vp_cap = OV4_MIN_ENT_CAP,	/* minimum VP entitled capacity */

	.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
	/* option vector 5: PAPR/OF options */
	.byte1 = 0,				/* don't ignore, don't halt */
	.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
		 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
#ifdef CONFIG_PCI_MSI
	/* PCIe/MSI support.  Without MSI full PCIe is not supported */
#ifdef CONFIG_PPC_SMLPAR
	OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
	.associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
			 OV5_FEAT(OV5_FORM2_AFFINITY),
	.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
	.micro_checkpoint = 0,
	.max_cpus = cpu_to_be32(NR_CPUS),	/* number of cores supported */
	.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
	.byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),

	/* option vector 6: IBM PAPR hints */
	.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
	.secondary_pteg = 0,
	.os_name = OV6_LINUX,

	/* option vector 7: OS Identification */
	.vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),

static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
/* Old method - ELF header with PT_NOTE sections only works on BE */
#ifdef __BIG_ENDIAN__
static const struct fake_elf {
	char name[8];		/* "PowerPC" */
	char name[24];		/* "IBM,RPA-Client-Config" */
	u32 min_rmo_percent;
} fake_elf __initconst = {
	.e_ident = { 0x7f, 'E', 'L', 'F',
		     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
	.e_type = ET_EXEC,	/* yeah right */
	.e_machine = EM_PPC,
	.e_version = EV_CURRENT,
	.e_phoff = offsetof(struct fake_elf, phdr),
	.e_phentsize = sizeof(Elf32_Phdr),
	.p_offset = offsetof(struct fake_elf, chrpnote),
	.p_filesz = sizeof(struct chrpnote)
	.p_offset = offsetof(struct fake_elf, rpanote),
	.p_filesz = sizeof(struct rpanote)
	.namesz = sizeof("PowerPC"),
	.descsz = sizeof(struct chrpdesc),
	.real_mode = ~0U,	/* ~0 means "don't care" */
	.namesz = sizeof("IBM,RPA-Client-Config"),
	.descsz = sizeof(struct rpadesc),
	.name = "IBM,RPA-Client-Config",
	.min_rmo_size = 64,	/* in megabytes */
	.min_rmo_percent = 0,
	.max_pft_size = 48,	/* 2^48 bytes max PFT size */
#endif /* __BIG_ENDIAN__ */
static int __init prom_count_smt_threads(void)
	/* Pick up the first CPU node we can find */
	for (node = 0; prom_next_node(&node); ) {
		prom_getprop(node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "cpu"))

		/*
		 * There is an entry for each smt thread, each entry being
		 * 4 bytes long.  All cpus should have the same number of
		 * smt threads, so return after finding the first.
		 */
		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
		if (plen == PROM_ERROR)

		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);

		if (plen < 1 || plen > 64) {
			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
				    (unsigned long)plen);
	prom_debug("No threads found, assuming 1 per core\n");
static void __init prom_parse_mmu_model(u8 val,
					struct platform_support *support)
	case OV5_FEAT(OV5_MMU_DYNAMIC):
	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
		prom_debug("MMU - either supported\n");
		support->radix_mmu = !prom_radix_disable;
		support->hash_mmu = true;
	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
		prom_debug("MMU - radix only\n");
		if (prom_radix_disable) {
			/*
			 * If we __have__ to do radix, we're better off ignoring
			 * the command line rather than not booting.
			 */
			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
		support->radix_mmu = true;
	case OV5_FEAT(OV5_MMU_HASH):
		prom_debug("MMU - hash only\n");
		support->hash_mmu = true;
		prom_debug("Unknown mmu support option: 0x%x\n", val);

static void __init prom_parse_xive_model(u8 val,
					 struct platform_support *support)
	case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
		prom_debug("XIVE - either mode supported\n");
		support->xive = !prom_xive_disable;
	case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
		prom_debug("XIVE - exploitation mode supported\n");
		if (prom_xive_disable) {
			/*
			 * If we __have__ to do XIVE, we're better off ignoring
			 * the command line rather than not booting.
			 */
			prom_printf("WARNING: Ignoring cmdline option xive=off\n");
		support->xive = true;
	case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
		prom_debug("XIVE - legacy mode supported\n");
		prom_debug("Unknown xive support option: 0x%x\n", val);

static void __init prom_parse_platform_support(u8 index, u8 val,
					       struct platform_support *support)
	case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
		prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
	case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
		if (val & OV5_FEAT(OV5_RADIX_GTSE))
			support->radix_gtse = !prom_radix_gtse_disable;
	case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
		prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
static void __init prom_check_platform_support(void)
	struct platform_support supported = {
		.radix_gtse = false,
	int prop_len = prom_getproplen(prom.chosen,
				       "ibm,arch-vec-5-platform-support");

	/*
	 * First copy the architecture vec template
	 *
	 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
	 * by __memcpy() when KASAN is active
	 */
	memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
	       sizeof(ibm_architecture_vec));

	prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);

		prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
		if (prop_len > sizeof(vec))
			prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
		for (i = 0; i < prop_len; i += 2) {
			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
			prom_parse_platform_support(vec[i], vec[i + 1], &supported);

	if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
		/* Radix preferred - Check if GTSE is also supported */
		prom_debug("Asking for radix\n");
		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
		if (supported.radix_gtse)
			ibm_architecture_vec.vec5.radix_ext =
				OV5_FEAT(OV5_RADIX_GTSE);
			prom_debug("Radix GTSE isn't supported\n");
	} else if (supported.hash_mmu) {
		/* Default to hash mmu (if we can) */
		prom_debug("Asking for hash\n");
		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
		/* We're probably on a legacy hypervisor */
		prom_debug("Assuming legacy hash support\n");

	if (supported.xive) {
		prom_debug("Asking for XIVE\n");
		ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
static void __init prom_send_capabilities(void)
	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
	prom_check_platform_support();

	root = call_prom("open", 1, 1, ADDR("/"));

	/* We need to tell the FW about the number of cores we support.
	 *
	 * To do that, we count the number of threads on the first core
	 * (we assume this is the same for all cores) and use it to
	 * divide NR_CPUS.
	 */
	cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
	prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
	ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);

	/* try calling the ibm,client-architecture-support method */
	prom_printf("Calling ibm,client-architecture-support...");
	if (call_prom_ret("call-method", 3, 2, &ret,
			  ADDR("ibm,client-architecture-support"),
			  ADDR(&ibm_architecture_vec)) == 0) {
		/* the call exists... */
			prom_printf("\nWARNING: ibm,client-architecture"
				    "-support call FAILED!\n");
		call_prom("close", 1, 0, root);
		prom_printf(" done\n");
	call_prom("close", 1, 0, root);
	prom_printf(" not implemented\n");

#ifdef __BIG_ENDIAN__
	/* no ibm,client-architecture-support call, try the old way */
	elfloader = call_prom("open", 1, 1,
			      ADDR("/packages/elf-loader"));
	if (elfloader == 0) {
		prom_printf("couldn't open /packages/elf-loader\n");
	call_prom("call-method", 3, 1, ADDR("process-elf-header"),
		  elfloader, ADDR(&fake_elf));
	call_prom("close", 1, 0, elfloader);
#endif /* __BIG_ENDIAN__ */
#endif /* CONFIG_PPC_PSERIES */
/*
 * Memory allocation strategy... our layout is normally:
 *
 *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
 *  rare cases, initrd might end up being before the kernel though.
 *  We assume this won't override the final kernel at 0, we have no
 *  provision to handle that in this version, but it should hopefully
 *  never happen.
 *
 *  alloc_top is set to the top of RMO, eventually shrink down if the
 *  TCE space is needed.
 *
 *  alloc_bottom is set to the top of kernel/initrd
 *
 *  from there, allocations are done this way : rtas is allocated
 *  topmost, and the device-tree is allocated from the bottom. We try
 *  to grow the device-tree allocation as we progress. If we can't,
 *  then we fail, we don't currently have a facility to restart
 *  elsewhere, but that shouldn't be necessary.
 *
 *  Note that calls to reserve_mem have to be done explicitly, memory
 *  allocated with either alloc_up or alloc_down isn't automatically
 *  reserved.
 */
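/*
 * Illustration of the scheme above, using calls that appear later in this
 * file: RTAS is carved from the top with alloc_down() and then reserved
 * explicitly, while device-tree chunks grow upward with alloc_up():
 *
 *	base = alloc_down(size, PAGE_SIZE, 0);	(downward, from rmo_top)
 *	reserve_mem(base, size);		(reservation is explicit)
 *	chunk = alloc_up(room, 0);		(device-tree chunk, upward)
 */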
/*
 * Allocates memory in the RMO upward from the kernel/initrd
 *
 * When align is 0, this is a special case, it means to allocate in place
 * at the current location of alloc_bottom or fail (that is basically
 * extending the previous allocation). Used for the device-tree flattening
 */
static unsigned long __init alloc_up(unsigned long size, unsigned long align)
	unsigned long base = alloc_bottom;
	unsigned long addr = 0;

		base = ALIGN(base, align);
	prom_debug("%s(%lx, %lx)\n", __func__, size, align);
		prom_panic("alloc_up() called with mem not initialized\n");

		base = ALIGN(alloc_bottom, align);
		base = alloc_bottom;

	for(; (base + size) <= alloc_top;
	    base = ALIGN(base + 0x100000, align)) {
		prom_debug("    trying: 0x%lx\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
	alloc_bottom = addr + size;

	prom_debug(" -> %lx\n", addr);
	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
	prom_debug("  alloc_top    : %lx\n", alloc_top);
	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
	prom_debug("  rmo_top      : %lx\n", rmo_top);
	prom_debug("  ram_top      : %lx\n", ram_top);
/*
 * Allocates memory downward, either from top of RMO, or if highmem
 * is set, from the top of RAM.  Note that this one doesn't handle
 * failures.  It does claim memory if highmem is not set.
 */
static unsigned long __init alloc_down(unsigned long size, unsigned long align,
				       bool highmem)
	unsigned long base, addr = 0;

	prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
		   highmem ? "(high)" : "(low)");
		prom_panic("alloc_down() called with mem not initialized\n");

		/* Carve out storage for the TCE table. */
		addr = ALIGN_DOWN(alloc_top_high - size, align);
		if (addr <= alloc_bottom)
		/* Will we bump into the RMO ? If yes, check out that we
		 * didn't overlap existing allocations there, if we did,
		 * we are dead, we must be the first in town !
		 */
		if (addr < rmo_top) {
			/* Good, we are first */
			if (alloc_top == rmo_top)
				alloc_top = rmo_top = addr;
		alloc_top_high = addr;

	base = ALIGN_DOWN(alloc_top - size, align);
	for (; base > alloc_bottom;
	     base = ALIGN_DOWN(base - 0x100000, align))  {
		prom_debug("    trying: 0x%lx\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)

	prom_debug(" -> %lx\n", addr);
	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
	prom_debug("  alloc_top    : %lx\n", alloc_top);
	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
	prom_debug("  rmo_top      : %lx\n", rmo_top);
	prom_debug("  ram_top      : %lx\n", ram_top);
/*
 * Parse a "reg" cell
 */
static unsigned long __init prom_next_cell(int s, cell_t **cellp)
	unsigned long r = 0;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
	r = be32_to_cpu(*p++);
		r |= be32_to_cpu(*(p++));
/*
 * Very dumb function for adding to the memory reserve list, but
 * we don't need anything smarter at this point
 *
 * XXX Eventually check for collisions.  They should NEVER happen.
 * If problems seem to show up, it would be a good start to track
 * them down.
 */
static void __init reserve_mem(u64 base, u64 size)
	u64 top = base + size;
	unsigned long cnt = mem_reserve_cnt;

	/* We need to always keep one empty entry so that we
	 * have our terminator with "size" set to 0 since we are
	 * dumb and just copy this entire array to the boot params
	 */
	base = ALIGN_DOWN(base, PAGE_SIZE);
	top = ALIGN(top, PAGE_SIZE);

	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
		prom_panic("Memory reserve map exhausted !\n");
	mem_reserve_map[cnt].base = cpu_to_be64(base);
	mem_reserve_map[cnt].size = cpu_to_be64(size);
	mem_reserve_cnt = cnt + 1;
/*
 * Initialize memory allocation mechanism, parse "memory" nodes and
 * obtain that way the top of memory and RMO to set up our local allocator
 */
static void __init prom_init_mem(void)
	/*
	 * We iterate the memory nodes to find
	 * 1) top of RMO (first node)
	 * 2) top of memory
	 */
	val = cpu_to_be32(2);
	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
	rac = be32_to_cpu(val);
	val = cpu_to_be32(1);
	prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
	rsc = be32_to_cpu(val);
	prom_debug("root_addr_cells: %x\n", rac);
	prom_debug("root_size_cells: %x\n", rsc);

	prom_debug("scanning memory:\n");

	for (node = 0; prom_next_node(&node); ) {
		prom_getprop(node, "device_type", type, sizeof(type));
			/*
			 * CHRP Longtrail machines have no device_type
			 * on the memory node, so check the name instead...
			 */
			prom_getprop(node, "name", type, sizeof(type));
		if (prom_strcmp(type, "memory"))

		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
		if (plen > sizeof(regbuf)) {
			prom_printf("memory node too large for buffer !\n");
			plen = sizeof(regbuf);
		endp = p + (plen / sizeof(cell_t));

		memset(prom_scratch, 0, sizeof(prom_scratch));
		call_prom("package-to-path", 3, 1, node, prom_scratch,
			  sizeof(prom_scratch) - 1);
		prom_debug("  node %s :\n", prom_scratch);
#endif /* DEBUG_PROM */

		while ((endp - p) >= (rac + rsc)) {
			unsigned long base, size;

			base = prom_next_cell(rac, &p);
			size = prom_next_cell(rsc, &p);

			prom_debug("    %lx %lx\n", base, size);
			if (base == 0 && (of_platform & PLATFORM_LPAR))
			if ((base + size) > ram_top)
				ram_top = base + size;

	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);

	/*
	 * If prom_memory_limit is set we reduce the upper limits *except* for
	 * alloc_top_high. This must be the real top of RAM so we can put
	 * TCEs up there.
	 */
	alloc_top_high = ram_top;

	if (prom_memory_limit) {
		if (prom_memory_limit <= alloc_bottom) {
			prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
			prom_memory_limit = 0;
		} else if (prom_memory_limit >= ram_top) {
			prom_printf("Ignoring mem=%lx >= ram_top.\n",
			prom_memory_limit = 0;
			ram_top = prom_memory_limit;
			rmo_top = min(rmo_top, prom_memory_limit);

	/*
	 * Setup our top alloc point, that is top of RMO or top of
	 * segment 0 when running non-LPAR.
	 * Some RS64 machines have buggy firmware where claims up at
	 * 1GB fail.  Cap at 768MB as a workaround.
	 * Since 768MB is plenty of room, and we need to cap to something
	 * reasonable on 32-bit, cap at 768MB on all machines.
	 */
	rmo_top = min(0x30000000ul, rmo_top);
	alloc_top = rmo_top;
	alloc_top_high = ram_top;

	/*
	 * Check if we have an initrd after the kernel but still inside
	 * the RMO.  If we do move our bottom point to after it.
	 */
	if (prom_initrd_start &&
	    prom_initrd_start < rmo_top &&
	    prom_initrd_end > alloc_bottom)
		alloc_bottom = PAGE_ALIGN(prom_initrd_end);

	prom_printf("memory layout at init:\n");
	prom_printf("  memory_limit : %lx (16 MB aligned)\n",
	prom_printf("  alloc_bottom : %lx\n", alloc_bottom);
	prom_printf("  alloc_top    : %lx\n", alloc_top);
	prom_printf("  alloc_top_hi : %lx\n", alloc_top_high);
	prom_printf("  rmo_top      : %lx\n", rmo_top);
	prom_printf("  ram_top      : %lx\n", ram_top);
static void __init prom_close_stdin(void)
	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
		stdin = be32_to_cpu(val);
		call_prom("close", 1, 0, stdin);
#ifdef CONFIG_PPC_SVM
static int __init prom_rtas_hcall(uint64_t args)
	register uint64_t arg1 asm("r3") = H_RTAS;
	register uint64_t arg2 asm("r4") = args;

	asm volatile("sc 1\n" : "=r" (arg1) :
	srr_regs_clobbered();

static struct rtas_args __prombss os_term_args;

static void __init prom_rtas_os_term(char *str)
	prom_debug("%s: start...\n", __func__);
	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	prom_debug("rtas_node: %x\n", rtas_node);
	if (!PHANDLE_VALID(rtas_node))

	prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
	token = be32_to_cpu(val);
	prom_debug("ibm,os-term: %x\n", token);
	if (token == 0)
		prom_panic("Could not get token for ibm,os-term\n");
	os_term_args.token = cpu_to_be32(token);
	os_term_args.nargs = cpu_to_be32(1);
	os_term_args.nret = cpu_to_be32(1);
	os_term_args.args[0] = cpu_to_be32(__pa(str));
	prom_rtas_hcall((uint64_t)&os_term_args);
#endif /* CONFIG_PPC_SVM */
/*
 * Allocate room for and instantiate RTAS
 */
static void __init prom_instantiate_rtas(void)
	u32 base, entry = 0;

	prom_debug("prom_instantiate_rtas: start...\n");

	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	prom_debug("rtas_node: %x\n", rtas_node);
	if (!PHANDLE_VALID(rtas_node))

	prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
	size = be32_to_cpu(val);

	base = alloc_down(size, PAGE_SIZE, 0);
	if (base == 0)
		prom_panic("Could not allocate memory for RTAS\n");

	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
	if (!IHANDLE_VALID(rtas_inst)) {
		prom_printf("opening rtas package failed (%x)\n", rtas_inst);

	prom_printf("instantiating rtas at 0x%x...", base);

	if (call_prom_ret("call-method", 3, 2, &entry,
			  ADDR("instantiate-rtas"),
			  rtas_inst, base) != 0
		prom_printf(" failed\n");
	prom_printf(" done\n");

	reserve_mem(base, size);

	val = cpu_to_be32(base);
	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
	val = cpu_to_be32(entry);
	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",

	/* Check if it supports "query-cpu-stopped-state" */
	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
			 &val, sizeof(val)) != PROM_ERROR)
		rtas_has_query_cpu_stopped = true;

	prom_debug("rtas base     = 0x%x\n", base);
	prom_debug("rtas entry    = 0x%x\n", entry);
	prom_debug("rtas size     = 0x%x\n", size);

	prom_debug("prom_instantiate_rtas: end...\n");
/*
 * Allocate room for and instantiate Stored Measurement Log (SML)
 */
static void __init prom_instantiate_sml(void)
	phandle ibmvtpm_node;
	ihandle ibmvtpm_inst;
	u32 entry = 0, size = 0, succ = 0;

	prom_debug("prom_instantiate_sml: start...\n");

	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
	if (!PHANDLE_VALID(ibmvtpm_node))

	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
	if (!IHANDLE_VALID(ibmvtpm_inst)) {
		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);

	if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
			 &val, sizeof(val)) != PROM_ERROR) {
		if (call_prom_ret("call-method", 2, 2, &succ,
				  ADDR("reformat-sml-to-efi-alignment"),
				  ibmvtpm_inst) != 0 || succ == 0) {
			prom_printf("Reformat SML to EFI alignment failed\n");

	if (call_prom_ret("call-method", 2, 2, &size,
			  ADDR("sml-get-allocated-size"),
			  ibmvtpm_inst) != 0 || size == 0) {
		prom_printf("SML get allocated size failed\n");

	if (call_prom_ret("call-method", 2, 2, &size,
			  ADDR("sml-get-handover-size"),
			  ibmvtpm_inst) != 0 || size == 0) {
		prom_printf("SML get handover size failed\n");

	base = alloc_down(size, PAGE_SIZE, 0);
	if (base == 0)
		prom_panic("Could not allocate memory for sml\n");

	prom_printf("instantiating sml at 0x%llx...", base);

	memset((void *)base, 0, size);

	if (call_prom_ret("call-method", 4, 2, &entry,
			  ADDR("sml-handover"),
			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
		prom_printf("SML handover failed\n");
	prom_printf(" done\n");

	reserve_mem(base, size);

	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
		     &base, sizeof(base));
	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
		     &size, sizeof(size));

	prom_debug("sml base     = 0x%llx\n", base);
	prom_debug("sml size     = 0x%x\n", size);

	prom_debug("prom_instantiate_sml: end...\n");
#ifdef CONFIG_PPC64
/*
 * Allocate room for and initialize TCE tables
 */
#ifdef __BIG_ENDIAN__
static void __init prom_initialize_tce_table(void)
	char compatible[64], type[64], model[64];
	char *path = prom_scratch;
	u32 minalign, minsize;
	u64 tce_entry, *tce_entryp;
	u64 local_alloc_top, local_alloc_bottom;

	prom_debug("starting prom_initialize_tce_table\n");

	/* Cache current top of allocs so we reserve a single block */
	local_alloc_top = alloc_top_high;
	local_alloc_bottom = local_alloc_top;

	/* Search all nodes looking for PHBs. */
	for (node = 0; prom_next_node(&node); ) {
		prom_getprop(node, "compatible",
			     compatible, sizeof(compatible));
		prom_getprop(node, "device_type", type, sizeof(type));
		prom_getprop(node, "model", model, sizeof(model));

		if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))

		/* Keep the old logic intact to avoid regression. */
		if (compatible[0] != 0) {
			if ((prom_strstr(compatible, "python") == NULL) &&
			    (prom_strstr(compatible, "Speedwagon") == NULL) &&
			    (prom_strstr(compatible, "Winnipeg") == NULL))
		} else if (model[0] != 0) {
			if ((prom_strstr(model, "ython") == NULL) &&
			    (prom_strstr(model, "peedwagon") == NULL) &&
			    (prom_strstr(model, "innipeg") == NULL))

		if (prom_getprop(node, "tce-table-minalign", &minalign,
				 sizeof(minalign)) == PROM_ERROR)
		if (prom_getprop(node, "tce-table-minsize", &minsize,
				 sizeof(minsize)) == PROM_ERROR)
			minsize = 4UL << 20;

		/*
		 * Even though we read what OF wants, we just set the table
		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
		 * By doing this, we avoid the pitfalls of trying to DMA to
		 * MMIO space and the DMA alias hole.
		 */
		minsize = 4UL << 20;

		/* Align to the greater of the align or size */
		align = max(minalign, minsize);
		base = alloc_down(minsize, align, 1);
		if (base == 0)
			prom_panic("ERROR, cannot find space for TCE table.\n");
		if (base < local_alloc_bottom)
			local_alloc_bottom = base;

		/* It seems OF doesn't null-terminate the path :-( */
		memset(path, 0, sizeof(prom_scratch));
		/* Call OF to setup the TCE hardware */
		if (call_prom("package-to-path", 3, 1, node,
			      path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
			prom_printf("package-to-path failed\n");

		/* Save away the TCE table attributes for later use. */
		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));

		prom_debug("TCE table: %s\n", path);
		prom_debug("\tnode = 0x%x\n", node);
		prom_debug("\tbase = 0x%llx\n", base);
		prom_debug("\tsize = 0x%x\n", minsize);

		/* Initialize the table to have a one-to-one mapping
		 * over the allocated size.
		 */
		tce_entryp = (u64 *)base;
		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
			tce_entry = (i << PAGE_SHIFT);
			*tce_entryp = tce_entry;

		prom_printf("opening PHB %s", path);
		phb_node = call_prom("open", 1, 1, path);
			prom_printf("... failed\n");
			prom_printf("... done\n");

		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
			  phb_node, -1, minsize,
			  (u32) base, (u32) (base >> 32));
		call_prom("close", 1, 0, phb_node);

	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);

	/* These are only really needed if there is a memory limit in
	 * effect, but we don't know so export them always. */
	prom_tce_alloc_start = local_alloc_bottom;
	prom_tce_alloc_end = local_alloc_top;

	/* Flag the first invalid entry */
	prom_debug("ending prom_initialize_tce_table\n");
#endif /* __BIG_ENDIAN__ */
#endif /* CONFIG_PPC64 */
/*
 * With CHRP SMP we need to use the OF to start the other processors.
 * We can't wait until smp_boot_cpus (the OF is trashed by then)
 * so we have to put the processors into a holding pattern controlled
 * by the kernel (not OF) before we destroy the OF.
 *
 * This uses a chunk of low memory, puts some holding pattern
 * code there and sends the other processors off to there until
 * smp_boot_cpus tells them to do something.  The holding pattern
 * checks that address until its cpu # is there, when it is that
 * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
 * of setting those values.
 *
 * We also use physical address 0x4 here to tell when a cpu
 * is in its holding pattern code.
 */
/*
 * We want to reference the copy of __secondary_hold_* in the
 * 0 - 0x100 address range
 */
#define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)

static void __init prom_hold_cpus(void)
	unsigned long *spinloop
		= (void *) LOW_ADDR(__secondary_hold_spinloop);
	unsigned long *acknowledge
		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);

	/*
	 * On pseries, if RTAS supports "query-cpu-stopped-state",
	 * we skip this stage, the CPUs will be started by the
	 * kernel using RTAS.
	 */
	if ((of_platform == PLATFORM_PSERIES ||
	     of_platform == PLATFORM_PSERIES_LPAR) &&
	    rtas_has_query_cpu_stopped) {
		prom_printf("prom_hold_cpus: skipped\n");

	prom_debug("prom_hold_cpus: start...\n");
	prom_debug("    1) spinloop       = 0x%lx\n", (unsigned long)spinloop);
	prom_debug("    1) *spinloop      = 0x%lx\n", *spinloop);
	prom_debug("    1) acknowledge    = 0x%lx\n",
		   (unsigned long)acknowledge);
	prom_debug("    1) *acknowledge   = 0x%lx\n", *acknowledge);
	prom_debug("    1) secondary_hold = 0x%lx\n", secondary_hold);

	/* Set the common spinloop variable, so all of the secondary cpus
	 * will block when they are awakened from their OF spinloop.
	 * This must occur for both SMP and non SMP kernels, since OF will
	 * be trashed when we move the kernel.
	 */

	for (node = 0; prom_next_node(&node); ) {
		unsigned int cpu_no;

		prom_getprop(node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "cpu") != 0)

		/* Skip non-configured cpus. */
		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
			if (prom_strcmp(type, "okay") != 0)

		reg = cpu_to_be32(-1); /* make sparse happy */
		prom_getprop(node, "reg", &reg, sizeof(reg));
		cpu_no = be32_to_cpu(reg);

		prom_debug("cpu hw idx   = %u\n", cpu_no);

		/* Init the acknowledge var which will be reset by
		 * the secondary cpu when it awakens from its OF
		 * spinloop.
		 */
		*acknowledge = (unsigned long)-1;

		if (cpu_no != prom.cpu) {
			/* Primary Thread of non-boot cpu or any thread */
			prom_printf("starting cpu hw idx %u... ", cpu_no);
			call_prom("start-cpu", 3, 0, node,
				  secondary_hold, cpu_no);
			for (i = 0; (i < 100000000) &&
			     (*acknowledge == ((unsigned long)-1)); i++ )

			if (*acknowledge == cpu_no)
				prom_printf("done\n");
			else
				prom_printf("failed: %lx\n", *acknowledge);
			prom_printf("boot cpu hw idx %u\n", cpu_no);
#endif /* CONFIG_SMP */

	prom_debug("prom_hold_cpus: end...\n");
static void __init prom_init_client_services(unsigned long pp)
	/* Get a handle to the prom entry point before anything else */

	/* get a handle for the stdout device */
	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
	if (!PHANDLE_VALID(prom.chosen))
		prom_panic("cannot find chosen"); /* msg won't be printed :( */

	/* get device tree root */
	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(prom.root))
		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
/*
 * For really old powermacs, we need to map things we claim.
 * For that, we need the ihandle of the mmu.
 * Also, on the longtrail, we need to work around other bugs.
 */
static void __init prom_find_mmu(void)
	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
	if (!PHANDLE_VALID(oprom))
	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
	version[sizeof(version) - 1] = 0;
	/* XXX might need to add other versions here */
	if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
		of_workarounds = OF_WA_CLAIM;
	else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");

	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
		     sizeof(prom.mmumap));
	prom.mmumap = be32_to_cpu(prom.mmumap);
	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
#define prom_find_mmu()
static void __init prom_init_stdout(void)
	char *path = of_stdout_device;
	phandle stdout_node;

	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
		prom_panic("cannot find stdout");

	prom.stdout = be32_to_cpu(val);

	/* Get the full OF pathname of the stdout device */
	memset(path, 0, 256);
	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
	prom_printf("OF stdout device is: %s\n", of_stdout_device);
	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
		     path, prom_strlen(path) + 1);

	/* instance-to-package fails on PA-Semi */
	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
	if (stdout_node != PROM_ERROR) {
		val = cpu_to_be32(stdout_node);

		/* If it's a display, note it */
		memset(type, 0, sizeof(type));
		prom_getprop(stdout_node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "display") == 0)
			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
static int __init prom_find_machine_type(void)
	static char compat[256] __prombss;

	/* Look for a PowerMac or a Cell */
	len = prom_getprop(prom.root, "compatible",
			   compat, sizeof(compat)-1);
		char *p = &compat[i];
		int sl = prom_strlen(p);
		if (prom_strstr(p, "Power Macintosh") ||
		    prom_strstr(p, "MacRISC"))
			return PLATFORM_POWERMAC;
		/* We must make sure we don't detect the IBM Cell
		 * blades as pSeries due to some firmware issues,
		 */
		if (prom_strstr(p, "IBM,CBEA") ||
		    prom_strstr(p, "IBM,CPBW-1.0"))
			return PLATFORM_GENERIC;
#endif /* CONFIG_PPC64 */

	/* Try to figure out if it's an IBM pSeries or any other
	 * PAPR compliant platform. We assume it is if :
	 *  - /device_type is "chrp" (please, do NOT use that for future
	 */
	len = prom_getprop(prom.root, "device_type",
			   compat, sizeof(compat)-1);
		return PLATFORM_GENERIC;
	if (prom_strcmp(compat, "chrp"))
		return PLATFORM_GENERIC;

	/* Default to pSeries. We need to know if we are running LPAR */
	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	if (!PHANDLE_VALID(rtas))
		return PLATFORM_GENERIC;
	x = prom_getproplen(rtas, "ibm,hypertas-functions");
	if (x != PROM_ERROR) {
		prom_debug("Hypertas detected, assuming LPAR !\n");
		return PLATFORM_PSERIES_LPAR;
	return PLATFORM_PSERIES;
	return PLATFORM_GENERIC;
static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
{
	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
}
/*
 * If we have a display that we don't know how to drive,
 * we will want to try to execute OF's open method for it
 * later.  However, OF will probably fall over if we do that
 * after we've taken over the MMU.
 * So we check whether we will need to open the display,
 * and if so, open it now.
 */
static void __init prom_check_displays(void)
	char type[16], *path;
	static const unsigned char default_colors[] __initconst = {
	const unsigned char *clut;

	prom_debug("Looking for displays\n");
	for (node = 0; prom_next_node(&node); ) {
		memset(type, 0, sizeof(type));
		prom_getprop(node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "display") != 0)

		/* It seems OF doesn't null-terminate the path :-( */
		path = prom_scratch;
		memset(path, 0, sizeof(prom_scratch));

		/*
		 * leave some room at the end of the path for appending extra
		 * arguments
		 */
		if (call_prom("package-to-path", 3, 1, node, path,
			      sizeof(prom_scratch) - 10) == PROM_ERROR)
		prom_printf("found display   : %s, opening... ", path);

		ih = call_prom("open", 1, 1, path);
			prom_printf("failed\n");
		prom_printf("done\n");
		prom_setprop(node, path, "linux,opened", NULL, 0);

		/* Setup a usable color table when the appropriate
		 * method is available. Should update this to set-colors */
		clut = default_colors;
		for (i = 0; i < 16; i++, clut += 3)
			if (prom_set_color(ih, i, clut[0], clut[1],

#ifdef CONFIG_LOGO_LINUX_CLUT224
		clut = PTRRELOC(logo_linux_clut224.clut);
		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
			if (prom_set_color(ih, i + 32, clut[0], clut[1],
#endif /* CONFIG_LOGO_LINUX_CLUT224 */

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
			u32 width, height, pitch, addr;

			prom_printf("Setting btext !\n");

			if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)

			if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)

			if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)

			if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)

			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
				    width, height, pitch, addr);
			btext_setup_display(width, height, 8, pitch, addr);
			btext_prepare_BAT();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
			      unsigned long needed, unsigned long align)
	*mem_start = ALIGN(*mem_start, align);
	while ((*mem_start + needed) > *mem_end) {
		unsigned long room, chunk;

		prom_debug("Chunk exhausted, claiming more at %lx...\n",
		room = alloc_top - alloc_bottom;
		if (room > DEVTREE_CHUNK_SIZE)
			room = DEVTREE_CHUNK_SIZE;
		if (room < PAGE_SIZE)
			prom_panic("No memory for flatten_device_tree "
		chunk = alloc_up(room, 0);
			prom_panic("No memory for flatten_device_tree "
				   "(claim failed)\n");
		*mem_end = chunk + room;

	ret = (void *)*mem_start;
	*mem_start += needed;
#define dt_push_token(token, mem_start, mem_end) do {			\
		void *room = make_room(mem_start, mem_end, 4, 4);	\
		*(__be32 *)room = cpu_to_be32(token);			\
	} while(0)
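/*
 * dt_push_token() appends one 32-bit, 4-byte-aligned cell to the structure
 * block in big-endian (FDT) byte order; it is used for the OF_DT_* markers
 * as well as for the property length and name-offset cells.
 */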
static unsigned long __init dt_find_string(char *str)
{
	char *s, *os;

	s = os = (char *)dt_string_start;
	s += 4;
	while (s < (char *)dt_string_end) {
		if (prom_strcmp(s, str) == 0)
			return s - os;
		s += prom_strlen(s) + 1;
	}
	return 0;
}
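/*
 * dt_find_string() returns the offset of str within the strings block, or 0
 * if it is not there yet. Offset 0 can double as "not found" because
 * flatten_device_tree() deliberately leaves a 4-byte hole at the start of
 * the strings block, and the search above starts at offset 4.
 */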
/*
 * The Open Firmware 1275 specification states properties must be 31 bytes or
 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
 */
#define MAX_PROPERTY_NAME 64
static void __init scan_dt_build_strings(phandle node,
					 unsigned long *mem_start,
					 unsigned long *mem_end)
{
	char *prev_name, *namep, *sstart;
	unsigned long soff;
	phandle child;

	sstart = (char *)dt_string_start;

	/* get and store all property names */
	prev_name = "";
	for (;;) {
		/* 64 is max len of name including nul. */
		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
			/* No more nodes: unwind alloc */
			*mem_start = (unsigned long)namep;
			break;
		}

		/* skip "name" */
		if (prom_strcmp(namep, "name") == 0) {
			*mem_start = (unsigned long)namep;
			prev_name = "name";
			continue;
		}

		/* get/create string entry */
		soff = dt_find_string(namep);
		if (soff != 0) {
			*mem_start = (unsigned long)namep;
			namep = sstart + soff;
		} else {
			/* Trim off some if we can */
			*mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
			dt_string_end = *mem_start;
		}
		prev_name = namep;
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_strings(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}
}
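/*
 * The flattened tree is produced in two recursive passes over the firmware
 * tree: scan_dt_build_strings() above collects every distinct property name
 * into the strings block, and scan_dt_build_struct() below emits the
 * structure block, referring back to those names by offset.
 */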
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
					unsigned long *mem_end)
{
	phandle child;
	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
	unsigned long soff;
	unsigned char *valp;
	static char pname[MAX_PROPERTY_NAME] __prombss;
	int l, room, has_phandle = 0;

	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);

	/* get the node's full name */
	namep = (char *)*mem_start;
	room = *mem_end - *mem_start;
	if (room > 255)
		room = 255;
	l = call_prom("package-to-path", 3, 1, node, namep, room);
	if (l >= 0) {
		/* Didn't fit?  Get more room. */
		if (l >= room) {
			if (l >= *mem_end - *mem_start)
				namep = make_room(mem_start, mem_end, l+1, 1);
			call_prom("package-to-path", 3, 1, node, namep, l);
		}
		namep[l] = '\0';

		/* Fixup an Apple bug where they have bogus \0 chars in the
		 * middle of the path in some properties, and extract
		 * the unit name (everything after the last '/').
		 */
		for (lp = p = namep, ep = namep + l; p < ep; p++) {
			if (*p == '/')
				lp = namep;
			else if (*p != 0)
				*lp++ = *p;
		}
		*lp = 0;
		*mem_start = ALIGN((unsigned long)lp + 1, 4);
	}

	/* get it again for debugging */
	path = prom_scratch;
	memset(path, 0, sizeof(prom_scratch));
	call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);

	/* get and store all properties */
	prev_name = "";
	sstart = (char *)dt_string_start;
	for (;;) {
		if (call_prom("nextprop", 3, 1, node, prev_name,
			      pname) != 1)
			break;

		/* skip "name" */
		if (prom_strcmp(pname, "name") == 0) {
			prev_name = "name";
			continue;
		}

		/* find string offset */
		soff = dt_find_string(pname);
		if (soff == 0) {
			prom_printf("WARNING: Can't find string index for"
				    " <%s>, node %s\n", pname, path);
			break;
		}
		prev_name = sstart + soff;

		/* get length */
		l = call_prom("getproplen", 2, 1, node, pname);

		/* sanity checks */
		if (l == PROM_ERROR)
			continue;

		/* push property head */
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(l, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);

		/* push property content */
		valp = make_room(mem_start, mem_end, l, 4);
		call_prom("getprop", 4, 1, node, pname, valp, l);
		*mem_start = ALIGN(*mem_start, 4);

		if (!prom_strcmp(pname, "phandle"))
			has_phandle = 1;
	}

	/* Add a "phandle" property if none already exist */
	if (!has_phandle) {
		soff = dt_find_string("phandle");
		if (soff == 0)
			prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
		else {
			dt_push_token(OF_DT_PROP, mem_start, mem_end);
			dt_push_token(4, mem_start, mem_end);
			dt_push_token(soff, mem_start, mem_end);
			valp = make_room(mem_start, mem_end, 4, 4);
			*(__be32 *)valp = cpu_to_be32(node);
		}
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_struct(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}

	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
}
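/*
 * Each property record emitted above therefore looks like:
 *
 *   OF_DT_PROP | value length | name offset into strings block | value...
 *
 * with the value padded out to a 4-byte boundary, which is the structure
 * block layout the kernel's flattened-device-tree parser expects.
 */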
static void __init flatten_device_tree(void)
{
	phandle root;
	unsigned long mem_start, mem_end, room;
	struct boot_param_header *hdr;
	char *namep;
	u64 *rsvmap;

	/*
	 * Check how much room we have between alloc top & bottom (+/- a
	 * few pages), crop to 1MB, as this is our "chunk" size
	 */
	room = alloc_top - alloc_bottom - 0x4000;
	if (room > DEVTREE_CHUNK_SIZE)
		room = DEVTREE_CHUNK_SIZE;
	prom_debug("starting device tree allocs at %lx\n", alloc_bottom);

	/* Now try to claim that */
	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
	if (mem_start == 0)
		prom_panic("Can't allocate initial device-tree chunk\n");
	mem_end = mem_start + room;

	/* Get root of tree */
	root = call_prom("peer", 1, 1, (phandle)0);
	if (root == (phandle)0)
		prom_panic("couldn't get device tree root\n");

	/* Build header and make room for mem rsv map */
	mem_start = ALIGN(mem_start, 4);
	hdr = make_room(&mem_start, &mem_end,
			sizeof(struct boot_param_header), 4);
	dt_header_start = (unsigned long)hdr;
	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);

	/* Start of strings */
	mem_start = PAGE_ALIGN(mem_start);
	dt_string_start = mem_start;
	mem_start += 4; /* hole */

	/* Add "phandle" in there, we'll need it */
	namep = make_room(&mem_start, &mem_end, 16, 1);
	prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
	mem_start = (unsigned long)namep + prom_strlen(namep) + 1;

	/* Build string array */
	prom_printf("Building dt strings...\n");
	scan_dt_build_strings(root, &mem_start, &mem_end);
	dt_string_end = mem_start;

	/* Build structure */
	mem_start = PAGE_ALIGN(mem_start);
	dt_struct_start = mem_start;
	prom_printf("Building dt structure...\n");
	scan_dt_build_struct(root, &mem_start, &mem_end);
	dt_push_token(OF_DT_END, &mem_start, &mem_end);
	dt_struct_end = PAGE_ALIGN(mem_start);

	/* Finish header */
	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
	hdr->magic = cpu_to_be32(OF_DT_HEADER);
	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
	hdr->version = cpu_to_be32(OF_DT_VERSION);
	/* Version 16 is not backward compatible */
	hdr->last_comp_version = cpu_to_be32(0x10);

	/* Copy the reserve map in */
	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));

#ifdef DEBUG_PROM
	{
		int i;
		prom_printf("reserved memory map:\n");
		for (i = 0; i < mem_reserve_cnt; i++)
			prom_printf("  %llx - %llx\n",
				    be64_to_cpu(mem_reserve_map[i].base),
				    be64_to_cpu(mem_reserve_map[i].size));
	}
#endif
	/* Bump mem_reserve_cnt to cause further reservations to fail
	 * since it's too late.
	 */
	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;

	prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
		    dt_string_start, dt_string_end);
	prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
		    dt_struct_start, dt_struct_end);
}
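/*
 * The resulting blob is laid out as: boot_param_header, memory reserve map,
 * strings block (page aligned), then the structure block (page aligned)
 * terminated by OF_DT_END. All offsets recorded in the header are relative
 * to dt_header_start, which is what gets handed to the kernel as the
 * device-tree pointer.
 */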
#ifdef CONFIG_PPC_CHRP
/*
 * Pegasos and BriQ lack the "ranges" property in the isa node
 * Pegasos needs decimal IRQ 14/15, not hexadecimal
 * Pegasos has the IDE configured in legacy mode, but advertised as native
 */
static void __init fixup_device_tree_chrp(void)
{
	phandle ph;
	u32 prop[6];
	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
	char *name;
	int rc;

	name = "/pci@80000000/isa@c";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(ph)) {
		name = "/pci@ff500000/isa@6";
		ph = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (PHANDLE_VALID(ph)) {
		rc = prom_getproplen(ph, "ranges");
		if (rc == 0 || rc == PROM_ERROR) {
			prom_printf("Fixing up missing ISA range on Pegasos...\n");

			prop[0] = 0x1;
			prop[1] = 0x0;
			prop[2] = rloc;
			prop[3] = 0x0;
			prop[4] = 0x0;
			prop[5] = 0x00010000;
			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
		}
	}

	name = "/pci@80000000/ide@C,1";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (PHANDLE_VALID(ph)) {
		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
		prop[0] = 14;
		prop[1] = 0x0;
		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
		prom_printf("Fixing up IDE class-code on Pegasos...\n");
		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
		if (rc == sizeof(u32)) {
			prop[0] &= ~0x5;
			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
		}
	}
}
#else
#define fixup_device_tree_chrp()
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
static void __init fixup_device_tree_pmac64(void)
{
	phandle u3, i2c, mpic;
	u32 u3_rev;
	u32 interrupts[2];
	u32 parent;

	/* Some G5s have a missing interrupt definition, fix it up here */
	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
	if (!PHANDLE_VALID(u3))
		return;
	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
	if (!PHANDLE_VALID(i2c))
		return;
	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
	if (!PHANDLE_VALID(mpic))
		return;

	/* check if proper rev of u3 */
	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
	    == PROM_ERROR)
		return;
	if (u3_rev < 0x35 || u3_rev > 0x39)
		return;
	/* does it need fixup ? */
	if (prom_getproplen(i2c, "interrupts") > 0)
		return;

	prom_printf("fixing up bogus interrupts for u3 i2c...\n");

	/* interrupt on this revision of u3 is number 0 and level */
	interrupts[0] = 0;
	interrupts[1] = 1;
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
		     &interrupts, sizeof(interrupts));
	parent = (u32)mpic;
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
		     &parent, sizeof(parent));
}
#else
#define fixup_device_tree_pmac64()
#endif
#ifdef CONFIG_PPC_PMAC
static void __init fixup_device_tree_pmac(void)
{
	__be32 val = 1;
	char type[8];
	phandle node;

	// Some pmacs are missing #size-cells on escc nodes
	for (node = 0; prom_next_node(&node); ) {
		memset(type, 0, sizeof(type));
		prom_getprop(node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "escc"))
			continue;

		if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
			continue;

		prom_setprop(node, NULL, "#size-cells", &val, sizeof(val));
	}
}
#else
static inline void fixup_device_tree_pmac(void) { }
#endif
#ifdef CONFIG_PPC_EFIKA
/*
 * The MPC5200 FEC driver requires a phy-handle property to tell it how
 * to talk to the phy. If the phy-handle property is missing, then this
 * function is called to add the appropriate nodes and link it to the
 * ethernet node.
 */
static void __init fixup_device_tree_efika_add_phy(void)
{
	u32 node;
	char prop[64];
	int rv;

	/* Check if /builtin/ethernet exists - bail if it doesn't */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
	if (!PHANDLE_VALID(node))
		return;

	/* Check if the phy-handle property exists - bail if it does */
	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
	if (rv > 0)
		return;

	/*
	 * At this point the ethernet device doesn't have a phy described.
	 * Now we need to add the missing phy node and linkage
	 */

	/* Check for an MDIO bus node - if missing then create one */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet MDIO node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin\" find-device"
			" new-device"
				" 1 encode-int s\" #address-cells\" property"
				" 0 encode-int s\" #size-cells\" property"
				" s\" mdio\" device-name"
				" s\" fsl,mpc5200b-mdio\" encode-string"
				" s\" compatible\" property"
				" 0xf0003000 0x400 reg"
				" 0x3 encode-int"
				" 0x5 encode-int encode+"
				" 0x3 encode-int encode+"
				" s\" interrupts\" property"
			" finish-device");
	}

	/* Check for a PHY device node - if missing then create one and
	 * give its phandle to the ethernet node */
	node = call_prom("finddevice", 1, 1,
			 ADDR("/builtin/mdio/ethernet-phy"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet PHY node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin/mdio\" find-device"
			" new-device"
				" s\" ethernet-phy\" device-name"
				" 0x10 encode-int s\" reg\" property"
				" my-self"
				" ihandle>phandle"
			" finish-device"
			" s\" /builtin/ethernet\" find-device"
				" encode-int"
				" s\" phy-handle\" property"
			" device-end");
	}
}
static void __init fixup_device_tree_efika(void)
{
	int sound_irq[3] = { 2, 2, 0 };
	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
				3,4,0, 3,5,0, 3,6,0, 3,7,0,
				3,8,0, 3,9,0, 3,10,0, 3,11,0,
				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
	u32 node;
	char prop[64];
	int rv, len;

	/* Check if we're really running on an EFIKA */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(node))
		return;

	rv = prom_getprop(node, "model", prop, sizeof(prop));
	if (rv == PROM_ERROR)
		return;
	if (prom_strcmp(prop, "EFIKA5K2"))
		return;

	prom_printf("Applying EFIKA device tree fixups\n");

	/* Claiming to be 'chrp' is death */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
	if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));

	/* CODEGEN,description is exposed in /proc/cpuinfo so
	   it must not advertise the machine as CHRP */
	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
	if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
		prom_setprop(node, "/", "CODEGEN,description",
			     "Efika 5200B PowerPC System",
			     sizeof("Efika 5200B PowerPC System"));

	/* Fixup bestcomm interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
	if (PHANDLE_VALID(node)) {
		len = prom_getproplen(node, "interrupts");
		if (len == 12) {
			prom_printf("Fixing bestcomm interrupts property\n");
			prom_setprop(node, "/builtin/bestcom", "interrupts",
				     bcomm_irq, sizeof(bcomm_irq));
		}
	}

	/* Fixup sound interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
	if (PHANDLE_VALID(node)) {
		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
		if (rv == PROM_ERROR) {
			prom_printf("Adding sound interrupts property\n");
			prom_setprop(node, "/builtin/sound", "interrupts",
				     sound_irq, sizeof(sound_irq));
		}
	}

	/* Make sure ethernet phy-handle property exists */
	fixup_device_tree_efika_add_phy();
}
#else
#define fixup_device_tree_efika()
#endif
#ifdef CONFIG_PPC_PASEMI_NEMO
/*
 * CFE supplied on Nemo is broken in several ways, biggest
 * problem is that it reassigns ISA interrupts to unused mpic ints.
 * Add an interrupt-controller property for the io-bridge to use
 * and correct the ints so we can attach them to an irq_domain
 */
static void __init fixup_device_tree_pasemi(void)
{
	u32 interrupts[2], parent, rval, val = 0;
	char *name, *pci_name;
	phandle iob, node;

	/* Find the root pci node */
	name = "/pxp@0,e0000000";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* check if interrupt-controller node set yet */
	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
		return;

	prom_printf("adding interrupt-controller property for SB600...\n");

	prom_setprop(iob, name, "interrupt-controller", &val, 0);

	pci_name = "/pxp@0,e0000000/pci@11";
	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
	parent = ADDR(iob);

	for( ; prom_next_node(&node); ) {
		/* scan each node for one with an interrupt */
		if (!PHANDLE_VALID(node))
			continue;

		rval = prom_getproplen(node, "interrupts");
		if (rval == 0 || rval == PROM_ERROR)
			continue;

		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
		if ((interrupts[0] < 212) || (interrupts[0] > 222))
			continue;

		/* found a node, update both interrupts and interrupt-parent */
		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
			interrupts[0] -= 203;
		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
			interrupts[0] -= 213;
		if (interrupts[0] == 221)
			interrupts[0] = 14;
		if (interrupts[0] == 222)
			interrupts[0] = 8;

		prom_setprop(node, pci_name, "interrupts", interrupts,
			     sizeof(interrupts));
		prom_setprop(node, pci_name, "interrupt-parent", &parent,
			     sizeof(parent));
	}

	/*
	 * The io-bridge has device_type set to 'io-bridge' change it to 'isa'
	 * so that generic isa-bridge code can add the SB600 and its on-board
	 * peripherals.
	 */
	name = "/pxp@0,e0000000/io-bridge@0";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* device_type is already set, just change it. */

	prom_printf("Changing device_type of SB600 node...\n");

	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
}
#else /* !CONFIG_PPC_PASEMI_NEMO */
static inline void fixup_device_tree_pasemi(void) { }
#endif
static void __init fixup_device_tree(void)
{
	fixup_device_tree_chrp();
	fixup_device_tree_pmac();
	fixup_device_tree_pmac64();
	fixup_device_tree_efika();
	fixup_device_tree_pasemi();
}
static void __init prom_find_boot_cpu(void)
{
	__be32 rval;
	ihandle prom_cpu;
	phandle cpu_pkg;

	rval = 0;
	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
		return;
	prom_cpu = be32_to_cpu(rval);

	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

	if (!PHANDLE_VALID(cpu_pkg))
		return;

	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
	prom.cpu = be32_to_cpu(rval);

	prom_debug("Booting CPU hw index = %d\n", prom.cpu);
}
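/*
 * prom.cpu found here is the hardware index of the CPU we are booting on;
 * it is later written into the flattened device tree header as
 * boot_cpuid_phys by flatten_device_tree().
 */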
static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (r3 && r4 && r4 != 0xdeadbeef) {
		__be64 val;

		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
		prom_initrd_end = prom_initrd_start + r4;

		val = cpu_to_be64(prom_initrd_start);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
			     &val, sizeof(val));
		val = cpu_to_be64(prom_initrd_end);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
			     &val, sizeof(val));

		reserve_mem(prom_initrd_start,
			    prom_initrd_end - prom_initrd_start);

		prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
		prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
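/*
 * The initrd range discovered above is published to the kernel through the
 * /chosen linux,initrd-start and linux,initrd-end properties, and added to
 * the reserve map so the early allocator will not hand that memory out.
 */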
#ifdef CONFIG_PPC_SVM
/*
 * Perform the Enter Secure Mode ultracall.
 */
static int __init enter_secure_mode(unsigned long kbase, unsigned long fdt)
{
	register unsigned long r3 asm("r3") = UV_ESM;
	register unsigned long r4 asm("r4") = kbase;
	register unsigned long r5 asm("r5") = fdt;

	asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));

	return r3;
}
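/*
 * Ultracall convention used above: "sc 2" traps to the ultravisor with the
 * call token (UV_ESM) in r3, the kernel base address in r4 and the fdt
 * pointer in r5; the return status comes back in r3.
 */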
/*
 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
 */
static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
{
	int ret;

	if (!prom_svm_enable)
		return;

	/* Switch to secure mode. */
	prom_printf("Switching to secure mode.\n");

	/*
	 * The ultravisor will do an integrity check of the kernel image but we
	 * relocated it so the check will fail. Restore the original image by
	 * relocating it back to the kernel virtual base address.
	 */
	relocate(KERNELBASE);

	ret = enter_secure_mode(kbase, fdt);

	/* Relocate the kernel again. */
	relocate(kbase);

	if (ret != U_SUCCESS) {
		prom_printf("Returned %d from switching to secure mode.\n", ret);
		prom_rtas_os_term("Switch to secure mode failed.\n");
	}
}
#else
static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
{
}
#endif /* CONFIG_PPC_SVM */
/*
 * We enter here early on, when the Open Firmware prom is still
 * handling exceptions and managing the MMU hash table for us.
 */

unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7,
			       unsigned long kbase)
{
	unsigned long hdr;

#ifdef CONFIG_PPC32
	unsigned long offset = reloc_offset();
	reloc_got2(offset);
#endif

	/*
	 * First zero the BSS
	 */
	memset(&__bss_start, 0, __bss_stop - __bss_start);

	/*
	 * Init interface to Open Firmware, get some node references,
	 * like /chosen
	 */
	prom_init_client_services(pp);

	/*
	 * See if this OF is old enough that we need to do explicit maps
	 * and other workarounds
	 */
	prom_find_mmu();

	/*
	 * Init prom stdout device
	 */
	prom_init_stdout();

	prom_printf("Preparing to boot %s", linux_banner);

	/*
	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	 */
	of_platform = prom_find_machine_type();
	prom_printf("Detected machine type: %x\n", of_platform);

#ifndef CONFIG_NONSTATIC_KERNEL
	/* Bail if this is a kdump kernel. */
	if (PHYSICAL_START > 0)
		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
#endif

	/*
	 * Check for an initrd
	 */
	prom_check_initrd(r3, r4);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

#ifdef CONFIG_PPC_PSERIES
	/*
	 * On pSeries, inform the firmware about our capabilities
	 */
	if (of_platform == PLATFORM_PSERIES ||
	    of_platform == PLATFORM_PSERIES_LPAR)
		prom_send_capabilities();
#endif

	/*
	 * Copy the CPU hold code
	 */
	if (of_platform != PLATFORM_POWERMAC)
		copy_and_flush(0, kbase, 0x100, 0);

	/*
	 * Initialize memory management within prom_init
	 */
	prom_init_mem();

	/*
	 * Determine which cpu is actually running right _now_
	 */
	prom_find_boot_cpu();

	/*
	 * Initialize display devices
	 */
	prom_check_displays();

#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
	/*
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
	 * that uses the allocator, we need to make sure we get the top of memory
	 * available for us here...
	 */
	if (of_platform == PLATFORM_PSERIES)
		prom_initialize_tce_table();
#endif

	/*
	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
	 * have a usable RTAS implementation.
	 */
	if (of_platform != PLATFORM_POWERMAC)
		prom_instantiate_rtas();

#ifdef CONFIG_PPC64
	/* instantiate sml */
	prom_instantiate_sml();
#endif

	/*
	 * On non-powermacs, put all CPUs in spin-loops.
	 *
	 * PowerMacs use a different mechanism to spin CPUs
	 *
	 * (This must be done after instantiating RTAS)
	 */
	if (of_platform != PLATFORM_POWERMAC)
		prom_hold_cpus();

	/*
	 * Fill in some infos for use by the kernel later on
	 */
	if (prom_memory_limit) {
		__be64 val = cpu_to_be64(prom_memory_limit);
		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
			     &val, sizeof(val));
	}
#ifdef CONFIG_PPC64
	if (prom_iommu_off)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
			     NULL, 0);

	if (prom_iommu_force_on)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
			     NULL, 0);

	if (prom_tce_alloc_start) {
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
			     &prom_tce_alloc_start,
			     sizeof(prom_tce_alloc_start));
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
			     &prom_tce_alloc_end,
			     sizeof(prom_tce_alloc_end));
	}
#endif

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree...\n");
	flatten_device_tree();

	/*
	 * in case stdin is USB and still active on IBM machines...
	 * Unfortunately quiesce crashes on some powermacs if we have
	 * closed stdin already (in particular the powerbook 101).
	 */
	if (of_platform != PLATFORM_POWERMAC)
		prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMA's from
	 * devices etc...
	 */
	prom_printf("Quiescing Open Firmware ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	 */
	hdr = dt_header_start;

	prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
	prom_debug("->dt_header_start=0x%lx\n", hdr);

#ifdef CONFIG_PPC32
	reloc_got2(-offset);
#endif

	/* Move to secure memory if we're supposed to be secure guests. */
	setup_secure_guest(kbase, hdr);

	__start(hdr, kbase, 0, 0, 0, 0, 0);

	return 0;
}