2 * Procedures for interfacing to Open Firmware.
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
18 /* we cannot use FORTIFY as it brings in new symbols */
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/init.h>
25 #include <linux/threads.h>
26 #include <linux/spinlock.h>
27 #include <linux/types.h>
28 #include <linux/pci.h>
29 #include <linux/proc_fs.h>
30 #include <linux/stringify.h>
31 #include <linux/delay.h>
32 #include <linux/initrd.h>
33 #include <linux/bitops.h>
37 #include <asm/processor.h>
42 #include <asm/pgtable.h>
43 #include <asm/iommu.h>
44 #include <asm/btext.h>
45 #include <asm/sections.h>
46 #include <asm/machdep.h>
48 #include <asm/asm-prototypes.h>
50 #include <linux/linux_logo.h>
53 * Eventually bump that one up
55 #define DEVTREE_CHUNK_SIZE 0x100000
58 * This is the size of the local memory reserve map that gets copied
59 * into the boot params passed to the kernel. That size is totally
60 * flexible as the kernel just reads the list until it encounters an
61 * entry with size 0, so it can be changed without breaking binary
64 #define MEM_RESERVE_MAP_SIZE 8
67 * prom_init() is called very early on, before the kernel text
68 * and data have been mapped to KERNELBASE. At this point the code
69 * is running at whatever address it has been loaded at.
70 * On ppc32 we compile with -mrelocatable, which means that references
71 * to extern and static variables get relocated automatically.
72 * ppc64 objects are always relocatable, we just need to relocate the
75 * Because OF may have mapped I/O devices into the area starting at
76 * KERNELBASE, particularly on CHRP machines, we can't safely call
77 * OF once the kernel has been mapped to KERNELBASE. Therefore all
78 * OF calls must be done within prom_init().
80 * ADDR is used in calls to call_prom. The 4th and following
81 * arguments to call_prom should be 32-bit values.
82 * On ppc64, 64 bit values are truncated to 32 bits (and
83 * fortunately don't get interpreted as two arguments).
85 #define ADDR(x) (u32)(unsigned long)(x)
88 #define OF_WORKAROUNDS 0
90 #define OF_WORKAROUNDS of_workarounds
94 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
95 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
97 #define PROM_BUG() do { \
98 prom_printf("kernel BUG at %s line 0x%x!\n", \
99 __FILE__, __LINE__); \
100 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
104 #define prom_debug(x...) prom_printf(x)
106 #define prom_debug(x...)
110 typedef u32 prom_arg_t
;
128 struct mem_map_entry
{
133 typedef __be32 cell_t
;
135 extern void __start(unsigned long r3
, unsigned long r4
, unsigned long r5
,
136 unsigned long r6
, unsigned long r7
, unsigned long r8
,
140 extern int enter_prom(struct prom_args
*args
, unsigned long entry
);
142 static inline int enter_prom(struct prom_args
*args
, unsigned long entry
)
144 return ((int (*)(struct prom_args
*))entry
)(args
);
148 extern void copy_and_flush(unsigned long dest
, unsigned long src
,
149 unsigned long size
, unsigned long offset
);
152 static struct prom_t __initdata prom
;
154 static unsigned long prom_entry __initdata
;
156 #define PROM_SCRATCH_SIZE 256
158 static char __initdata of_stdout_device
[256];
159 static char __initdata prom_scratch
[PROM_SCRATCH_SIZE
];
161 static unsigned long __initdata dt_header_start
;
162 static unsigned long __initdata dt_struct_start
, dt_struct_end
;
163 static unsigned long __initdata dt_string_start
, dt_string_end
;
165 static unsigned long __initdata prom_initrd_start
, prom_initrd_end
;
168 static int __initdata prom_iommu_force_on
;
169 static int __initdata prom_iommu_off
;
170 static unsigned long __initdata prom_tce_alloc_start
;
171 static unsigned long __initdata prom_tce_alloc_end
;
174 static bool __initdata prom_radix_disable
;
176 struct platform_support
{
182 /* Platforms codes are now obsolete in the kernel. Now only used within this
183 * file and ultimately gone too. Feel free to change them if you need, they
184 * are not shared with anything outside of this file anymore
186 #define PLATFORM_PSERIES 0x0100
187 #define PLATFORM_PSERIES_LPAR 0x0101
188 #define PLATFORM_LPAR 0x0001
189 #define PLATFORM_POWERMAC 0x0400
190 #define PLATFORM_GENERIC 0x0500
191 #define PLATFORM_OPAL 0x0600
193 static int __initdata of_platform
;
195 static char __initdata prom_cmd_line
[COMMAND_LINE_SIZE
];
197 static unsigned long __initdata prom_memory_limit
;
199 static unsigned long __initdata alloc_top
;
200 static unsigned long __initdata alloc_top_high
;
201 static unsigned long __initdata alloc_bottom
;
202 static unsigned long __initdata rmo_top
;
203 static unsigned long __initdata ram_top
;
205 static struct mem_map_entry __initdata mem_reserve_map
[MEM_RESERVE_MAP_SIZE
];
206 static int __initdata mem_reserve_cnt
;
208 static cell_t __initdata regbuf
[1024];
210 static bool rtas_has_query_cpu_stopped
;
214 * Error results ... some OF calls will return "-1" on error, some
215 * will return 0, some will return either. To simplify, here are
216 * macros to use with any ihandle or phandle return value to check if
220 #define PROM_ERROR (-1u)
221 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
222 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
225 /* This is the one and *ONLY* place where we actually call open
229 static int __init
call_prom(const char *service
, int nargs
, int nret
, ...)
232 struct prom_args args
;
235 args
.service
= cpu_to_be32(ADDR(service
));
236 args
.nargs
= cpu_to_be32(nargs
);
237 args
.nret
= cpu_to_be32(nret
);
239 va_start(list
, nret
);
240 for (i
= 0; i
< nargs
; i
++)
241 args
.args
[i
] = cpu_to_be32(va_arg(list
, prom_arg_t
));
244 for (i
= 0; i
< nret
; i
++)
245 args
.args
[nargs
+i
] = 0;
247 if (enter_prom(&args
, prom_entry
) < 0)
250 return (nret
> 0) ? be32_to_cpu(args
.args
[nargs
]) : 0;
253 static int __init
call_prom_ret(const char *service
, int nargs
, int nret
,
254 prom_arg_t
*rets
, ...)
257 struct prom_args args
;
260 args
.service
= cpu_to_be32(ADDR(service
));
261 args
.nargs
= cpu_to_be32(nargs
);
262 args
.nret
= cpu_to_be32(nret
);
264 va_start(list
, rets
);
265 for (i
= 0; i
< nargs
; i
++)
266 args
.args
[i
] = cpu_to_be32(va_arg(list
, prom_arg_t
));
269 for (i
= 0; i
< nret
; i
++)
270 args
.args
[nargs
+i
] = 0;
272 if (enter_prom(&args
, prom_entry
) < 0)
276 for (i
= 1; i
< nret
; ++i
)
277 rets
[i
-1] = be32_to_cpu(args
.args
[nargs
+i
]);
279 return (nret
> 0) ? be32_to_cpu(args
.args
[nargs
]) : 0;
283 static void __init
prom_print(const char *msg
)
287 if (prom
.stdout
== 0)
290 for (p
= msg
; *p
!= 0; p
= q
) {
291 for (q
= p
; *q
!= 0 && *q
!= '\n'; ++q
)
294 call_prom("write", 3, 1, prom
.stdout
, p
, q
- p
);
298 call_prom("write", 3, 1, prom
.stdout
, ADDR("\r\n"), 2);
303 static void __init
prom_print_hex(unsigned long val
)
305 int i
, nibbles
= sizeof(val
)*2;
306 char buf
[sizeof(val
)*2+1];
308 for (i
= nibbles
-1; i
>= 0; i
--) {
309 buf
[i
] = (val
& 0xf) + '0';
311 buf
[i
] += ('a'-'0'-10);
315 call_prom("write", 3, 1, prom
.stdout
, buf
, nibbles
);
318 /* max number of decimal digits in an unsigned long */
320 static void __init
prom_print_dec(unsigned long val
)
323 char buf
[UL_DIGITS
+1];
325 for (i
= UL_DIGITS
-1; i
>= 0; i
--) {
326 buf
[i
] = (val
% 10) + '0';
331 /* shift stuff down */
332 size
= UL_DIGITS
- i
;
333 call_prom("write", 3, 1, prom
.stdout
, buf
+i
, size
);
336 static void __init
prom_printf(const char *format
, ...)
338 const char *p
, *q
, *s
;
343 va_start(args
, format
);
344 for (p
= format
; *p
!= 0; p
= q
) {
345 for (q
= p
; *q
!= 0 && *q
!= '\n' && *q
!= '%'; ++q
)
348 call_prom("write", 3, 1, prom
.stdout
, p
, q
- p
);
353 call_prom("write", 3, 1, prom
.stdout
,
363 s
= va_arg(args
, const char *);
368 v
= va_arg(args
, unsigned long);
373 vs
= va_arg(args
, int);
384 else if (*q
== 'x') {
386 v
= va_arg(args
, unsigned long);
388 } else if (*q
== 'u') { /* '%lu' */
390 v
= va_arg(args
, unsigned long);
392 } else if (*q
== 'd') { /* %ld */
394 vs
= va_arg(args
, long);
408 static unsigned int __init
prom_claim(unsigned long virt
, unsigned long size
,
412 if (align
== 0 && (OF_WORKAROUNDS
& OF_WA_CLAIM
)) {
414 * Old OF requires we claim physical and virtual separately
415 * and then map explicitly (assuming virtual mode)
420 ret
= call_prom_ret("call-method", 5, 2, &result
,
421 ADDR("claim"), prom
.memory
,
423 if (ret
!= 0 || result
== -1)
425 ret
= call_prom_ret("call-method", 5, 2, &result
,
426 ADDR("claim"), prom
.mmumap
,
429 call_prom("call-method", 4, 1, ADDR("release"),
430 prom
.memory
, size
, virt
);
433 /* the 0x12 is M (coherence) + PP == read/write */
434 call_prom("call-method", 6, 1,
435 ADDR("map"), prom
.mmumap
, 0x12, size
, virt
, virt
);
438 return call_prom("claim", 3, 1, (prom_arg_t
)virt
, (prom_arg_t
)size
,
442 static void __init
__attribute__((noreturn
)) prom_panic(const char *reason
)
445 /* Do not call exit because it clears the screen on pmac
446 * it also causes some sort of double-fault on early pmacs */
447 if (of_platform
== PLATFORM_POWERMAC
)
450 /* ToDo: should put up an SRC here on pSeries */
451 call_prom("exit", 0, 0);
453 for (;;) /* should never get here */
458 static int __init
prom_next_node(phandle
*nodep
)
462 if ((node
= *nodep
) != 0
463 && (*nodep
= call_prom("child", 1, 1, node
)) != 0)
465 if ((*nodep
= call_prom("peer", 1, 1, node
)) != 0)
468 if ((node
= call_prom("parent", 1, 1, node
)) == 0)
470 if ((*nodep
= call_prom("peer", 1, 1, node
)) != 0)
475 static inline int prom_getprop(phandle node
, const char *pname
,
476 void *value
, size_t valuelen
)
478 return call_prom("getprop", 4, 1, node
, ADDR(pname
),
479 (u32
)(unsigned long) value
, (u32
) valuelen
);
482 static inline int prom_getproplen(phandle node
, const char *pname
)
484 return call_prom("getproplen", 2, 1, node
, ADDR(pname
));
487 static void add_string(char **str
, const char *q
)
497 static char *tohex(unsigned int x
)
499 static char digits
[] = "0123456789abcdef";
500 static char result
[9];
507 result
[i
] = digits
[x
& 0xf];
509 } while (x
!= 0 && i
> 0);
513 static int __init
prom_setprop(phandle node
, const char *nodename
,
514 const char *pname
, void *value
, size_t valuelen
)
518 if (!(OF_WORKAROUNDS
& OF_WA_LONGTRAIL
))
519 return call_prom("setprop", 4, 1, node
, ADDR(pname
),
520 (u32
)(unsigned long) value
, (u32
) valuelen
);
522 /* gah... setprop doesn't work on longtrail, have to use interpret */
524 add_string(&p
, "dev");
525 add_string(&p
, nodename
);
526 add_string(&p
, tohex((u32
)(unsigned long) value
));
527 add_string(&p
, tohex(valuelen
));
528 add_string(&p
, tohex(ADDR(pname
)));
529 add_string(&p
, tohex(strlen(pname
)));
530 add_string(&p
, "property");
532 return call_prom("interpret", 1, 1, (u32
)(unsigned long) cmd
);
535 /* We can't use the standard versions because of relocation headaches. */
536 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
537 || ('a' <= (c) && (c) <= 'f') \
538 || ('A' <= (c) && (c) <= 'F'))
540 #define isdigit(c) ('0' <= (c) && (c) <= '9')
541 #define islower(c) ('a' <= (c) && (c) <= 'z')
542 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
544 static unsigned long prom_strtoul(const char *cp
, const char **endp
)
546 unsigned long result
= 0, base
= 10, value
;
551 if (toupper(*cp
) == 'X') {
557 while (isxdigit(*cp
) &&
558 (value
= isdigit(*cp
) ? *cp
- '0' : toupper(*cp
) - 'A' + 10) < base
) {
559 result
= result
* base
+ value
;
569 static unsigned long prom_memparse(const char *ptr
, const char **retptr
)
571 unsigned long ret
= prom_strtoul(ptr
, retptr
);
575 * We can't use a switch here because GCC *may* generate a
576 * jump table which won't work, because we're not running at
577 * the address we're linked at.
579 if ('G' == **retptr
|| 'g' == **retptr
)
582 if ('M' == **retptr
|| 'm' == **retptr
)
585 if ('K' == **retptr
|| 'k' == **retptr
)
597 * Early parsing of the command line passed to the kernel, used for
598 * "mem=x" and the options that affect the iommu
600 static void __init
early_cmdline_parse(void)
607 prom_cmd_line
[0] = 0;
609 if ((long)prom
.chosen
> 0)
610 l
= prom_getprop(prom
.chosen
, "bootargs", p
, COMMAND_LINE_SIZE
-1);
611 #ifdef CONFIG_CMDLINE
612 if (l
<= 0 || p
[0] == '\0') /* dbl check */
613 strlcpy(prom_cmd_line
,
614 CONFIG_CMDLINE
, sizeof(prom_cmd_line
));
615 #endif /* CONFIG_CMDLINE */
616 prom_printf("command line: %s\n", prom_cmd_line
);
619 opt
= strstr(prom_cmd_line
, "iommu=");
621 prom_printf("iommu opt is: %s\n", opt
);
623 while (*opt
&& *opt
== ' ')
625 if (!strncmp(opt
, "off", 3))
627 else if (!strncmp(opt
, "force", 5))
628 prom_iommu_force_on
= 1;
631 opt
= strstr(prom_cmd_line
, "mem=");
634 prom_memory_limit
= prom_memparse(opt
, (const char **)&opt
);
636 /* Align to 16 MB == size of ppc64 large page */
637 prom_memory_limit
= ALIGN(prom_memory_limit
, 0x1000000);
641 opt
= strstr(prom_cmd_line
, "disable_radix");
643 prom_debug("Radix disabled from cmdline\n");
644 prom_radix_disable
= true;
648 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
650 * The architecture vector has an array of PVR mask/value pairs,
651 * followed by # option vectors - 1, followed by the option vectors.
653 * See prom.h for the definition of the bits specified in the
654 * architecture vector.
657 /* Firmware expects the value to be n - 1, where n is the # of vectors */
658 #define NUM_VECTORS(n) ((n) - 1)
661 * Firmware expects 1 + n - 2, where n is the length of the option vector in
662 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
664 #define VECTOR_LENGTH(n) (1 + (n) - 2)
666 struct option_vector1
{
672 struct option_vector2
{
686 struct option_vector3
{
691 struct option_vector4
{
696 struct option_vector5
{
708 u8 platform_facilities
;
719 struct option_vector6
{
725 struct ibm_arch_vec
{
726 struct { u32 mask
, val
; } pvrs
[12];
731 struct option_vector1 vec1
;
734 struct option_vector2 vec2
;
737 struct option_vector3 vec3
;
740 struct option_vector4 vec4
;
743 struct option_vector5 vec5
;
746 struct option_vector6 vec6
;
749 struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec
= {
752 .mask
= cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
753 .val
= cpu_to_be32(0x003a0000),
756 .mask
= cpu_to_be32(0xffff0000), /* POWER6 */
757 .val
= cpu_to_be32(0x003e0000),
760 .mask
= cpu_to_be32(0xffff0000), /* POWER7 */
761 .val
= cpu_to_be32(0x003f0000),
764 .mask
= cpu_to_be32(0xffff0000), /* POWER8E */
765 .val
= cpu_to_be32(0x004b0000),
768 .mask
= cpu_to_be32(0xffff0000), /* POWER8NVL */
769 .val
= cpu_to_be32(0x004c0000),
772 .mask
= cpu_to_be32(0xffff0000), /* POWER8 */
773 .val
= cpu_to_be32(0x004d0000),
776 .mask
= cpu_to_be32(0xffff0000), /* POWER9 */
777 .val
= cpu_to_be32(0x004e0000),
780 .mask
= cpu_to_be32(0xffffffff), /* all 3.00-compliant */
781 .val
= cpu_to_be32(0x0f000005),
784 .mask
= cpu_to_be32(0xffffffff), /* all 2.07-compliant */
785 .val
= cpu_to_be32(0x0f000004),
788 .mask
= cpu_to_be32(0xffffffff), /* all 2.06-compliant */
789 .val
= cpu_to_be32(0x0f000003),
792 .mask
= cpu_to_be32(0xffffffff), /* all 2.05-compliant */
793 .val
= cpu_to_be32(0x0f000002),
796 .mask
= cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
797 .val
= cpu_to_be32(0x0f000001),
801 .num_vectors
= NUM_VECTORS(6),
803 .vec1_len
= VECTOR_LENGTH(sizeof(struct option_vector1
)),
806 .arch_versions
= OV1_PPC_2_00
| OV1_PPC_2_01
| OV1_PPC_2_02
| OV1_PPC_2_03
|
807 OV1_PPC_2_04
| OV1_PPC_2_05
| OV1_PPC_2_06
| OV1_PPC_2_07
,
808 .arch_versions3
= OV1_PPC_3_00
,
811 .vec2_len
= VECTOR_LENGTH(sizeof(struct option_vector2
)),
812 /* option vector 2: Open Firmware options supported */
814 .byte1
= OV2_REAL_MODE
,
816 .real_base
= cpu_to_be32(0xffffffff),
817 .real_size
= cpu_to_be32(0xffffffff),
818 .virt_base
= cpu_to_be32(0xffffffff),
819 .virt_size
= cpu_to_be32(0xffffffff),
820 .load_base
= cpu_to_be32(0xffffffff),
821 .min_rma
= cpu_to_be32(512), /* 512MB min RMA */
822 .min_load
= cpu_to_be32(0xffffffff), /* full client load */
823 .min_rma_percent
= 0, /* min RMA percentage of total RAM */
824 .max_pft_size
= 48, /* max log_2(hash table size) */
827 .vec3_len
= VECTOR_LENGTH(sizeof(struct option_vector3
)),
828 /* option vector 3: processor options supported */
830 .byte1
= 0, /* don't ignore, don't halt */
831 .byte2
= OV3_FP
| OV3_VMX
| OV3_DFP
,
834 .vec4_len
= VECTOR_LENGTH(sizeof(struct option_vector4
)),
835 /* option vector 4: IBM PAPR implementation */
837 .byte1
= 0, /* don't halt */
838 .min_vp_cap
= OV4_MIN_ENT_CAP
, /* minimum VP entitled capacity */
841 .vec5_len
= VECTOR_LENGTH(sizeof(struct option_vector5
)),
842 /* option vector 5: PAPR/OF options */
844 .byte1
= 0, /* don't ignore, don't halt */
845 .byte2
= OV5_FEAT(OV5_LPAR
) | OV5_FEAT(OV5_SPLPAR
) | OV5_FEAT(OV5_LARGE_PAGES
) |
846 OV5_FEAT(OV5_DRCONF_MEMORY
) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU
) |
847 #ifdef CONFIG_PCI_MSI
848 /* PCIe/MSI support. Without MSI full PCIe is not supported */
855 #ifdef CONFIG_PPC_SMLPAR
856 OV5_FEAT(OV5_CMO
) | OV5_FEAT(OV5_XCMO
),
860 .associativity
= OV5_FEAT(OV5_TYPE1_AFFINITY
) | OV5_FEAT(OV5_PRRN
),
861 .bin_opts
= OV5_FEAT(OV5_RESIZE_HPT
) | OV5_FEAT(OV5_HP_EVT
),
862 .micro_checkpoint
= 0,
864 .max_cpus
= cpu_to_be32(NR_CPUS
), /* number of cores supported */
867 .platform_facilities
= OV5_FEAT(OV5_PFO_HW_RNG
) | OV5_FEAT(OV5_PFO_HW_ENCR
) | OV5_FEAT(OV5_PFO_HW_842
),
877 /* option vector 6: IBM PAPR hints */
878 .vec6_len
= VECTOR_LENGTH(sizeof(struct option_vector6
)),
882 .os_name
= OV6_LINUX
,
886 /* Old method - ELF header with PT_NOTE sections only works on BE */
887 #ifdef __BIG_ENDIAN__
888 static struct fake_elf
{
895 char name
[8]; /* "PowerPC" */
909 char name
[24]; /* "IBM,RPA-Client-Config" */
923 .e_ident
= { 0x7f, 'E', 'L', 'F',
924 ELFCLASS32
, ELFDATA2MSB
, EV_CURRENT
},
925 .e_type
= ET_EXEC
, /* yeah right */
927 .e_version
= EV_CURRENT
,
928 .e_phoff
= offsetof(struct fake_elf
, phdr
),
929 .e_phentsize
= sizeof(Elf32_Phdr
),
935 .p_offset
= offsetof(struct fake_elf
, chrpnote
),
936 .p_filesz
= sizeof(struct chrpnote
)
939 .p_offset
= offsetof(struct fake_elf
, rpanote
),
940 .p_filesz
= sizeof(struct rpanote
)
944 .namesz
= sizeof("PowerPC"),
945 .descsz
= sizeof(struct chrpdesc
),
949 .real_mode
= ~0U, /* ~0 means "don't care" */
958 .namesz
= sizeof("IBM,RPA-Client-Config"),
959 .descsz
= sizeof(struct rpadesc
),
961 .name
= "IBM,RPA-Client-Config",
964 .min_rmo_size
= 64, /* in megabytes */
965 .min_rmo_percent
= 0,
966 .max_pft_size
= 48, /* 2^48 bytes max PFT size */
973 #endif /* __BIG_ENDIAN__ */
975 static int __init
prom_count_smt_threads(void)
981 /* Pick up th first CPU node we can find */
982 for (node
= 0; prom_next_node(&node
); ) {
984 prom_getprop(node
, "device_type", type
, sizeof(type
));
986 if (strcmp(type
, "cpu"))
989 * There is an entry for each smt thread, each entry being
990 * 4 bytes long. All cpus should have the same number of
991 * smt threads, so return after finding the first.
993 plen
= prom_getproplen(node
, "ibm,ppc-interrupt-server#s");
994 if (plen
== PROM_ERROR
)
997 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen
);
1000 if (plen
< 1 || plen
> 64) {
1001 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1002 (unsigned long)plen
);
1007 prom_debug("No threads found, assuming 1 per core\n");
1013 static void __init
prom_parse_mmu_model(u8 val
,
1014 struct platform_support
*support
)
1017 case OV5_FEAT(OV5_MMU_DYNAMIC
):
1018 case OV5_FEAT(OV5_MMU_EITHER
): /* Either Available */
1019 prom_debug("MMU - either supported\n");
1020 support
->radix_mmu
= !prom_radix_disable
;
1021 support
->hash_mmu
= true;
1023 case OV5_FEAT(OV5_MMU_RADIX
): /* Only Radix */
1024 prom_debug("MMU - radix only\n");
1025 if (prom_radix_disable
) {
1027 * If we __have__ to do radix, we're better off ignoring
1028 * the command line rather than not booting.
1030 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1032 support
->radix_mmu
= true;
1034 case OV5_FEAT(OV5_MMU_HASH
):
1035 prom_debug("MMU - hash only\n");
1036 support
->hash_mmu
= true;
1039 prom_debug("Unknown mmu support option: 0x%x\n", val
);
1044 static void __init
prom_parse_platform_support(u8 index
, u8 val
,
1045 struct platform_support
*support
)
1048 case OV5_INDX(OV5_MMU_SUPPORT
): /* MMU Model */
1049 prom_parse_mmu_model(val
& OV5_FEAT(OV5_MMU_SUPPORT
), support
);
1051 case OV5_INDX(OV5_RADIX_GTSE
): /* Radix Extensions */
1052 if (val
& OV5_FEAT(OV5_RADIX_GTSE
)) {
1053 prom_debug("Radix - GTSE supported\n");
1054 support
->radix_gtse
= true;
1060 static void __init
prom_check_platform_support(void)
1062 struct platform_support supported
= {
1067 int prop_len
= prom_getproplen(prom
.chosen
,
1068 "ibm,arch-vec-5-platform-support");
1072 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1074 prom_getprop(prom
.chosen
, "ibm,arch-vec-5-platform-support",
1076 for (i
= 0; i
< prop_len
; i
+= 2) {
1077 prom_debug("%d: index = 0x%x val = 0x%x\n", i
/ 2
1080 prom_parse_platform_support(vec
[i
], vec
[i
+ 1],
1085 if (supported
.radix_mmu
&& supported
.radix_gtse
) {
1086 /* Radix preferred - but we require GTSE for now */
1087 prom_debug("Asking for radix with GTSE\n");
1088 ibm_architecture_vec
.vec5
.mmu
= OV5_FEAT(OV5_MMU_RADIX
);
1089 ibm_architecture_vec
.vec5
.radix_ext
= OV5_FEAT(OV5_RADIX_GTSE
);
1090 } else if (supported
.hash_mmu
) {
1091 /* Default to hash mmu (if we can) */
1092 prom_debug("Asking for hash\n");
1093 ibm_architecture_vec
.vec5
.mmu
= OV5_FEAT(OV5_MMU_HASH
);
1095 /* We're probably on a legacy hypervisor */
1096 prom_debug("Assuming legacy hash support\n");
1100 static void __init
prom_send_capabilities(void)
1106 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1107 prom_check_platform_support();
1109 root
= call_prom("open", 1, 1, ADDR("/"));
1111 /* We need to tell the FW about the number of cores we support.
1113 * To do that, we count the number of threads on the first core
1114 * (we assume this is the same for all cores) and use it to
1118 cores
= DIV_ROUND_UP(NR_CPUS
, prom_count_smt_threads());
1119 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
1122 ibm_architecture_vec
.vec5
.max_cpus
= cpu_to_be32(cores
);
1124 /* try calling the ibm,client-architecture-support method */
1125 prom_printf("Calling ibm,client-architecture-support...");
1126 if (call_prom_ret("call-method", 3, 2, &ret
,
1127 ADDR("ibm,client-architecture-support"),
1129 ADDR(&ibm_architecture_vec
)) == 0) {
1130 /* the call exists... */
1132 prom_printf("\nWARNING: ibm,client-architecture"
1133 "-support call FAILED!\n");
1134 call_prom("close", 1, 0, root
);
1135 prom_printf(" done\n");
1138 call_prom("close", 1, 0, root
);
1139 prom_printf(" not implemented\n");
1142 #ifdef __BIG_ENDIAN__
1146 /* no ibm,client-architecture-support call, try the old way */
1147 elfloader
= call_prom("open", 1, 1,
1148 ADDR("/packages/elf-loader"));
1149 if (elfloader
== 0) {
1150 prom_printf("couldn't open /packages/elf-loader\n");
1153 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1154 elfloader
, ADDR(&fake_elf
));
1155 call_prom("close", 1, 0, elfloader
);
1157 #endif /* __BIG_ENDIAN__ */
1159 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1162 * Memory allocation strategy... our layout is normally:
1164 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1165 * rare cases, initrd might end up being before the kernel though.
1166 * We assume this won't override the final kernel at 0, we have no
1167 * provision to handle that in this version, but it should hopefully
1170 * alloc_top is set to the top of RMO, eventually shrink down if the
1173 * alloc_bottom is set to the top of kernel/initrd
1175 * from there, allocations are done this way : rtas is allocated
1176 * topmost, and the device-tree is allocated from the bottom. We try
1177 * to grow the device-tree allocation as we progress. If we can't,
1178 * then we fail, we don't currently have a facility to restart
1179 * elsewhere, but that shouldn't be necessary.
1181 * Note that calls to reserve_mem have to be done explicitly, memory
1182 * allocated with either alloc_up or alloc_down isn't automatically
1188 * Allocates memory in the RMO upward from the kernel/initrd
1190 * When align is 0, this is a special case, it means to allocate in place
1191 * at the current location of alloc_bottom or fail (that is basically
1192 * extending the previous allocation). Used for the device-tree flattening
1194 static unsigned long __init
alloc_up(unsigned long size
, unsigned long align
)
1196 unsigned long base
= alloc_bottom
;
1197 unsigned long addr
= 0;
1200 base
= _ALIGN_UP(base
, align
);
1201 prom_debug("alloc_up(%x, %x)\n", size
, align
);
1203 prom_panic("alloc_up() called with mem not initialized\n");
1206 base
= _ALIGN_UP(alloc_bottom
, align
);
1208 base
= alloc_bottom
;
1210 for(; (base
+ size
) <= alloc_top
;
1211 base
= _ALIGN_UP(base
+ 0x100000, align
)) {
1212 prom_debug(" trying: 0x%x\n\r", base
);
1213 addr
= (unsigned long)prom_claim(base
, size
, 0);
1214 if (addr
!= PROM_ERROR
&& addr
!= 0)
1222 alloc_bottom
= addr
+ size
;
1224 prom_debug(" -> %x\n", addr
);
1225 prom_debug(" alloc_bottom : %x\n", alloc_bottom
);
1226 prom_debug(" alloc_top : %x\n", alloc_top
);
1227 prom_debug(" alloc_top_hi : %x\n", alloc_top_high
);
1228 prom_debug(" rmo_top : %x\n", rmo_top
);
1229 prom_debug(" ram_top : %x\n", ram_top
);
1235 * Allocates memory downward, either from top of RMO, or if highmem
1236 * is set, from the top of RAM. Note that this one doesn't handle
1237 * failures. It does claim memory if highmem is not set.
1239 static unsigned long __init
alloc_down(unsigned long size
, unsigned long align
,
1242 unsigned long base
, addr
= 0;
1244 prom_debug("alloc_down(%x, %x, %s)\n", size
, align
,
1245 highmem
? "(high)" : "(low)");
1247 prom_panic("alloc_down() called with mem not initialized\n");
1250 /* Carve out storage for the TCE table. */
1251 addr
= _ALIGN_DOWN(alloc_top_high
- size
, align
);
1252 if (addr
<= alloc_bottom
)
1254 /* Will we bump into the RMO ? If yes, check out that we
1255 * didn't overlap existing allocations there, if we did,
1256 * we are dead, we must be the first in town !
1258 if (addr
< rmo_top
) {
1259 /* Good, we are first */
1260 if (alloc_top
== rmo_top
)
1261 alloc_top
= rmo_top
= addr
;
1265 alloc_top_high
= addr
;
1269 base
= _ALIGN_DOWN(alloc_top
- size
, align
);
1270 for (; base
> alloc_bottom
;
1271 base
= _ALIGN_DOWN(base
- 0x100000, align
)) {
1272 prom_debug(" trying: 0x%x\n\r", base
);
1273 addr
= (unsigned long)prom_claim(base
, size
, 0);
1274 if (addr
!= PROM_ERROR
&& addr
!= 0)
1283 prom_debug(" -> %x\n", addr
);
1284 prom_debug(" alloc_bottom : %x\n", alloc_bottom
);
1285 prom_debug(" alloc_top : %x\n", alloc_top
);
1286 prom_debug(" alloc_top_hi : %x\n", alloc_top_high
);
1287 prom_debug(" rmo_top : %x\n", rmo_top
);
1288 prom_debug(" ram_top : %x\n", ram_top
);
1294 * Parse a "reg" cell
1296 static unsigned long __init
prom_next_cell(int s
, cell_t
**cellp
)
1299 unsigned long r
= 0;
1301 /* Ignore more than 2 cells */
1302 while (s
> sizeof(unsigned long) / 4) {
1306 r
= be32_to_cpu(*p
++);
1310 r
|= be32_to_cpu(*(p
++));
1318 * Very dumb function for adding to the memory reserve list, but
1319 * we don't need anything smarter at this point
1321 * XXX Eventually check for collisions. They should NEVER happen.
1322 * If problems seem to show up, it would be a good start to track
1325 static void __init
reserve_mem(u64 base
, u64 size
)
1327 u64 top
= base
+ size
;
1328 unsigned long cnt
= mem_reserve_cnt
;
1333 /* We need to always keep one empty entry so that we
1334 * have our terminator with "size" set to 0 since we are
1335 * dumb and just copy this entire array to the boot params
1337 base
= _ALIGN_DOWN(base
, PAGE_SIZE
);
1338 top
= _ALIGN_UP(top
, PAGE_SIZE
);
1341 if (cnt
>= (MEM_RESERVE_MAP_SIZE
- 1))
1342 prom_panic("Memory reserve map exhausted !\n");
1343 mem_reserve_map
[cnt
].base
= cpu_to_be64(base
);
1344 mem_reserve_map
[cnt
].size
= cpu_to_be64(size
);
1345 mem_reserve_cnt
= cnt
+ 1;
1349 * Initialize memory allocation mechanism, parse "memory" nodes and
1350 * obtain that way the top of memory and RMO to setup out local allocator
1352 static void __init
prom_init_mem(void)
1355 char *path
, type
[64];
1362 * We iterate the memory nodes to find
1363 * 1) top of RMO (first node)
1366 val
= cpu_to_be32(2);
1367 prom_getprop(prom
.root
, "#address-cells", &val
, sizeof(val
));
1368 rac
= be32_to_cpu(val
);
1369 val
= cpu_to_be32(1);
1370 prom_getprop(prom
.root
, "#size-cells", &val
, sizeof(rsc
));
1371 rsc
= be32_to_cpu(val
);
1372 prom_debug("root_addr_cells: %x\n", rac
);
1373 prom_debug("root_size_cells: %x\n", rsc
);
1375 prom_debug("scanning memory:\n");
1376 path
= prom_scratch
;
1378 for (node
= 0; prom_next_node(&node
); ) {
1380 prom_getprop(node
, "device_type", type
, sizeof(type
));
1384 * CHRP Longtrail machines have no device_type
1385 * on the memory node, so check the name instead...
1387 prom_getprop(node
, "name", type
, sizeof(type
));
1389 if (strcmp(type
, "memory"))
1392 plen
= prom_getprop(node
, "reg", regbuf
, sizeof(regbuf
));
1393 if (plen
> sizeof(regbuf
)) {
1394 prom_printf("memory node too large for buffer !\n");
1395 plen
= sizeof(regbuf
);
1398 endp
= p
+ (plen
/ sizeof(cell_t
));
1401 memset(path
, 0, PROM_SCRATCH_SIZE
);
1402 call_prom("package-to-path", 3, 1, node
, path
, PROM_SCRATCH_SIZE
-1);
1403 prom_debug(" node %s :\n", path
);
1404 #endif /* DEBUG_PROM */
1406 while ((endp
- p
) >= (rac
+ rsc
)) {
1407 unsigned long base
, size
;
1409 base
= prom_next_cell(rac
, &p
);
1410 size
= prom_next_cell(rsc
, &p
);
1414 prom_debug(" %x %x\n", base
, size
);
1415 if (base
== 0 && (of_platform
& PLATFORM_LPAR
))
1417 if ((base
+ size
) > ram_top
)
1418 ram_top
= base
+ size
;
1422 alloc_bottom
= PAGE_ALIGN((unsigned long)&_end
+ 0x4000);
1425 * If prom_memory_limit is set we reduce the upper limits *except* for
1426 * alloc_top_high. This must be the real top of RAM so we can put
1430 alloc_top_high
= ram_top
;
1432 if (prom_memory_limit
) {
1433 if (prom_memory_limit
<= alloc_bottom
) {
1434 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1436 prom_memory_limit
= 0;
1437 } else if (prom_memory_limit
>= ram_top
) {
1438 prom_printf("Ignoring mem=%x >= ram_top.\n",
1440 prom_memory_limit
= 0;
1442 ram_top
= prom_memory_limit
;
1443 rmo_top
= min(rmo_top
, prom_memory_limit
);
1448 * Setup our top alloc point, that is top of RMO or top of
1449 * segment 0 when running non-LPAR.
1450 * Some RS64 machines have buggy firmware where claims up at
1451 * 1GB fail. Cap at 768MB as a workaround.
1452 * Since 768MB is plenty of room, and we need to cap to something
1453 * reasonable on 32-bit, cap at 768MB on all machines.
1457 rmo_top
= min(0x30000000ul
, rmo_top
);
1458 alloc_top
= rmo_top
;
1459 alloc_top_high
= ram_top
;
1462 * Check if we have an initrd after the kernel but still inside
1463 * the RMO. If we do move our bottom point to after it.
1465 if (prom_initrd_start
&&
1466 prom_initrd_start
< rmo_top
&&
1467 prom_initrd_end
> alloc_bottom
)
1468 alloc_bottom
= PAGE_ALIGN(prom_initrd_end
);
1470 prom_printf("memory layout at init:\n");
1471 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit
);
1472 prom_printf(" alloc_bottom : %x\n", alloc_bottom
);
1473 prom_printf(" alloc_top : %x\n", alloc_top
);
1474 prom_printf(" alloc_top_hi : %x\n", alloc_top_high
);
1475 prom_printf(" rmo_top : %x\n", rmo_top
);
1476 prom_printf(" ram_top : %x\n", ram_top
);
1479 static void __init
prom_close_stdin(void)
1484 if (prom_getprop(prom
.chosen
, "stdin", &val
, sizeof(val
)) > 0) {
1485 stdin
= be32_to_cpu(val
);
1486 call_prom("close", 1, 0, stdin
);
1490 #ifdef CONFIG_PPC_POWERNV
1492 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1493 static u64 __initdata prom_opal_base
;
1494 static u64 __initdata prom_opal_entry
;
1498 * Allocate room for and instantiate OPAL
1500 static void __init
prom_instantiate_opal(void)
1505 u64 size
= 0, align
= 0x10000;
1509 prom_debug("prom_instantiate_opal: start...\n");
1511 opal_node
= call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1512 prom_debug("opal_node: %x\n", opal_node
);
1513 if (!PHANDLE_VALID(opal_node
))
1517 prom_getprop(opal_node
, "opal-runtime-size", &val64
, sizeof(val64
));
1518 size
= be64_to_cpu(val64
);
1522 prom_getprop(opal_node
, "opal-runtime-alignment", &val64
,sizeof(val64
));
1523 align
= be64_to_cpu(val64
);
1525 base
= alloc_down(size
, align
, 0);
1527 prom_printf("OPAL allocation failed !\n");
1531 opal_inst
= call_prom("open", 1, 1, ADDR("/ibm,opal"));
1532 if (!IHANDLE_VALID(opal_inst
)) {
1533 prom_printf("opening opal package failed (%x)\n", opal_inst
);
1537 prom_printf("instantiating opal at 0x%x...", base
);
1539 if (call_prom_ret("call-method", 4, 3, rets
,
1540 ADDR("load-opal-runtime"),
1542 base
>> 32, base
& 0xffffffff) != 0
1543 || (rets
[0] == 0 && rets
[1] == 0)) {
1544 prom_printf(" failed\n");
1547 entry
= (((u64
)rets
[0]) << 32) | rets
[1];
1549 prom_printf(" done\n");
1551 reserve_mem(base
, size
);
1553 prom_debug("opal base = 0x%x\n", base
);
1554 prom_debug("opal align = 0x%x\n", align
);
1555 prom_debug("opal entry = 0x%x\n", entry
);
1556 prom_debug("opal size = 0x%x\n", (long)size
);
1558 prom_setprop(opal_node
, "/ibm,opal", "opal-base-address",
1559 &base
, sizeof(base
));
1560 prom_setprop(opal_node
, "/ibm,opal", "opal-entry-address",
1561 &entry
, sizeof(entry
));
1563 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1564 prom_opal_base
= base
;
1565 prom_opal_entry
= entry
;
1567 prom_debug("prom_instantiate_opal: end...\n");
1570 #endif /* CONFIG_PPC_POWERNV */
1573 * Allocate room for and instantiate RTAS
1575 static void __init
prom_instantiate_rtas(void)
1579 u32 base
, entry
= 0;
1583 prom_debug("prom_instantiate_rtas: start...\n");
1585 rtas_node
= call_prom("finddevice", 1, 1, ADDR("/rtas"));
1586 prom_debug("rtas_node: %x\n", rtas_node
);
1587 if (!PHANDLE_VALID(rtas_node
))
1591 prom_getprop(rtas_node
, "rtas-size", &val
, sizeof(size
));
1592 size
= be32_to_cpu(val
);
1596 base
= alloc_down(size
, PAGE_SIZE
, 0);
1598 prom_panic("Could not allocate memory for RTAS\n");
1600 rtas_inst
= call_prom("open", 1, 1, ADDR("/rtas"));
1601 if (!IHANDLE_VALID(rtas_inst
)) {
1602 prom_printf("opening rtas package failed (%x)\n", rtas_inst
);
1606 prom_printf("instantiating rtas at 0x%x...", base
);
1608 if (call_prom_ret("call-method", 3, 2, &entry
,
1609 ADDR("instantiate-rtas"),
1610 rtas_inst
, base
) != 0
1612 prom_printf(" failed\n");
1615 prom_printf(" done\n");
1617 reserve_mem(base
, size
);
1619 val
= cpu_to_be32(base
);
1620 prom_setprop(rtas_node
, "/rtas", "linux,rtas-base",
1622 val
= cpu_to_be32(entry
);
1623 prom_setprop(rtas_node
, "/rtas", "linux,rtas-entry",
1626 /* Check if it supports "query-cpu-stopped-state" */
1627 if (prom_getprop(rtas_node
, "query-cpu-stopped-state",
1628 &val
, sizeof(val
)) != PROM_ERROR
)
1629 rtas_has_query_cpu_stopped
= true;
1631 prom_debug("rtas base = 0x%x\n", base
);
1632 prom_debug("rtas entry = 0x%x\n", entry
);
1633 prom_debug("rtas size = 0x%x\n", (long)size
);
1635 prom_debug("prom_instantiate_rtas: end...\n");
1640 * Allocate room for and instantiate Stored Measurement Log (SML)
1642 static void __init
prom_instantiate_sml(void)
1644 phandle ibmvtpm_node
;
1645 ihandle ibmvtpm_inst
;
1646 u32 entry
= 0, size
= 0, succ
= 0;
1650 prom_debug("prom_instantiate_sml: start...\n");
1652 ibmvtpm_node
= call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1653 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node
);
1654 if (!PHANDLE_VALID(ibmvtpm_node
))
1657 ibmvtpm_inst
= call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1658 if (!IHANDLE_VALID(ibmvtpm_inst
)) {
1659 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst
);
1663 if (prom_getprop(ibmvtpm_node
, "ibm,sml-efi-reformat-supported",
1664 &val
, sizeof(val
)) != PROM_ERROR
) {
1665 if (call_prom_ret("call-method", 2, 2, &succ
,
1666 ADDR("reformat-sml-to-efi-alignment"),
1667 ibmvtpm_inst
) != 0 || succ
== 0) {
1668 prom_printf("Reformat SML to EFI alignment failed\n");
1672 if (call_prom_ret("call-method", 2, 2, &size
,
1673 ADDR("sml-get-allocated-size"),
1674 ibmvtpm_inst
) != 0 || size
== 0) {
1675 prom_printf("SML get allocated size failed\n");
1679 if (call_prom_ret("call-method", 2, 2, &size
,
1680 ADDR("sml-get-handover-size"),
1681 ibmvtpm_inst
) != 0 || size
== 0) {
1682 prom_printf("SML get handover size failed\n");
1687 base
= alloc_down(size
, PAGE_SIZE
, 0);
1689 prom_panic("Could not allocate memory for sml\n");
1691 prom_printf("instantiating sml at 0x%x...", base
);
1693 memset((void *)base
, 0, size
);
1695 if (call_prom_ret("call-method", 4, 2, &entry
,
1696 ADDR("sml-handover"),
1697 ibmvtpm_inst
, size
, base
) != 0 || entry
== 0) {
1698 prom_printf("SML handover failed\n");
1701 prom_printf(" done\n");
1703 reserve_mem(base
, size
);
1705 prom_setprop(ibmvtpm_node
, "/vdevice/vtpm", "linux,sml-base",
1706 &base
, sizeof(base
));
1707 prom_setprop(ibmvtpm_node
, "/vdevice/vtpm", "linux,sml-size",
1708 &size
, sizeof(size
));
1710 prom_debug("sml base = 0x%x\n", base
);
1711 prom_debug("sml size = 0x%x\n", (long)size
);
1713 prom_debug("prom_instantiate_sml: end...\n");
1717 * Allocate room for and initialize TCE tables
1719 #ifdef __BIG_ENDIAN__
1720 static void __init
prom_initialize_tce_table(void)
1724 char compatible
[64], type
[64], model
[64];
1725 char *path
= prom_scratch
;
1727 u32 minalign
, minsize
;
1728 u64 tce_entry
, *tce_entryp
;
1729 u64 local_alloc_top
, local_alloc_bottom
;
1735 prom_debug("starting prom_initialize_tce_table\n");
1737 /* Cache current top of allocs so we reserve a single block */
1738 local_alloc_top
= alloc_top_high
;
1739 local_alloc_bottom
= local_alloc_top
;
1741 /* Search all nodes looking for PHBs. */
1742 for (node
= 0; prom_next_node(&node
); ) {
1746 prom_getprop(node
, "compatible",
1747 compatible
, sizeof(compatible
));
1748 prom_getprop(node
, "device_type", type
, sizeof(type
));
1749 prom_getprop(node
, "model", model
, sizeof(model
));
1751 if ((type
[0] == 0) || (strstr(type
, "pci") == NULL
))
1754 /* Keep the old logic intact to avoid regression. */
1755 if (compatible
[0] != 0) {
1756 if ((strstr(compatible
, "python") == NULL
) &&
1757 (strstr(compatible
, "Speedwagon") == NULL
) &&
1758 (strstr(compatible
, "Winnipeg") == NULL
))
1760 } else if (model
[0] != 0) {
1761 if ((strstr(model
, "ython") == NULL
) &&
1762 (strstr(model
, "peedwagon") == NULL
) &&
1763 (strstr(model
, "innipeg") == NULL
))
1767 if (prom_getprop(node
, "tce-table-minalign", &minalign
,
1768 sizeof(minalign
)) == PROM_ERROR
)
1770 if (prom_getprop(node
, "tce-table-minsize", &minsize
,
1771 sizeof(minsize
)) == PROM_ERROR
)
1772 minsize
= 4UL << 20;
1775 * Even though we read what OF wants, we just set the table
1776 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1777 * By doing this, we avoid the pitfalls of trying to DMA to
1778 * MMIO space and the DMA alias hole.
1780 * On POWER4, firmware sets the TCE region by assuming
1781 * each TCE table is 8MB. Using this memory for anything
1782 * else will impact performance, so we always allocate 8MB.
1785 if (pvr_version_is(PVR_POWER4
) || pvr_version_is(PVR_POWER4p
))
1786 minsize
= 8UL << 20;
1788 minsize
= 4UL << 20;
1790 /* Align to the greater of the align or size */
1791 align
= max(minalign
, minsize
);
1792 base
= alloc_down(minsize
, align
, 1);
1794 prom_panic("ERROR, cannot find space for TCE table.\n");
1795 if (base
< local_alloc_bottom
)
1796 local_alloc_bottom
= base
;
1798 /* It seems OF doesn't null-terminate the path :-( */
1799 memset(path
, 0, PROM_SCRATCH_SIZE
);
1800 /* Call OF to setup the TCE hardware */
1801 if (call_prom("package-to-path", 3, 1, node
,
1802 path
, PROM_SCRATCH_SIZE
-1) == PROM_ERROR
) {
1803 prom_printf("package-to-path failed\n");
1806 /* Save away the TCE table attributes for later use. */
1807 prom_setprop(node
, path
, "linux,tce-base", &base
, sizeof(base
));
1808 prom_setprop(node
, path
, "linux,tce-size", &minsize
, sizeof(minsize
));
1810 prom_debug("TCE table: %s\n", path
);
1811 prom_debug("\tnode = 0x%x\n", node
);
1812 prom_debug("\tbase = 0x%x\n", base
);
1813 prom_debug("\tsize = 0x%x\n", minsize
);
1815 /* Initialize the table to have a one-to-one mapping
1816 * over the allocated size.
1818 tce_entryp
= (u64
*)base
;
1819 for (i
= 0; i
< (minsize
>> 3) ;tce_entryp
++, i
++) {
1820 tce_entry
= (i
<< PAGE_SHIFT
);
1822 *tce_entryp
= tce_entry
;
1825 prom_printf("opening PHB %s", path
);
1826 phb_node
= call_prom("open", 1, 1, path
);
1828 prom_printf("... failed\n");
1830 prom_printf("... done\n");
1832 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1833 phb_node
, -1, minsize
,
1834 (u32
) base
, (u32
) (base
>> 32));
1835 call_prom("close", 1, 0, phb_node
);
1838 reserve_mem(local_alloc_bottom
, local_alloc_top
- local_alloc_bottom
);
1840 /* These are only really needed if there is a memory limit in
1841 * effect, but we don't know so export them always. */
1842 prom_tce_alloc_start
= local_alloc_bottom
;
1843 prom_tce_alloc_end
= local_alloc_top
;
1845 /* Flag the first invalid entry */
1846 prom_debug("ending prom_initialize_tce_table\n");
1848 #endif /* __BIG_ENDIAN__ */
1849 #endif /* CONFIG_PPC64 */
1852 * With CHRP SMP we need to use the OF to start the other processors.
1853 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1854 * so we have to put the processors into a holding pattern controlled
1855 * by the kernel (not OF) before we destroy the OF.
1857 * This uses a chunk of low memory, puts some holding pattern
1858 * code there and sends the other processors off to there until
1859 * smp_boot_cpus tells them to do something. The holding pattern
1860 * checks that address until its cpu # is there, when it is that
1861 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1862 * of setting those values.
1864 * We also use physical address 0x4 here to tell when a cpu
1865 * is in its holding pattern code.
1870 * We want to reference the copy of __secondary_hold_* in the
1871 * 0 - 0x100 address range
1873 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1875 static void __init
prom_hold_cpus(void)
1880 unsigned long *spinloop
1881 = (void *) LOW_ADDR(__secondary_hold_spinloop
);
1882 unsigned long *acknowledge
1883 = (void *) LOW_ADDR(__secondary_hold_acknowledge
);
1884 unsigned long secondary_hold
= LOW_ADDR(__secondary_hold
);
1887 * On pseries, if RTAS supports "query-cpu-stopped-state",
1888 * we skip this stage, the CPUs will be started by the
1889 * kernel using RTAS.
1891 if ((of_platform
== PLATFORM_PSERIES
||
1892 of_platform
== PLATFORM_PSERIES_LPAR
) &&
1893 rtas_has_query_cpu_stopped
) {
1894 prom_printf("prom_hold_cpus: skipped\n");
1898 prom_debug("prom_hold_cpus: start...\n");
1899 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop
);
1900 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop
);
1901 prom_debug(" 1) acknowledge = 0x%x\n",
1902 (unsigned long)acknowledge
);
1903 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge
);
1904 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold
);
1906 /* Set the common spinloop variable, so all of the secondary cpus
1907 * will block when they are awakened from their OF spinloop.
1908 * This must occur for both SMP and non SMP kernels, since OF will
1909 * be trashed when we move the kernel.
1914 for (node
= 0; prom_next_node(&node
); ) {
1915 unsigned int cpu_no
;
1919 prom_getprop(node
, "device_type", type
, sizeof(type
));
1920 if (strcmp(type
, "cpu") != 0)
1923 /* Skip non-configured cpus. */
1924 if (prom_getprop(node
, "status", type
, sizeof(type
)) > 0)
1925 if (strcmp(type
, "okay") != 0)
1928 reg
= cpu_to_be32(-1); /* make sparse happy */
1929 prom_getprop(node
, "reg", ®
, sizeof(reg
));
1930 cpu_no
= be32_to_cpu(reg
);
1932 prom_debug("cpu hw idx = %lu\n", cpu_no
);
1934 /* Init the acknowledge var which will be reset by
1935 * the secondary cpu when it awakens from its OF
1938 *acknowledge
= (unsigned long)-1;
1940 if (cpu_no
!= prom
.cpu
) {
1941 /* Primary Thread of non-boot cpu or any thread */
1942 prom_printf("starting cpu hw idx %lu... ", cpu_no
);
1943 call_prom("start-cpu", 3, 0, node
,
1944 secondary_hold
, cpu_no
);
1946 for (i
= 0; (i
< 100000000) &&
1947 (*acknowledge
== ((unsigned long)-1)); i
++ )
1950 if (*acknowledge
== cpu_no
)
1951 prom_printf("done\n");
1953 prom_printf("failed: %x\n", *acknowledge
);
1957 prom_printf("boot cpu hw idx %lu\n", cpu_no
);
1958 #endif /* CONFIG_SMP */
1961 prom_debug("prom_hold_cpus: end...\n");
1965 static void __init
prom_init_client_services(unsigned long pp
)
1967 /* Get a handle to the prom entry point before anything else */
1970 /* get a handle for the stdout device */
1971 prom
.chosen
= call_prom("finddevice", 1, 1, ADDR("/chosen"));
1972 if (!PHANDLE_VALID(prom
.chosen
))
1973 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1975 /* get device tree root */
1976 prom
.root
= call_prom("finddevice", 1, 1, ADDR("/"));
1977 if (!PHANDLE_VALID(prom
.root
))
1978 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1985 * For really old powermacs, we need to map things we claim.
1986 * For that, we need the ihandle of the mmu.
1987 * Also, on the longtrail, we need to work around other bugs.
1989 static void __init
prom_find_mmu(void)
1994 oprom
= call_prom("finddevice", 1, 1, ADDR("/openprom"));
1995 if (!PHANDLE_VALID(oprom
))
1997 if (prom_getprop(oprom
, "model", version
, sizeof(version
)) <= 0)
1999 version
[sizeof(version
) - 1] = 0;
2000 /* XXX might need to add other versions here */
2001 if (strcmp(version
, "Open Firmware, 1.0.5") == 0)
2002 of_workarounds
= OF_WA_CLAIM
;
2003 else if (strncmp(version
, "FirmWorks,3.", 12) == 0) {
2004 of_workarounds
= OF_WA_CLAIM
| OF_WA_LONGTRAIL
;
2005 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2008 prom
.memory
= call_prom("open", 1, 1, ADDR("/memory"));
2009 prom_getprop(prom
.chosen
, "mmu", &prom
.mmumap
,
2010 sizeof(prom
.mmumap
));
2011 prom
.mmumap
= be32_to_cpu(prom
.mmumap
);
2012 if (!IHANDLE_VALID(prom
.memory
) || !IHANDLE_VALID(prom
.mmumap
))
2013 of_workarounds
&= ~OF_WA_CLAIM
; /* hmmm */
2016 #define prom_find_mmu()
2019 static void __init
prom_init_stdout(void)
2021 char *path
= of_stdout_device
;
2023 phandle stdout_node
;
2026 if (prom_getprop(prom
.chosen
, "stdout", &val
, sizeof(val
)) <= 0)
2027 prom_panic("cannot find stdout");
2029 prom
.stdout
= be32_to_cpu(val
);
2031 /* Get the full OF pathname of the stdout device */
2032 memset(path
, 0, 256);
2033 call_prom("instance-to-path", 3, 1, prom
.stdout
, path
, 255);
2034 prom_printf("OF stdout device is: %s\n", of_stdout_device
);
2035 prom_setprop(prom
.chosen
, "/chosen", "linux,stdout-path",
2036 path
, strlen(path
) + 1);
2038 /* instance-to-package fails on PA-Semi */
2039 stdout_node
= call_prom("instance-to-package", 1, 1, prom
.stdout
);
2040 if (stdout_node
!= PROM_ERROR
) {
2041 val
= cpu_to_be32(stdout_node
);
2042 prom_setprop(prom
.chosen
, "/chosen", "linux,stdout-package",
2045 /* If it's a display, note it */
2046 memset(type
, 0, sizeof(type
));
2047 prom_getprop(stdout_node
, "device_type", type
, sizeof(type
));
2048 if (strcmp(type
, "display") == 0)
2049 prom_setprop(stdout_node
, path
, "linux,boot-display", NULL
, 0);
2053 static int __init
prom_find_machine_type(void)
2062 /* Look for a PowerMac or a Cell */
2063 len
= prom_getprop(prom
.root
, "compatible",
2064 compat
, sizeof(compat
)-1);
2068 char *p
= &compat
[i
];
2072 if (strstr(p
, "Power Macintosh") ||
2073 strstr(p
, "MacRISC"))
2074 return PLATFORM_POWERMAC
;
2076 /* We must make sure we don't detect the IBM Cell
2077 * blades as pSeries due to some firmware issues,
2080 if (strstr(p
, "IBM,CBEA") ||
2081 strstr(p
, "IBM,CPBW-1.0"))
2082 return PLATFORM_GENERIC
;
2083 #endif /* CONFIG_PPC64 */
2088 /* Try to detect OPAL */
2089 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2090 return PLATFORM_OPAL
;
2092 /* Try to figure out if it's an IBM pSeries or any other
2093 * PAPR compliant platform. We assume it is if :
2094 * - /device_type is "chrp" (please, do NOT use that for future
2098 len
= prom_getprop(prom
.root
, "device_type",
2099 compat
, sizeof(compat
)-1);
2101 return PLATFORM_GENERIC
;
2102 if (strcmp(compat
, "chrp"))
2103 return PLATFORM_GENERIC
;
2105 /* Default to pSeries. We need to know if we are running LPAR */
2106 rtas
= call_prom("finddevice", 1, 1, ADDR("/rtas"));
2107 if (!PHANDLE_VALID(rtas
))
2108 return PLATFORM_GENERIC
;
2109 x
= prom_getproplen(rtas
, "ibm,hypertas-functions");
2110 if (x
!= PROM_ERROR
) {
2111 prom_debug("Hypertas detected, assuming LPAR !\n");
2112 return PLATFORM_PSERIES_LPAR
;
2114 return PLATFORM_PSERIES
;
2116 return PLATFORM_GENERIC
;
2120 static int __init
prom_set_color(ihandle ih
, int i
, int r
, int g
, int b
)
2122 return call_prom("call-method", 6, 1, ADDR("color!"), ih
, i
, b
, g
, r
);
2126 * If we have a display that we don't know how to drive,
2127 * we will want to try to execute OF's open method for it
2128 * later. However, OF will probably fall over if we do that
2129 * we've taken over the MMU.
2130 * So we check whether we will need to open the display,
2131 * and if so, open it now.
2133 static void __init
prom_check_displays(void)
2135 char type
[16], *path
;
2140 static unsigned char default_colors
[] = {
2158 const unsigned char *clut
;
2160 prom_debug("Looking for displays\n");
2161 for (node
= 0; prom_next_node(&node
); ) {
2162 memset(type
, 0, sizeof(type
));
2163 prom_getprop(node
, "device_type", type
, sizeof(type
));
2164 if (strcmp(type
, "display") != 0)
2167 /* It seems OF doesn't null-terminate the path :-( */
2168 path
= prom_scratch
;
2169 memset(path
, 0, PROM_SCRATCH_SIZE
);
2172 * leave some room at the end of the path for appending extra
2175 if (call_prom("package-to-path", 3, 1, node
, path
,
2176 PROM_SCRATCH_SIZE
-10) == PROM_ERROR
)
2178 prom_printf("found display : %s, opening... ", path
);
2180 ih
= call_prom("open", 1, 1, path
);
2182 prom_printf("failed\n");
2187 prom_printf("done\n");
2188 prom_setprop(node
, path
, "linux,opened", NULL
, 0);
2190 /* Setup a usable color table when the appropriate
2191 * method is available. Should update this to set-colors */
2192 clut
= default_colors
;
2193 for (i
= 0; i
< 16; i
++, clut
+= 3)
2194 if (prom_set_color(ih
, i
, clut
[0], clut
[1],
2198 #ifdef CONFIG_LOGO_LINUX_CLUT224
2199 clut
= PTRRELOC(logo_linux_clut224
.clut
);
2200 for (i
= 0; i
< logo_linux_clut224
.clutsize
; i
++, clut
+= 3)
2201 if (prom_set_color(ih
, i
+ 32, clut
[0], clut
[1],
2204 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2206 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2207 if (prom_getprop(node
, "linux,boot-display", NULL
, 0) !=
2209 u32 width
, height
, pitch
, addr
;
2211 prom_printf("Setting btext !\n");
2212 prom_getprop(node
, "width", &width
, 4);
2213 prom_getprop(node
, "height", &height
, 4);
2214 prom_getprop(node
, "linebytes", &pitch
, 4);
2215 prom_getprop(node
, "address", &addr
, 4);
2216 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2217 width
, height
, pitch
, addr
);
2218 btext_setup_display(width
, height
, 8, pitch
, addr
);
2220 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2225 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2226 static void __init
*make_room(unsigned long *mem_start
, unsigned long *mem_end
,
2227 unsigned long needed
, unsigned long align
)
2231 *mem_start
= _ALIGN(*mem_start
, align
);
2232 while ((*mem_start
+ needed
) > *mem_end
) {
2233 unsigned long room
, chunk
;
2235 prom_debug("Chunk exhausted, claiming more at %x...\n",
2237 room
= alloc_top
- alloc_bottom
;
2238 if (room
> DEVTREE_CHUNK_SIZE
)
2239 room
= DEVTREE_CHUNK_SIZE
;
2240 if (room
< PAGE_SIZE
)
2241 prom_panic("No memory for flatten_device_tree "
2243 chunk
= alloc_up(room
, 0);
2245 prom_panic("No memory for flatten_device_tree "
2246 "(claim failed)\n");
2247 *mem_end
= chunk
+ room
;
2250 ret
= (void *)*mem_start
;
2251 *mem_start
+= needed
;
2256 #define dt_push_token(token, mem_start, mem_end) do { \
2257 void *room = make_room(mem_start, mem_end, 4, 4); \
2258 *(__be32 *)room = cpu_to_be32(token); \
2261 static unsigned long __init
dt_find_string(char *str
)
2265 s
= os
= (char *)dt_string_start
;
2267 while (s
< (char *)dt_string_end
) {
2268 if (strcmp(s
, str
) == 0)
2276 * The Open Firmware 1275 specification states properties must be 31 bytes or
2277 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2279 #define MAX_PROPERTY_NAME 64
2281 static void __init
scan_dt_build_strings(phandle node
,
2282 unsigned long *mem_start
,
2283 unsigned long *mem_end
)
2285 char *prev_name
, *namep
, *sstart
;
2289 sstart
= (char *)dt_string_start
;
2291 /* get and store all property names */
2294 /* 64 is max len of name including nul. */
2295 namep
= make_room(mem_start
, mem_end
, MAX_PROPERTY_NAME
, 1);
2296 if (call_prom("nextprop", 3, 1, node
, prev_name
, namep
) != 1) {
2297 /* No more nodes: unwind alloc */
2298 *mem_start
= (unsigned long)namep
;
2303 if (strcmp(namep
, "name") == 0) {
2304 *mem_start
= (unsigned long)namep
;
2308 /* get/create string entry */
2309 soff
= dt_find_string(namep
);
2311 *mem_start
= (unsigned long)namep
;
2312 namep
= sstart
+ soff
;
2314 /* Trim off some if we can */
2315 *mem_start
= (unsigned long)namep
+ strlen(namep
) + 1;
2316 dt_string_end
= *mem_start
;
2321 /* do all our children */
2322 child
= call_prom("child", 1, 1, node
);
2323 while (child
!= 0) {
2324 scan_dt_build_strings(child
, mem_start
, mem_end
);
2325 child
= call_prom("peer", 1, 1, child
);
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
					unsigned long *mem_end)
{
	phandle child;
	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
	unsigned long soff;
	unsigned char *valp;
	static char pname[MAX_PROPERTY_NAME];
	int l, room, has_phandle = 0;

	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);

	/* get the node's full name */
	namep = (char *)*mem_start;
	room = *mem_end - *mem_start;
	if (room > 255)
		room = 255;
	l = call_prom("package-to-path", 3, 1, node, namep, room);
	if (l >= 0) {
		/* Didn't fit?  Get more room. */
		if (l >= room) {
			if (l >= *mem_end - *mem_start)
				namep = make_room(mem_start, mem_end, l+1, 1);
			call_prom("package-to-path", 3, 1, node, namep, l);
		}
		namep[l] = '\0';

		/* Fixup an Apple bug where they have bogus \0 chars in the
		 * middle of the path in some properties, and extract
		 * the unit name (everything after the last '/').
		 */
		for (lp = p = namep, ep = namep + l; p < ep; p++) {
			if (*p == '/')
				lp = namep;
			else if (*p != 0)
				*lp++ = *p;
		}
		*lp = 0;
		*mem_start = _ALIGN((unsigned long)lp + 1, 4);
	}

	/* get it again for debugging */
	path = prom_scratch;
	memset(path, 0, PROM_SCRATCH_SIZE);
	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);

	/* get and store all properties */
	prev_name = "";
	sstart = (char *)dt_string_start;
	for (;;) {
		if (call_prom("nextprop", 3, 1, node, prev_name,
			      pname) != 1)
			break;

		/* skip "name" */
		if (strcmp(pname, "name") == 0) {
			prev_name = "name";
			continue;
		}

		/* find string offset */
		soff = dt_find_string(pname);
		if (soff == 0) {
			prom_printf("WARNING: Can't find string index for"
				    " <%s>, node %s\n", pname, path);
			break;
		}
		prev_name = sstart + soff;

		/* get length */
		l = call_prom("getproplen", 2, 1, node, pname);

		/* sanity check */
		if (l == PROM_ERROR)
			continue;

		/* push property head */
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(l, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);

		/* push property content */
		valp = make_room(mem_start, mem_end, l, 4);
		call_prom("getprop", 4, 1, node, pname, valp, l);
		*mem_start = _ALIGN(*mem_start, 4);

		if (!strcmp(pname, "phandle"))
			has_phandle = 1;
	}

	/* Add a "linux,phandle" property if no "phandle" property already
	 * existed (can happen with OPAL)
	 */
	if (!has_phandle) {
		soff = dt_find_string("linux,phandle");
		if (soff == 0)
			prom_printf("WARNING: Can't find string index for"
				    " <linux-phandle> node %s\n", path);
		else {
			dt_push_token(OF_DT_PROP, mem_start, mem_end);
			dt_push_token(4, mem_start, mem_end);
			dt_push_token(soff, mem_start, mem_end);
			valp = make_room(mem_start, mem_end, 4, 4);
			*(__be32 *)valp = cpu_to_be32(node);
		}
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_struct(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}

	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
}
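
/*
 * For reference, each property emitted above ends up in the structure
 * block as:
 *
 *	OF_DT_PROP		(token)
 *	<value length>		(one cell)
 *	<name offset>		(offset into the strings block)
 *	<value bytes>		(padded to a 4-byte boundary)
 *
 * dt_push_token() writes the three header cells, make_room() reserves
 * space for the value and the "getprop" call fills it in.
 */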
static void __init flatten_device_tree(void)
{
	phandle root;
	unsigned long mem_start, mem_end, room;
	struct boot_param_header *hdr;
	char *namep;
	u64 *rsvmap;

	/*
	 * Check how much room we have between alloc top & bottom (+/- a
	 * few pages), crop to 1MB, as this is our "chunk" size
	 */
	room = alloc_top - alloc_bottom - 0x4000;
	if (room > DEVTREE_CHUNK_SIZE)
		room = DEVTREE_CHUNK_SIZE;
	prom_debug("starting device tree allocs at %x\n", alloc_bottom);

	/* Now try to claim that */
	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
	if (mem_start == 0)
		prom_panic("Can't allocate initial device-tree chunk\n");
	mem_end = mem_start + room;

	/* Get root of tree */
	root = call_prom("peer", 1, 1, (phandle)0);
	if (root == (phandle)0)
		prom_panic("couldn't get device tree root\n");

	/* Build header and make room for mem rsv map */
	mem_start = _ALIGN(mem_start, 4);
	hdr = make_room(&mem_start, &mem_end,
			sizeof(struct boot_param_header), 4);
	dt_header_start = (unsigned long)hdr;
	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);

	/* Start of strings */
	mem_start = PAGE_ALIGN(mem_start);
	dt_string_start = mem_start;
	mem_start += 4; /* hole */

	/* Add "linux,phandle" in there, we'll need it */
	namep = make_room(&mem_start, &mem_end, 16, 1);
	strcpy(namep, "linux,phandle");
	mem_start = (unsigned long)namep + strlen(namep) + 1;

	/* Build string array */
	prom_printf("Building dt strings...\n");
	scan_dt_build_strings(root, &mem_start, &mem_end);
	dt_string_end = mem_start;

	/* Build structure */
	mem_start = PAGE_ALIGN(mem_start);
	dt_struct_start = mem_start;
	prom_printf("Building dt structure...\n");
	scan_dt_build_struct(root, &mem_start, &mem_end);
	dt_push_token(OF_DT_END, &mem_start, &mem_end);
	dt_struct_end = PAGE_ALIGN(mem_start);

	/* Finish header */
	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
	hdr->magic = cpu_to_be32(OF_DT_HEADER);
	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
	hdr->version = cpu_to_be32(OF_DT_VERSION);
	/* Version 16 is not backward compatible */
	hdr->last_comp_version = cpu_to_be32(0x10);

	/* Copy the reserve map in */
	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));

#ifdef DEBUG_PROM
	{
		int i;
		prom_printf("reserved memory map:\n");
		for (i = 0; i < mem_reserve_cnt; i++)
			prom_printf("  %x - %x\n",
				    be64_to_cpu(mem_reserve_map[i].base),
				    be64_to_cpu(mem_reserve_map[i].size));
	}
#endif
	/* Bump mem_reserve_cnt to cause further reservations to fail
	 * since it's too late.
	 */
	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;

	prom_printf("Device tree strings 0x%x -> 0x%x\n",
		    dt_string_start, dt_string_end);
	prom_printf("Device tree struct  0x%x -> 0x%x\n",
		    dt_struct_start, dt_struct_end);
}
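
/*
 * The blob assembled above is laid out as: boot_param_header, the
 * memory reserve map, a page-aligned strings block (dt_string_start to
 * dt_string_end), then a page-aligned structure block (dt_struct_start
 * to dt_struct_end) terminated by OF_DT_END. All offsets stored in the
 * header are relative to dt_header_start, which keeps the blob
 * position-independent.
 */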
#ifdef CONFIG_PPC_MAPLE
/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
 * The values are bad, and it doesn't even have the right number of cells. */
static void __init fixup_device_tree_maple(void)
{
	phandle isa;
	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
	u32 isa_ranges[6];
	char *name;

	name = "/ht@0/isa@4";
	isa = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(isa)) {
		name = "/ht@0/isa@6";
		isa = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (!PHANDLE_VALID(isa))
		return;

	if (prom_getproplen(isa, "ranges") != 12)
		return;
	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
		== PROM_ERROR)
		return;

	if (isa_ranges[0] != 0x1 ||
		isa_ranges[1] != 0xf4000000 ||
		isa_ranges[2] != 0x00010000)
		return;

	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");

	isa_ranges[0] = 0x1;
	isa_ranges[1] = 0x0;
	isa_ranges[2] = rloc;
	isa_ranges[3] = 0x0;
	isa_ranges[4] = 0x0;
	isa_ranges[5] = 0x00010000;
	prom_setprop(isa, name, "ranges",
			isa_ranges, sizeof(isa_ranges));
}
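
/*
 * For reference, the six cells written above follow the ISA "ranges"
 * binding: a two-cell ISA address (space 1 = I/O, offset 0), a
 * three-cell parent PCI address whose phys.hi (rloc) encodes I/O space
 * plus the device number, and a one-cell size of 0x10000, the 64k
 * legacy I/O window.
 */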
#define CPC925_MC_START		0xf8000000
#define CPC925_MC_LENGTH	0x1000000
/* The values for memory-controller don't have the right number of cells */
static void __init fixup_device_tree_maple_memory_controller(void)
{
	phandle mc;
	u32 mc_reg[4];
	char *name = "/hostbridge@f8000000";
	u32 ac, sc;

	mc = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(mc))
		return;

	if (prom_getproplen(mc, "reg") != 8)
		return;

	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
	if ((ac != 2) || (sc != 2))
		return;

	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
		return;

	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
		return;

	prom_printf("Fixing up bogus hostbridge on Maple...\n");

	/* rewrite the 2-cell reg as a 2+2 cell address/size pair */
	mc_reg[0] = 0x0;
	mc_reg[1] = CPC925_MC_START;
	mc_reg[2] = 0x0;
	mc_reg[3] = CPC925_MC_LENGTH;
	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
}
#else
#define fixup_device_tree_maple()
#define fixup_device_tree_maple_memory_controller()
#endif
#ifdef CONFIG_PPC_CHRP
/*
 * Pegasos and BriQ lack the "ranges" property in the isa node
 * Pegasos needs decimal IRQ 14/15, not hexadecimal
 * Pegasos has the IDE configured in legacy mode, but advertised as native
 */
static void __init fixup_device_tree_chrp(void)
{
	phandle ph;
	u32 prop[6];
	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
	char *name;
	int rc;

	name = "/pci@80000000/isa@c";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(ph)) {
		name = "/pci@ff500000/isa@6";
		ph = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (PHANDLE_VALID(ph)) {
		rc = prom_getproplen(ph, "ranges");
		if (rc == 0 || rc == PROM_ERROR) {
			prom_printf("Fixing up missing ISA range on Pegasos...\n");

			prop[0] = 0x1;
			prop[1] = 0x0;
			prop[2] = rloc;
			prop[3] = 0x0;
			prop[4] = 0x0;
			prop[5] = 0x00010000;
			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
		}
	}

	name = "/pci@80000000/ide@C,1";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (PHANDLE_VALID(ph)) {
		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
		prop[0] = 14;
		prop[1] = 0x0;
		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
		prom_printf("Fixing up IDE class-code on Pegasos...\n");
		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
		if (rc == sizeof(u32)) {
			prop[0] &= ~0x5;
			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
		}
	}
}
#else
#define fixup_device_tree_chrp()
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
static void __init fixup_device_tree_pmac(void)
{
	phandle u3, i2c, mpic;
	u32 u3_rev;
	u32 interrupts[2];
	u32 parent;

	/* Some G5s have a missing interrupt definition, fix it up here */
	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
	if (!PHANDLE_VALID(u3))
		return;
	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
	if (!PHANDLE_VALID(i2c))
		return;
	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
	if (!PHANDLE_VALID(mpic))
		return;

	/* check if proper rev of u3 */
	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
	    == PROM_ERROR)
		return;
	if (u3_rev < 0x35 || u3_rev > 0x39)
		return;
	/* does it need fixup ? */
	if (prom_getproplen(i2c, "interrupts") > 0)
		return;

	prom_printf("fixing up bogus interrupts for u3 i2c...\n");

	/* interrupt on this revision of u3 is number 0 and level */
	interrupts[0] = 0;
	interrupts[1] = 1;
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
		     &interrupts, sizeof(interrupts));
	parent = (u32)mpic;
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
		     &parent, sizeof(parent));
}
#else
#define fixup_device_tree_pmac()
#endif
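
/*
 * As the comment in the function above notes, the two cells written for
 * "interrupts" are the MPIC source number (0) and a sense flag marking
 * the interrupt as level-triggered, while "interrupt-parent" points the
 * specifier at the U3 MPIC node found earlier.
 */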
#ifdef CONFIG_PPC_EFIKA
/*
 * The MPC5200 FEC driver requires a phy-handle property to tell it how
 * to talk to the phy. If the phy-handle property is missing, then this
 * function is called to add the appropriate nodes and link it to the
 * ethernet node.
 */
static void __init fixup_device_tree_efika_add_phy(void)
{
	u32 node;
	char prop[64];
	int rv;

	/* Check if /builtin/ethernet exists - bail if it doesn't */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
	if (!PHANDLE_VALID(node))
		return;

	/* Check if the phy-handle property exists - bail if it does */
	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
	if (rv != PROM_ERROR)
		return;

	/*
	 * At this point the ethernet device doesn't have a phy described.
	 * Now we need to add the missing phy node and linkage
	 */

	/* Check for an MDIO bus node - if missing then create one */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet MDIO node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin\" find-device"
			" new-device"
				" 1 encode-int s\" #address-cells\" property"
				" 0 encode-int s\" #size-cells\" property"
				" s\" mdio\" device-name"
				" s\" fsl,mpc5200b-mdio\" encode-string"
				" s\" compatible\" property"
				" 0xf0003000 0x400 reg"
				" 0x2 encode-int"
				" 0x5 encode-int encode+"
				" 0x3 encode-int encode+"
				" s\" interrupts\" property"
			" finish-device");
	}

	/* Check for a PHY device node - if missing then create one and
	 * give its phandle to the ethernet node */
	node = call_prom("finddevice", 1, 1,
			 ADDR("/builtin/mdio/ethernet-phy"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet PHY node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin/mdio\" find-device"
			" new-device"
				" s\" ethernet-phy\" device-name"
				" 0x10 encode-int s\" reg\" property"
				" my-self"
				" ihandle>phandle"
			" finish-device"
			" s\" /builtin/ethernet\" find-device"
				" encode-int"
				" s\" phy-handle\" property"
			" device-end");
	}
}
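
/*
 * Roughly, the two "interpret" scripts above leave the tree looking
 * like this (device-tree syntax for illustration only):
 *
 *	/builtin/mdio {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		compatible = "fsl,mpc5200b-mdio";
 *		reg = <0xf0003000 0x400>;
 *		ethernet-phy {
 *			reg = <0x10>;
 *		};
 *	};
 *
 * plus a phy-handle property on /builtin/ethernet pointing at the new
 * ethernet-phy node.
 */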
static void __init fixup_device_tree_efika(void)
{
	int sound_irq[3] = { 2, 2, 0 };
	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
				3,4,0, 3,5,0, 3,6,0, 3,7,0,
				3,8,0, 3,9,0, 3,10,0, 3,11,0,
				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
	u32 node;
	char prop[64];
	int rv, len;

	/* Check if we're really running on an EFIKA */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(node))
		return;

	rv = prom_getprop(node, "model", prop, sizeof(prop));
	if (rv == PROM_ERROR)
		return;
	if (strcmp(prop, "EFIKA5K2"))
		return;

	prom_printf("Applying EFIKA device tree fixups\n");

	/* Claiming to be 'chrp' is death */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));

	/* CODEGEN,description is exposed in /proc/cpuinfo so
	   fix that too */
	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
		prom_setprop(node, "/", "CODEGEN,description",
			     "Efika 5200B PowerPC System",
			     sizeof("Efika 5200B PowerPC System"));

	/* Fixup bestcomm interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
	if (PHANDLE_VALID(node)) {
		len = prom_getproplen(node, "interrupts");
		if (len == 12) {
			prom_printf("Fixing bestcomm interrupts property\n");
			prom_setprop(node, "/builtin/bestcom", "interrupts",
				     bcomm_irq, sizeof(bcomm_irq));
		}
	}

	/* Fixup sound interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
	if (PHANDLE_VALID(node)) {
		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
		if (rv == PROM_ERROR) {
			prom_printf("Adding sound interrupts property\n");
			prom_setprop(node, "/builtin/sound", "interrupts",
				     sound_irq, sizeof(sound_irq));
		}
	}

	/* Make sure ethernet phy-handle property exists */
	fixup_device_tree_efika_add_phy();
}
#else
#define fixup_device_tree_efika()
#endif
#ifdef CONFIG_PPC_PASEMI_NEMO
/*
 * CFE supplied on Nemo is broken in several ways, biggest
 * problem is that it reassigns ISA interrupts to unused mpic ints.
 * Add an interrupt-controller property for the io-bridge to use
 * and correct the ints so we can attach them to an irq_domain
 */
static void __init fixup_device_tree_pasemi(void)
{
	u32 interrupts[2], parent, rval, val = 0;
	char *name, *pci_name;
	phandle iob, node;

	/* Find the root pci node */
	name = "/pxp@0,e0000000";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* check if interrupt-controller node set yet */
	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
		return;

	prom_printf("adding interrupt-controller property for SB600...\n");

	prom_setprop(iob, name, "interrupt-controller", &val, 0);

	pci_name = "/pxp@0,e0000000/pci@11";
	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
	parent = ADDR(iob);

	for (; prom_next_node(&node); ) {
		/* scan each node for one with an interrupt */
		if (!PHANDLE_VALID(node))
			continue;

		rval = prom_getproplen(node, "interrupts");
		if (rval == 0 || rval == PROM_ERROR)
			continue;

		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
		if ((interrupts[0] < 212) || (interrupts[0] > 222))
			continue;

		/* found a node, update both interrupts and interrupt-parent */
		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
			interrupts[0] -= 203;
		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
			interrupts[0] -= 213;
		if (interrupts[0] == 221)
			interrupts[0] = 14;
		if (interrupts[0] == 222)
			interrupts[0] = 8;

		prom_setprop(node, pci_name, "interrupts", interrupts,
			     sizeof(interrupts));
		prom_setprop(node, pci_name, "interrupt-parent", &parent,
			     sizeof(parent));
	}

	/*
	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
	 * so that generic isa-bridge code can add the SB600 and its on-board
	 * peripherals.
	 */
	name = "/pxp@0,e0000000/io-bridge@0";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* device_type is already set, just change it. */

	prom_printf("Changing device_type of SB600 node...\n");

	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
}
#else	/* !CONFIG_PPC_PASEMI_NEMO */
static inline void fixup_device_tree_pasemi(void) { }
#endif
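
/*
 * Net effect of the loop above: the MPIC interrupt numbers that CFE
 * hands out for the SB600 legacy devices are folded back onto ISA
 * numbers (212-215 become 9-12, 216-220 become 3-7, 221 becomes 14,
 * 222 becomes 8), and each fixed-up device is reparented to the node
 * that was given the interrupt-controller property at the top of the
 * function.
 */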
static void __init fixup_device_tree(void)
{
	fixup_device_tree_maple();
	fixup_device_tree_maple_memory_controller();
	fixup_device_tree_chrp();
	fixup_device_tree_pmac();
	fixup_device_tree_efika();
	fixup_device_tree_pasemi();
}
static void __init prom_find_boot_cpu(void)
{
	__be32 rval;
	ihandle prom_cpu;
	phandle cpu_pkg;

	rval = 0;
	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
		return;
	prom_cpu = be32_to_cpu(rval);

	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

	if (!PHANDLE_VALID(cpu_pkg))
		return;

	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
	prom.cpu = be32_to_cpu(rval);

	prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
}
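
/*
 * The lookup above goes from the /chosen "cpu" property (an ihandle for
 * the boot CPU's open instance) via "instance-to-package" to the cpu
 * node, and then reads its "reg" property, the hardware CPU id that
 * flatten_device_tree() stores as boot_cpuid_phys.
 */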
static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (r3 && r4 && r4 != 0xdeadbeef) {
		__be64 val;

		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
		prom_initrd_end = prom_initrd_start + r4;

		val = cpu_to_be64(prom_initrd_start);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
			     &val, sizeof(val));
		val = cpu_to_be64(prom_initrd_end);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
			     &val, sizeof(val));

		reserve_mem(prom_initrd_start,
			    prom_initrd_end - prom_initrd_start);

		prom_debug("initrd_start=0x%x\n", prom_initrd_start);
		prom_debug("initrd_end=0x%x\n", prom_initrd_end);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
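
/*
 * The /chosen properties written above, linux,initrd-start and
 * linux,initrd-end, are big-endian 64-bit physical addresses that the
 * kernel reads back from the flattened tree, and reserve_mem() also
 * adds the range to the reserve map so the initrd is not handed out as
 * free memory during early boot.
 */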
#ifdef CONFIG_RELOCATABLE
static void reloc_toc(void)
{
}

static void unreloc_toc(void)
{
}
#else
static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
	unsigned long i;
	unsigned long *toc_entry;

	/* Get the start of the TOC by using r2 directly. */
	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));

	for (i = 0; i < nr_entries; i++) {
		*toc_entry = *toc_entry + offset;
		toc_entry++;
	}
}

static void reloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	__reloc_toc(offset, nr_entries);

	mb();
}

static void unreloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	mb();

	__reloc_toc(-offset, nr_entries);
}
#endif
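
/*
 * Note on __reloc_toc(): r2 points 0x8000 bytes past the start of the
 * TOC, so "addi %0,2,-0x8000" yields the first entry. Only the entries
 * between __prom_init_toc_start and __prom_init_toc_end (prom_init's
 * slice of the TOC, delimited by the linker) are adjusted, by the load
 * offset on entry and back again before jumping to __start().
 */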
/*
 * We enter here early on, when the Open Firmware prom is still
 * handling exceptions and the MMU hash table for us.
 */

unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7,
			       unsigned long kbase)
{
	unsigned long hdr;

#ifdef CONFIG_PPC32
	unsigned long offset = reloc_offset();
	reloc_got2(offset);
#else
	reloc_toc();
#endif

	/*
	 * First zero the BSS
	 */
	memset(&__bss_start, 0, __bss_stop - __bss_start);

	/*
	 * Init interface to Open Firmware, get some node references,
	 * like /chosen
	 */
	prom_init_client_services(pp);

	/*
	 * See if this OF is old enough that we need to do explicit maps
	 * and other workarounds
	 */
	prom_find_mmu();

	/*
	 * Init prom stdout device
	 */
	prom_init_stdout();

	prom_printf("Preparing to boot %s", linux_banner);

	/*
	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	 */
	of_platform = prom_find_machine_type();
	prom_printf("Detected machine type: %x\n", of_platform);

#ifndef CONFIG_NONSTATIC_KERNEL
	/* Bail if this is a kdump kernel. */
	if (PHYSICAL_START > 0)
		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
#endif

	/*
	 * Check for an initrd
	 */
	prom_check_initrd(r3, r4);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
	/*
	 * On pSeries, inform the firmware about our capabilities
	 */
	if (of_platform == PLATFORM_PSERIES ||
	    of_platform == PLATFORM_PSERIES_LPAR)
		prom_send_capabilities();
#endif

	/*
	 * Copy the CPU hold code
	 */
	if (of_platform != PLATFORM_POWERMAC)
		copy_and_flush(0, kbase, 0x100, 0);

	/*
	 * Initialize memory management within prom_init
	 */
	prom_init_mem();

	/*
	 * Determine which cpu is actually running right _now_
	 */
	prom_find_boot_cpu();

	/*
	 * Initialize display devices
	 */
	prom_check_displays();

#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
	/*
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
	 * that uses the allocator, we need to make sure we get the top of memory
	 * available for us here...
	 */
	if (of_platform == PLATFORM_PSERIES)
		prom_initialize_tce_table();
#endif

	/*
	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
	 * have a usable RTAS implementation.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_instantiate_rtas();

#ifdef CONFIG_PPC_POWERNV
	if (of_platform == PLATFORM_OPAL)
		prom_instantiate_opal();
#endif /* CONFIG_PPC_POWERNV */

#ifdef CONFIG_PPC64
	/* instantiate sml */
	prom_instantiate_sml();
#endif

	/*
	 * On non-powermacs, put all CPUs in spin-loops.
	 *
	 * PowerMacs use a different mechanism to spin CPUs
	 *
	 * (This must be done after instantiating RTAS)
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_hold_cpus();

	/*
	 * Fill in some infos for use by the kernel later on
	 */
	if (prom_memory_limit) {
		__be64 val = cpu_to_be64(prom_memory_limit);
		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
			     &val, sizeof(val));
	}
#ifdef CONFIG_PPC64
	if (prom_iommu_off)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
			     NULL, 0);

	if (prom_iommu_force_on)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
			     NULL, 0);

	if (prom_tce_alloc_start) {
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
			     &prom_tce_alloc_start,
			     sizeof(prom_tce_alloc_start));
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
			     &prom_tce_alloc_end,
			     sizeof(prom_tce_alloc_end));
	}
#endif

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree...\n");
	flatten_device_tree();

	/*
	 * in case stdin is USB and still active on IBM machines...
	 * Unfortunately quiesce crashes on some powermacs if we have
	 * closed stdin already (in particular the powerbook 101). It
	 * appears that the OPAL version of OFW doesn't like it either.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMA's from
	 * devices etc...
	 */
	prom_printf("Quiescing Open Firmware ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	 */
	hdr = dt_header_start;

	/* Don't print anything after quiesce under OPAL, it crashes OFW */
	if (of_platform != PLATFORM_OPAL) {
		prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
		prom_debug("->dt_header_start=0x%x\n", hdr);
	}

#ifdef CONFIG_PPC32
	reloc_got2(-offset);
#else
	unreloc_toc();
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
	__start(hdr, kbase, 0, 0, 0,
		prom_opal_base, prom_opal_entry);
#else
	__start(hdr, kbase, 0, 0, 0, 0, 0);
#endif

	return 0;
}