/*
 *  linux/arch/sparc64/kernel/setup.c
 *
 *  Copyright (C) 1995,1996  David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997       Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/bootmem.h>

#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif
/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
 * operations in asm/ns87303.h
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);
struct screen_info screen_info = {
	0, 0,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	128,			/* orig-video-cols */
	0, 0, 0,		/* unused, ega_bx, unused */
	54,			/* orig-video-lines */
	0,			/* orig-video-isVGA */
	16			/* orig-video-points */
};
static void prom_console_write(struct console *con, const char *s,
			       unsigned int n)
{
	prom_write(s, n);
}
/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;
static struct console prom_early_console = {
	.name =		"earlyprom",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
	.index =	-1,
};
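/*
 * Registering prom_early_console (done below in setup_arch()) gives us
 * printk output through the PROM long before a real console driver is
 * up; because CON_BOOT is set, the core console code automatically
 * unregisters it once a regular console registers.  Rough usage sketch:
 *
 *	register_console(&prom_early_console);
 *	printk("ARCH: SUN4V\n");	// now routed via prom_console_write()
 */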
/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
	case 's':
		break;
	case 'h':
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		prom_early_console.flags &= ~CON_BOOT;
		break;
	case 'P':
		/* Force UltraSPARC-III P-Cache on. */
		if (tlb_type != cheetah) {
			printk("BOOT: Ignoring P-Cache force option.\n");
			break;
		}
		cheetah_pcache_forced_on = 1;
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
		cheetah_enable_pcache();
		break;

	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4))
			cmdline_memory_size = memparse(commands + 4,
						       &commands);

		while (*commands && *commands != ' ')
			commands++;
	}
}
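/*
 * Example: booting with "mem=512M" makes memparse() above return
 * 512 * 1024 * 1024, so cmdline_memory_size limits how much physical
 * memory mm/init.c:paging_init() will use.  (Illustration only; the
 * actual clamping happens in the mm code, not here.)
 */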
extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
static void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;
	unsigned long ver;
	int is_jbus;

	if (tlb_type == spitfire && !this_is_starfire)
		return;

	is_jbus = 0;
	if (tlb_type != hypervisor) {
		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
			   (ver >> 32UL) == __SERRANO_ID);
	}

	p = &__cpuid_patch;
	while (p < &__cpuid_patch_end) {
		unsigned long addr = p->addr;
		unsigned int *insns;

		switch (tlb_type) {
		case spitfire:
			insns = &p->starfire[0];
			break;
		case cheetah:
		case cheetah_plus:
			if (is_jbus)
				insns = &p->cheetah_jbus[0];
			else
				insns = &p->cheetah_safari[0];
			break;
		case hypervisor:
			insns = &p->sun4v[0];
			break;
		default:
			prom_printf("Unknown cpu type, halting.\n");
			prom_halt();
		}

		*(unsigned int *) (addr +  0) = insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		*(unsigned int *) (addr +  8) = insns[2];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  8));

		*(unsigned int *) (addr + 12) = insns[3];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr + 12));

		p++;
	}
}
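/*
 * per_cpu_patch() and the helpers below all use the same self-modifying
 * code idiom: store one 32-bit instruction word, order the store with
 * wmb(), then issue "flush <address>" so the instruction cache sees the
 * new word before it can be fetched.  Patching a single word therefore
 * always looks like this (sketch):
 *
 *	*(unsigned int *) addr = new_insn;
 *	wmb();
 *	__asm__ __volatile__("flush %0" : : "r" (addr));
 */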
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
			     struct sun4v_1insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insn;
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		start++;
	}
}
void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		start++;
	}
}
void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			      struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		start++;
	}
}
static void __init sun4v_patch(void)
{
	extern void sun4v_hvapi_init(void);

	if (tlb_type != hypervisor)
		return;

	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
				&__sun4v_1insn_patch_end);

	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
				&__sun4v_2insn_patch_end);

	if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
	    sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
		sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
					 &__sun_m7_2insn_patch_end);

	sun4v_hvapi_init();
}
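/*
 * The __sun4v_*insn_patch and __sun_m7_2insn_patch symbols used above
 * bound tables of { patch address, replacement instruction(s) } records
 * that assembler macros emit into dedicated sections at build time.
 * Assumed layout, for illustration only (see the sparc patch-entry
 * definitions in the asm headers for the authoritative structs):
 *
 *	struct sun4v_1insn_patch_entry { unsigned int addr, insn; };
 *	struct sun4v_2insn_patch_entry { unsigned int addr, insns[2]; };
 */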
static void __init popc_patch(void)
{
	struct popc_3insn_patch_entry *p3;
	struct popc_6insn_patch_entry *p6;

	p3 = &__popc_3insn_patch;
	while (p3 < &__popc_3insn_patch_end) {
		unsigned long i, addr = p3->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr + (i * 4)) = p3->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr + (i * 4)));
		}

		p3++;
	}

	p6 = &__popc_6insn_patch;
	while (p6 < &__popc_6insn_patch_end) {
		unsigned long i, addr = p6->addr;

		for (i = 0; i < 6; i++) {
			*(unsigned int *) (addr + (i * 4)) = p6->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr + (i * 4)));
		}

		p6++;
	}
}
static void __init pause_patch(void)
{
	struct pause_patch_entry *p;

	p = &__pause_3insn_patch;
	while (p < &__pause_3insn_patch_end) {
		unsigned long i, addr = p->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr + (i * 4)) = p->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr + (i * 4)));
		}

		p++;
	}
}
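/*
 * popc_patch() and pause_patch() are only called from
 * init_sparc64_elf_hwcap() once the capability mask is known: with POPC
 * advertised, the generic population-count call sites are rewritten to
 * the hardware instruction, and with PAUSE advertised the cpu_relax()
 * spin sequence is rewritten to use the PAUSE instruction (a rough
 * characterization; the exact replacement words live in the assembler
 * patch tables, not here).
 */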
void __init start_early_boot(void)
{
	int cpu;

	check_if_starfire();
	per_cpu_patch();
	sun4v_patch();

	cpu = hard_smp_processor_id();
	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
			    cpu, NR_CPUS);
		prom_halt();
	}
	current_thread_info()->cpu = cpu;

	prom_init_report();
	start_kernel();
}
/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
				   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
				   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);
static const char *hwcaps[] = {
	"flush", "stbar", "swap", "muldiv", "v9",
	"ultra3", "blkinit", "n2",

	/* These strings are as they appear in the machine description
	 * 'hwcap-list' property for cpu nodes.
	 */
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
};
static const char *crypto_hwcaps[] = {
	"aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
	"sha512", "mpmul", "montmul", "montsqr", "crc32c",
};
void cpucap_info(struct seq_file *m)
{
	unsigned long caps = sparc64_elf_hwcap;
	int i, printed = 0;

	seq_puts(m, "cpucaps\t\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;

		if (hwcaps[i] && (caps & bit)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", hwcaps[i]);
			printed++;
		}
	}
	if (caps & HWCAP_SPARC_CRYPTO) {
		unsigned long cfr;

		__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			unsigned long bit = 1UL << i;

			if (cfr & bit) {
				seq_printf(m, "%s%s",
					   printed ? "," : "", crypto_hwcaps[i]);
				printed++;
			}
		}
	}
	seq_putc(m, '\n');
}
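/*
 * cpucap_info() supplies the "cpucaps" line of /proc/cpuinfo.  On a
 * sun4v Niagara-class box the result looks roughly like this
 * (illustrative only, the exact list depends on the cpu):
 *
 *	$ grep cpucaps /proc/cpuinfo
 *	cpucaps		: flush,stbar,swap,muldiv,v9,blkinit,n2,mul32,div32,v8plus,popc,vis,vis2
 */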
static void __init report_one_hwcap(int *printed, const char *name)
{
	if ((*printed) == 0)
		printk(KERN_INFO "CPU CAPS: [");
	printk(KERN_CONT "%s%s",
	       (*printed) ? "," : "", name);
	if (++(*printed) == 8) {
		printk(KERN_CONT "]\n");
		*printed = 0;
	}
}
static void __init report_crypto_hwcaps(int *printed)
{
	unsigned long cfr;
	int i;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

	for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
		unsigned long bit = 1UL << i;

		if (cfr & bit)
			report_one_hwcap(printed, crypto_hwcaps[i]);
	}
}
static void __init report_hwcaps(unsigned long caps)
{
	int i, printed = 0;

	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;

		if (hwcaps[i] && (caps & bit))
			report_one_hwcap(&printed, hwcaps[i]);
	}
	if (caps & HWCAP_SPARC_CRYPTO)
		report_crypto_hwcaps(&printed);
	if (printed != 0)
		printk(KERN_CONT "]\n");
}
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
	struct mdesc_handle *hp;
	unsigned long caps = 0;
	const char *prop;
	int len;
	u64 pn;

	hp = mdesc_grab();
	if (!hp)
		return 0;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	if (pn == MDESC_NODE_NULL)
		goto out;

	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
	if (!prop)
		goto out;

	while (len) {
		int i, plen;

		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
			unsigned long bit = 1UL << i;

			if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
				caps |= bit;
				break;
			}
		}
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			if (!strcmp(prop, crypto_hwcaps[i]))
				caps |= HWCAP_SPARC_CRYPTO;
		}

		plen = strlen(prop) + 1;
		prop += plen;
		len -= plen;
	}

out:
	mdesc_release(hp);

	return caps;
}
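/*
 * The machine description "hwcap-list" property is one blob of
 * NUL-terminated strings laid end to end, which is why the loop above
 * advances by strlen(prop) + 1 each pass.  For example (contents vary
 * by cpu, shown only to illustrate the walk):
 *
 *	"mul32\0div32\0v8plus\0popc\0vis\0vis2\0blkinit\0n2\0"
 */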
/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
	unsigned long cap = sparc64_elf_hwcap;
	unsigned long mdesc_caps;

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cap |= HWCAP_SPARC_ULTRA3;
	else if (tlb_type == hypervisor) {
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_BLKINIT;
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_N2;
	}

	cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

	mdesc_caps = mdesc_cpu_hwcap_list();
	if (!mdesc_caps) {
		if (tlb_type == spitfire)
			cap |= AV_SPARC_VIS;
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
		if (tlb_type == cheetah_plus) {
			unsigned long impl, ver;

			__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
			impl = ((ver >> 32) & 0xffff);
			if (impl == PANTHER_IMPL)
				cap |= AV_SPARC_POPC;
		}
		if (tlb_type == hypervisor) {
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
				cap |= AV_SPARC_ASI_BLK_INIT;
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
					AV_SPARC_ASI_BLK_INIT |
					AV_SPARC_POPC);
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
					AV_SPARC_FMAF);
		}
	}
	sparc64_elf_hwcap = cap | mdesc_caps;

	report_hwcaps(sparc64_elf_hwcap);

	if (sparc64_elf_hwcap & AV_SPARC_POPC)
		popc_patch();
	if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
		pause_patch();
}
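/*
 * User space reads this mask through the ELF auxiliary vector.  A
 * minimal sketch of a program keying a fast path off one of the
 * AV_SPARC_* bits (hypothetical user code, not part of this file;
 * use_vis3_memcpy()/use_generic_memcpy() are stand-ins):
 *
 *	#include <sys/auxv.h>
 *
 *	if (getauxval(AT_HWCAP) & AV_SPARC_VIS3)
 *		use_vis3_memcpy();
 *	else
 *		use_generic_memcpy();
 */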
void __init alloc_irqstack_bootmem(void)
{
	unsigned int i, node;

	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
	}
}
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
	parse_early_param();

	boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
	if (btext_find_display())
#endif
		register_console(&prom_early_console);

	if (tlb_type == hypervisor)
		printk("ARCH: SUN4V\n");
	else
		printk("ARCH: SUN4U\n");

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	idprom_init();

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault(chosen, "client-ip", 0);
		sv = prom_getintdefault(chosen, "server-ip", 0);
		gw = prom_getintdefault(chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			ic_proto_enabled = 0;
#endif
		}
	}
#endif

	/* Get boot processor trap_block[] setup. */
	init_cur_cpu_trap(current_thread_info());

	paging_init();
	init_sparc64_elf_hwcap();
	smp_fill_in_cpu_possible_map();
	/*
	 * Once the OF device tree and MDESC have been setup and nr_cpus has
	 * been parsed, we know the list of possible cpus.  Therefore we can
	 * allocate the IRQ stacks.
	 */
	alloc_irqstack_bootmem();
}
extern int stop_a_enabled;

void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	prom_printf("\n");
	flush_user_windows();

	prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);