/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 Cavium Networks
 * Copyright (C) 2008, 2009 Wind River Systems
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h>	/* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/kexec.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-mio-defs.h>
#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
extern int __cvmx_interrupt_ecc_report_single_bit_errors;
extern void cvmx_interrupt_rsl_enable(void);
#endif

extern struct plat_smp_ops octeon_smp_ops;

extern void pci_console_init(const char *arg);
static unsigned long long MAX_MEMORY = 512ull << 20;

struct octeon_boot_descriptor *octeon_boot_desc_ptr;

struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);

static unsigned long long RESERVE_LOW_MEM = 0ull;
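/*
 * Note: MAX_MEMORY and RESERVE_LOW_MEM above are only starting values.
 * prom_init() below re-derives them from any "mem=" argument passed on
 * the command line or by the bootloader, and plat_mem_setup() uses them
 * to bound what is pulled from the bootloader's bootmem allocator.
 */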
#ifdef CONFIG_KEXEC
#ifdef CONFIG_SMP
/*
 * Wait until the relocation code is ready, then send the secondary
 * CPUs to spin until the kernel is relocated.
 */
static void octeon_kexec_smp_down(void *ignored)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	set_cpu_online(cpu, false);
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	relocated_kexec_smp_wait(NULL);
}
#endif
#define OCTEON_DDR0_BASE    (0x0ULL)
#define OCTEON_DDR0_SIZE    (0x010000000ULL)
#define OCTEON_DDR1_BASE    (0x410000000ULL)
#define OCTEON_DDR1_SIZE    (0x010000000ULL)
#define OCTEON_DDR2_BASE    (0x020000000ULL)
#define OCTEON_DDR2_SIZE    (0x3e0000000ULL)
#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
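/*
 * The three DDR windows above describe how Octeon DRAM appears in the
 * physical address map: 256MB at address 0 (DDR0), a second 256MB window
 * at OCTEON_DDR1_BASE (DDR1), and the remainder starting at 512MB (DDR2).
 * kexec_bootmem_init() below walks these windows when it rebuilds the
 * bootmem free list for a kexec'd kernel, capping the total at
 * OCTEON_MAX_PHY_MEM_SIZE (16GB).
 */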
static struct kimage *kimage_ptr;
static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
{
	uint64_t addr;
	struct cvmx_bootmem_desc *bootmem_desc;

	bootmem_desc = cvmx_bootmem_get_desc();

	if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
		mem_size = OCTEON_MAX_PHY_MEM_SIZE;
		pr_err("Error: requested memory too large, truncating to maximum size\n");
	}

	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;

	addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes);
	bootmem_desc->head_addr = 0;

	if (mem_size <= OCTEON_DDR0_SIZE) {
		__cvmx_bootmem_phy_free(addr,
				mem_size - RESERVE_LOW_MEM -
				low_reserved_bytes, 0);
		return;
	}

	__cvmx_bootmem_phy_free(addr,
			OCTEON_DDR0_SIZE - RESERVE_LOW_MEM -
			low_reserved_bytes, 0);

	mem_size -= OCTEON_DDR0_SIZE;

	if (mem_size > OCTEON_DDR1_SIZE) {
		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
				mem_size - OCTEON_DDR1_SIZE, 0);
	} else
		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
}
static int octeon_kexec_prepare(struct kimage *image)
{
	int i;
	char *bootloader = "kexec";

	octeon_boot_desc_ptr->argc = 0;
	for (i = 0; i < image->nr_segments; i++) {
		if (!strncmp(bootloader, (char *)image->segment[i].buf,
				strlen(bootloader))) {
			/*
			 * convert command line string to array
			 * of parameters (as bootloader does).
			 */
			int argc = 0, offt;
			char *str = (char *)image->segment[i].buf;
			char *ptr = strchr(str, ' ');
			while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
				*ptr = '\0';
				if (ptr[1] != ' ') {
					offt = (int)(ptr - str + 1);
					octeon_boot_desc_ptr->argv[argc] =
						image->segment[i].mem + offt;
					argc++;
				}
				ptr = strchr(ptr + 1, ' ');
			}
			octeon_boot_desc_ptr->argc = argc;
			break;
		}
	}

	/*
	 * Information about segments will be needed during pre-boot memory
	 * initialization.
	 */
	kimage_ptr = image;
	return 0;
}
static void octeon_generic_shutdown(void)
{
	int i;
#ifdef CONFIG_SMP
	int cpu;
#endif
	struct cvmx_bootmem_desc *bootmem_desc;
	void *named_block_array_ptr;

	bootmem_desc = cvmx_bootmem_get_desc();
	named_block_array_ptr =
		cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);

#ifdef CONFIG_SMP
	/* disable watchdogs */
	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
	if (kimage_ptr != kexec_crash_image) {
		memset(named_block_array_ptr,
			0x0,
			CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
			sizeof(struct cvmx_bootmem_named_block_desc));
		/*
		 * Mark all memory (except low 0x100000 bytes) as free.
		 * It is the same thing that bootloader does.
		 */
		kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
				   0x100000);
		/*
		 * Allocate all segments to avoid their corruption during boot.
		 */
		for (i = 0; i < kimage_ptr->nr_segments; i++)
			cvmx_bootmem_alloc_address(
				kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
				kimage_ptr->segment[i].mem - PAGE_SIZE,
				PAGE_SIZE);
	} else {
		/*
		 * Do not mark all memory as free. Free only named sections
		 * leaving the rest of memory unchanged.
		 */
		struct cvmx_bootmem_named_block_desc *ptr =
			(struct cvmx_bootmem_named_block_desc *)
			named_block_array_ptr;

		for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
			if (ptr[i].size)
				cvmx_bootmem_free_named(ptr[i].name);
	}
	kexec_args[2] = 1UL; /* running on octeon_main_processor */
	kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#ifdef CONFIG_SMP
	secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
	secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#endif
}
static void octeon_shutdown(void)
{
	octeon_generic_shutdown();
#ifdef CONFIG_SMP
	smp_call_function(octeon_kexec_smp_down, NULL, 0);
	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}
static void octeon_crash_shutdown(struct pt_regs *regs)
{
	octeon_generic_shutdown();
	default_machine_crash_shutdown(regs);
}

#endif /* CONFIG_KEXEC */
#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif
/* The crashkernel cmdline parameter is parsed _after_ memory setup,
 * so we also parse it here (workaround for EHB5200). */
static uint64_t crashk_size, crashk_base;
static int octeon_uart;
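/* Index (0 or 1) of the bootloader console UART; set in prom_init() and
 * used by prom_putchar() below for early output. */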
extern asmlinkage void handle_int(void);
extern asmlinkage void plat_irq_dispatch(void);
/**
 * Return non-zero if we are currently running in the Octeon simulator
 */
int octeon_is_simulation(void)
{
	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);
/**
 * Return true if Octeon is in PCI Host mode. This means
 * Linux can control the PCI bus.
 *
 * Returns non-zero if Octeon is in host mode.
 */
int octeon_is_pci_host(void)
{
	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
}
/**
 * Get the clock rate of Octeon
 *
 * Returns Clock rate in HZ
 */
uint64_t octeon_get_clock_rate(void)
{
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

	return sysinfo->cpu_clock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);
static u64 octeon_io_clock_rate;

u64 octeon_get_io_clock_rate(void)
{
	return octeon_io_clock_rate;
}
EXPORT_SYMBOL(octeon_get_io_clock_rate);
/**
 * Write to the LCD display connected to the bootbus. This display
 * exists on most Cavium evaluation boards. If it doesn't exist, then
 * this function doesn't do anything.
 *
 * @s:	    String to write
 */
void octeon_write_lcd(const char *s)
{
	if (octeon_bootinfo->led_display_base_addr) {
		void __iomem *lcd_address =
			ioremap_nocache(octeon_bootinfo->led_display_base_addr,
					8);
		int i;
		for (i = 0; i < 8; i++, s++) {
			if (*s)
				iowrite8(*s, lcd_address + i);
			else
				iowrite8(' ', lcd_address + i);
		}
		iounmap(lcd_address);
	}
}
/**
 * Return the console uart passed by the bootloader
 *
 * Returns uart (0 or 1)
 */
int octeon_get_boot_uart(void)
{
	int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
	uart = 1;
#else
	uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
		1 : 0;
#endif
	return uart;
}
/**
 * Get the coremask Linux was booted on.
 */
int octeon_get_boot_coremask(void)
{
	return octeon_boot_desc_ptr->core_mask;
}
/**
 * Check the hardware BIST results for a CPU
 */
void octeon_check_cpu_bist(void)
{
	const int coreid = cvmx_get_core_num();
	unsigned long long mask;
	unsigned long long bist_val;

	/* Check BIST results for COP0 registers */
	mask = 0x1f00000000ull;
	bist_val = read_octeon_c0_icacheerr();
	if (bist_val & mask)
		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
		       coreid, bist_val);

	bist_val = read_octeon_c0_dcacheerr();
	if (bist_val & 1)
		pr_err("Core%d L1 Dcache parity error: CacheErr(dcache) = 0x%llx\n",
		       coreid, bist_val);

	mask = 0xfc00000000000000ull;
	bist_val = read_c0_cvmmemctl();
	if (bist_val & mask)
		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
		       coreid, bist_val);

	write_octeon_c0_dcacheerr(0);
}
/**
 * @command: Command to pass to the bootloader. Currently ignored.
 */
static void octeon_restart(char *command)
{
	/* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif

	while (1)
		cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}
/**
 * Permanently stop a core.
 */
static void octeon_kill_core(void *arg)
{
	if (octeon_is_simulation()) {
		/* The simulator needs the watchdog to stop for dead cores */
		cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
		/* A break instruction causes the simulator to stop a core */
		asm volatile ("sync\nbreak");
	}
}
static void octeon_halt(void)
{
	smp_call_function(octeon_kill_core, NULL, 0);

	switch (octeon_bootinfo->board_type) {
	case CVMX_BOARD_TYPE_NAO38:
		/* Driving a 1 to GPIO 12 shuts off this board */
		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
		break;
	default:
		octeon_write_lcd("PowerOff");
		break;
	}

	octeon_kill_core(NULL);
}
/**
 * Handle all the error condition interrupts that might occur.
 */
#ifdef CONFIG_CAVIUM_DECODE_RSL
static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
{
	cvmx_interrupt_rsl_decode();
	return IRQ_HANDLED;
}
#endif
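/* The RML/RSL handler above is wired up via request_irq() in
 * prom_free_prom_memory() when CONFIG_CAVIUM_DECODE_RSL is enabled. */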
/**
 * Return a string representing the system type
 */
const char *octeon_board_type_string(void)
{
	static char name[80];
	sprintf(name, "%s (%s)",
		cvmx_board_type_to_string(octeon_bootinfo->board_type),
		octeon_model_get_string(read_c0_prid()));
	return name;
}

const char *get_system_type(void)
	__attribute__ ((alias("octeon_board_type_string")));
void octeon_user_io_init(void)
{
	union octeon_cvmemctl cvmmemctl;
	union cvmx_iob_fau_timeout fau_timeout;
	union cvmx_pow_nw_tim nm_tim;

	/* Get the current settings for CP0_CVMMEMCTL_REG */
	cvmmemctl.u64 = read_c0_cvmmemctl();
	/* R/W If set, marked write-buffer entries time out the same
	 * as other entries; if clear, marked write-buffer entries
	 * use the maximum timeout. */
	cvmmemctl.s.dismarkwblongto = 1;
	/* R/W If set, a merged store does not clear the write-buffer
	 * entry timeout state. */
	cvmmemctl.s.dismrgclrwbto = 0;
	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
	 * word location for an IOBDMA. The other 8 bits come from the
	 * SCRADDR field of the IOBDMA. */
	cvmmemctl.s.iobdmascrmsb = 0;
	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
	 * clear, SYNCWS and SYNCS only order unmarked
	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
	 * set. */
	cvmmemctl.s.syncwsmarked = 0;
	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
	cvmmemctl.s.dissyncws = 0;
	/* R/W If set, no stall happens on write buffer full. */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		cvmmemctl.s.diswbfst = 1;
	else
		cvmmemctl.s.diswbfst = 0;
	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with <48>==0 */
	cvmmemctl.s.xkmemenas = 0;
	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==0 */
	cvmmemctl.s.xkmemenau = 0;

	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenau = 0;

	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
	 * when this is set) RW, reset to 0. */
	cvmmemctl.s.allsyncw = 0;

	/* R/W If set, no stores merge, and all stores reach the
	 * coherent bus in order. */
	cvmmemctl.s.nomerge = 0;
	/* R/W Selects the bit in the counter used for DID time-outs:
	 * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
	 * between 1x and 2x this interval. For example, with
	 * DIDTTO=3, expiration interval is between 16K and 32K. */
	cvmmemctl.s.didtto = 0;
	/* R/W If set, the (mem) CSR clock never turns off. */
	cvmmemctl.s.csrckalwys = 0;
	/* R/W If set, mclk never turns off. */
	cvmmemctl.s.mclkalwys = 0;
	/* R/W Selects the bit in the counter used for write buffer
	 * flush time-outs: (WBFLT+11) is the bit position in an
	 * internal counter used to determine expiration. The write
	 * buffer expires between 1x and 2x this interval. For
	 * example, with WBFLT = 0, a write buffer expires between 2K
	 * and 4K cycles after the write buffer entry is allocated. */
	cvmmemctl.s.wbfltime = 0;
	/* R/W If set, do not put Istream in the L2 cache. */
	cvmmemctl.s.istrnol2 = 0;

	/*
	 * R/W The write buffer threshold. As per erratum Core-14752
	 * for CN63XX, a sc/scd might fail if the write buffer is
	 * full. Lowering WBTHRESH greatly lowers the chances of the
	 * write buffer ever being full and triggering the erratum.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
		cvmmemctl.s.wbthresh = 4;
	else
		cvmmemctl.s.wbthresh = 10;
	/* R/W If set, CVMSEG is available for loads/stores in
	 * kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
	cvmmemctl.s.cvmsegenak = 1;
#else
	cvmmemctl.s.cvmsegenak = 0;
#endif
	/* R/W If set, CVMSEG is available for loads/stores in
	 * supervisor mode. */
	cvmmemctl.s.cvmsegenas = 0;
	/* R/W If set, CVMSEG is available for loads/stores in user
	 * mode. */
	cvmmemctl.s.cvmsegenau = 0;
	/* R/W Size of local memory in cache blocks, 54 (6912 bytes)
	 * is max legal value. */
	cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;

	write_c0_cvmmemctl(cvmmemctl.u64);

	if (smp_processor_id() == 0)
		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);

	/* Set a default for the hardware timeouts */
	fau_timeout.u64 = 0;
	fau_timeout.s.tout_val = 0xfff;
	/* Disable tagwait FAU timeout */
	fau_timeout.s.tout_enb = 0;
	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);

	nm_tim.u64 = 0;
	nm_tim.s.nw_tim = 3;
	cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);

	write_octeon_c0_icacheerr(0);
	write_c0_derraddr1(0);
}
/**
 * Early entry point for arch setup
 */
void __init prom_init(void)
{
	struct cvmx_sysinfo *sysinfo;
	const char *arg;
	char *p;
	int i;
	int argc;
#ifdef CONFIG_CAVIUM_RESERVE32
	int64_t addr = -1;
#endif
	/*
	 * The bootloader passes a pointer to the boot descriptor in
	 * $a3, this is available as fw_arg3.
	 */
	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
	octeon_bootinfo =
		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
	sysinfo = cvmx_sysinfo_get();
	memset(sysinfo, 0, sizeof(*sysinfo));
	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
	sysinfo->phy_mem_desc_ptr =
		cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
	sysinfo->core_mask = octeon_bootinfo->core_mask;
	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
	sysinfo->board_type = octeon_bootinfo->board_type;
	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
	       sizeof(sysinfo->mac_addr_base));
	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
	memcpy(sysinfo->board_serial_number,
	       octeon_bootinfo->board_serial_number,
	       sizeof(sysinfo->board_serial_number));
	sysinfo->compact_flash_common_base_addr =
		octeon_bootinfo->compact_flash_common_base_addr;
	sysinfo->compact_flash_attribute_base_addr =
		octeon_bootinfo->compact_flash_attribute_base_addr;
	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* I/O clock runs at a different rate than the CPU. */
		union cvmx_mio_rst_boot rst_boot;
		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
	} else {
		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
	}
	/*
	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
	 */
	if (!octeon_is_simulation() &&
	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
		cvmx_write_csr(CVMX_LED_EN, 0);
		cvmx_write_csr(CVMX_LED_PRT, 0);
		cvmx_write_csr(CVMX_LED_DBG, 0);
		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
		cvmx_write_csr(CVMX_LED_EN, 1);
	}
#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * We need to temporarily allocate all memory in the reserve32
	 * region. This makes sure the kernel doesn't allocate this
	 * memory when it is getting memory from the
	 * bootloader. Later, after the memory allocations are
	 * complete, the reserve32 will be freed.
	 *
	 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
	 * is in case we later use hugetlb entries with it.
	 */
	addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
						  0, 0, 2 << 20,
						  "CAVIUM_RESERVE32", 0);
	if (addr < 0)
		pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
	else
		octeon_reserve32_memory = addr;
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
	} else {
		uint32_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
		/* TLB refill */
		cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
		/* General exception */
		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
		/* Interrupt handler */
		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
	}
#endif
	octeon_check_cpu_bist();

	octeon_uart = octeon_get_boot_uart();

#ifdef CONFIG_SMP
	octeon_write_lcd("LinuxSMP");
#else
	octeon_write_lcd("Linux");
#endif
#ifdef CONFIG_CAVIUM_GDB
	/*
	 * When debugging the linux kernel, force the cores to enter
	 * the debug exception handler to break in.
	 */
	if (octeon_get_boot_debug_flag()) {
		cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
		cvmx_read_csr(CVMX_CIU_DINT);
	}
#endif

	octeon_setup_delays();
	/*
	 * BIST should always be enabled when doing a soft reset. L2
	 * Cache locking for instance is not cleared unless BIST is
	 * enabled. Unfortunately, due to chip erratum G-200 for
	 * CN38XX and CN31XX, BIST must be disabled on these parts.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
	    OCTEON_IS_MODEL(OCTEON_CN31XX))
		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
	else
		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
	/* Default to 64MB in the simulator to speed things up */
	if (octeon_is_simulation())
		MAX_MEMORY = 64ull << 20;

	arg = strstr(arcs_cmdline, "mem=");
	if (arg) {
		MAX_MEMORY = memparse(arg + 4, &p);
		if (MAX_MEMORY == 0)
			MAX_MEMORY = 32ull << 30;
		if (*p == '@')
			RESERVE_LOW_MEM = memparse(p + 1, &p);
	}
	argc = octeon_boot_desc_ptr->argc;
	for (i = 0; i < argc; i++) {
		const char *arg =
			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
		if ((strncmp(arg, "MEM=", 4) == 0) ||
		    (strncmp(arg, "mem=", 4) == 0)) {
			MAX_MEMORY = memparse(arg + 4, &p);
			if (MAX_MEMORY == 0)
				MAX_MEMORY = 32ull << 30;
			if (*p == '@')
				RESERVE_LOW_MEM = memparse(p + 1, &p);
		} else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
			__cvmx_interrupt_ecc_report_single_bit_errors = 1;
			pr_notice("Reporting of single bit ECC errors is turned on\n");
#endif
		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
			crashk_size = memparse(arg + 12, &p);
			if (*p == '@')
				crashk_base = memparse(p + 1, &p);
			strcat(arcs_cmdline, " ");
			strcat(arcs_cmdline, arg);
			/*
			 * To do: switch parsing to new style, something like:
			 * parse_crashkernel(arg, sysinfo->system_dram_size,
			 *		     &crashk_size, &crashk_base);
			 */
		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
			   sizeof(arcs_cmdline) - 1) {
			strcat(arcs_cmdline, " ");
			strcat(arcs_cmdline, arg);
		}
	}
	if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
		strcat(arcs_cmdline, " console=ttyS0,115200");
#else
		if (octeon_uart == 1)
			strcat(arcs_cmdline, " console=ttyS1,115200");
		else
			strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
	}
	if (octeon_is_simulation()) {
		/*
		 * The simulator uses a mtdram device pre-filled with
		 * the filesystem. Also specify the calibration delay
		 * to avoid calculating it every time.
		 */
		strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824");
	}

	mips_hpt_frequency = octeon_get_clock_rate();

	octeon_init_cvmcount();
	_machine_restart = octeon_restart;
	_machine_halt = octeon_halt;

#ifdef CONFIG_KEXEC
	_machine_kexec_shutdown = octeon_shutdown;
	_machine_crash_shutdown = octeon_crash_shutdown;
	_machine_kexec_prepare = octeon_kexec_prepare;
#endif

	octeon_user_io_init();
	register_smp_ops(&octeon_smp_ops);
}
/* Exclude a single page from the regions obtained in plat_mem_setup. */
#ifndef CONFIG_CRASH_DUMP
static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
{
	if (addr > *mem && addr < *mem + *size) {
		u64 inc = addr - *mem;
		add_memory_region(*mem, inc, BOOT_MEM_RAM);
		*mem += inc;
		*size -= inc;
	}

	if (addr == *mem && *size > PAGE_SIZE) {
		*mem += PAGE_SIZE;
		*size -= PAGE_SIZE;
	}
}
#endif /* CONFIG_CRASH_DUMP */
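/*
 * plat_mem_setup() below registers RAM with the kernel by repeatedly
 * pulling chunks out of the bootloader's bootmem allocator, avoiding
 * any crashkernel window and punching single-page holes around the
 * 256MB PCIe BAR1 region via memory_exclude_page() above.
 */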
void __init plat_mem_setup(void)
{
	uint64_t mem_alloc_size;
	uint64_t total;
	uint64_t crashk_end;
#ifndef CONFIG_CRASH_DUMP
	int64_t memory;
	uint64_t kernel_start;
	uint64_t kernel_size;
#endif

	total = 0;
	crashk_end = 0;

	/*
	 * The Mips memory init uses the first memory location for
	 * some memory vectors. When SPARSEMEM is in use, it doesn't
	 * verify that the size is big enough for the final
	 * vectors. Making the smallest chunk 4MB seems to be enough
	 * to consistently work.
	 */
	mem_alloc_size = 4 << 20;
	if (mem_alloc_size > MAX_MEMORY)
		mem_alloc_size = MAX_MEMORY;
/* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
#ifdef CONFIG_CRASH_DUMP
	add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM);
	total += MAX_MEMORY;
#else
	if (crashk_size > 0) {
		add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM);
		crashk_end = crashk_base + crashk_size;
	}
	/*
	 * When allocating memory, we want incrementing addresses from
	 * bootmem_alloc so the code in add_memory_region can merge
	 * regions next to each other.
	 */
	cvmx_bootmem_lock();
	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
		&& (total < MAX_MEMORY)) {
		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
						__pa_symbol(&__init_end), -1,
						0x100000,
						CVMX_BOOTMEM_FLAG_NO_LOCKING);
		if (memory >= 0) {
			u64 size = mem_alloc_size;
			u64 end;

			/*
			 * exclude a page at the beginning and end of
			 * the 256MB PCIe 'hole' so the kernel will not
			 * try to allocate multi-page buffers that
			 * span the discontinuity.
			 */
			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
					    &memory, &size);
			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
					    CVMX_PCIE_BAR1_PHYS_SIZE,
					    &memory, &size);

			end = memory + mem_alloc_size;
			/*
			 * This function automatically merges address regions
			 * next to each other if they are received in
			 * incrementing order.
			 */
			if (memory < crashk_base && end > crashk_end) {
				/* region is fully in */
				add_memory_region(memory,
						  crashk_base - memory,
						  BOOT_MEM_RAM);
				total += crashk_base - memory;
				add_memory_region(crashk_end,
						  end - crashk_end,
						  BOOT_MEM_RAM);
				total += end - crashk_end;
				continue;
			}

			if (memory >= crashk_base && end <= crashk_end)
				/*
				 * Entire memory region is within the new
				 * kernel's memory, ignore it.
				 */
				continue;

			if (memory > crashk_base && memory < crashk_end &&
			    end > crashk_end) {
				/*
				 * Overlap with the beginning of the region,
				 * reserve the beginning.
				 */
				mem_alloc_size -= crashk_end - memory;
				memory = crashk_end;
			} else if (memory < crashk_base && end > crashk_base &&
				   end < crashk_end)
				/*
				 * Overlap with the beginning of the region,
				 * so trim this block at crashk_base.
				 */
				mem_alloc_size -= end - crashk_base;

			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
			total += mem_alloc_size;
			/* Recovering mem_alloc_size */
			mem_alloc_size = 4 << 20;
		} else {
			break;
		}
	}
	cvmx_bootmem_unlock();
	/* Add the memory region for the kernel. */
	kernel_start = (unsigned long) _text;
	kernel_size = ALIGN(_end - _text, 0x100000);

	/* Adjust for physical offset. */
	kernel_start &= ~0xffffffff80000000ULL;
	add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * Now that we've allocated the kernel memory it is safe to
	 * free the reserved region. We free it here so that builtin
	 * drivers can use the memory.
	 */
	if (octeon_reserve32_memory)
		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */

	if (total == 0)
		panic("Unable to allocate memory from cvmx_bootmem_phy_alloc\n");
}
/*
 * Emit one character to the boot UART.  Exported for use by the
 * watchdog timer.
 */
int prom_putchar(char c)
{
	uint64_t lsrval;

	/* Spin until there is room */
	do {
		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
	} while ((lsrval & 0x20) == 0);

	/* Write the byte */
	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
	return 1;
}
EXPORT_SYMBOL(prom_putchar);
void prom_free_prom_memory(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
		/* Check for presence of Core-14449 fix.  */
		u32 insn;
		u32 *foo;

		foo = &insn;

		asm volatile("# before" : : : "memory");
		prefetch(foo);
		asm volatile(
			".set push\n\t"
			".set noreorder\n\t"
			"bal 1f\n\t"
			"nop\n"
			"1:\tlw %0,-12($31)\n\t"
			".set pop\n\t"
			: "=r" (insn) : : "$31", "memory");

		if ((insn >> 26) != 0x33)
			panic("No PREF instruction at Core-14449 probe point.");

		if (((insn >> 16) & 0x1f) != 28)
			panic("Core-14449 WAR not in place (%04x).\n"
			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
			      insn);
	}
#ifdef CONFIG_CAVIUM_DECODE_RSL
	cvmx_interrupt_rsl_enable();

	/* Add an interrupt handler for general failures. */
	if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
			"RML/RSL", octeon_rlm_interrupt)) {
		panic("Unable to request_irq(OCTEON_IRQ_RML)");
	}
#endif
}
int octeon_prune_device_tree(void);

extern const char __dtb_octeon_3xxx_begin;
extern const char __dtb_octeon_3xxx_end;
extern const char __dtb_octeon_68xx_begin;
extern const char __dtb_octeon_68xx_end;
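/*
 * device_tree_init() below chooses between an FDT handed over by the
 * bootloader (bootinfo minor version >= 3) and one of the built-in
 * blobs declared above; only the built-in trees are pruned to match
 * the running chip.
 */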
void __init device_tree_init(void)
{
	int dt_size;
	struct boot_param_header *fdt;
	bool do_prune;

	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
		if (fdt_check_header(fdt))
			panic("Corrupt Device Tree passed to kernel.");
		dt_size = be32_to_cpu(fdt->totalsize);
		do_prune = false;
	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		fdt = (struct boot_param_header *)&__dtb_octeon_68xx_begin;
		dt_size = &__dtb_octeon_68xx_end - &__dtb_octeon_68xx_begin;
		do_prune = true;
	} else {
		fdt = (struct boot_param_header *)&__dtb_octeon_3xxx_begin;
		dt_size = &__dtb_octeon_3xxx_end - &__dtb_octeon_3xxx_begin;
		do_prune = true;
	}

	/* Copy the default tree from init memory. */
	initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
	if (initial_boot_params == NULL)
		panic("Could not allocate initial_boot_params\n");
	memcpy(initial_boot_params, fdt, dt_size);

	if (do_prune) {
		octeon_prune_device_tree();
		pr_info("Using internal Device Tree.\n");
	} else {
		pr_info("Using passed Device Tree.\n");
	}
	unflatten_device_tree();
}
static int __initdata disable_octeon_edac_p;

static int __init disable_octeon_edac(char *str)
{
	disable_octeon_edac_p = 1;
	return 0;
}
early_param("disable_octeon_edac", disable_octeon_edac);
static char *edac_device_names[] = {
	"octeon_l2c_edac",
	"octeon_pc_edac",
};
static int __init edac_devinit(void)
{
	struct platform_device *dev;
	int i, err = 0;
	int num_lmc;
	char *name;

	if (disable_octeon_edac_p)
		return 0;

	for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
		name = edac_device_names[i];
		dev = platform_device_register_simple(name, -1, NULL, 0);
		if (IS_ERR(dev)) {
			pr_err("Registration of %s failed!\n", name);
			err = PTR_ERR(dev);
		}
	}

	num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
		(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
	for (i = 0; i < num_lmc; i++) {
		dev = platform_device_register_simple("octeon_lmc_edac",
						      i, NULL, 0);
		if (IS_ERR(dev)) {
			pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
			err = PTR_ERR(dev);
		}
	}

	return err;
}
device_initcall(edac_devinit);