/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 Cavium Networks
 * Copyright (C) 2008, 2009 Wind River Systems
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h>	/* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/kexec.h>

#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-mio-defs.h>
#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
extern int __cvmx_interrupt_ecc_report_single_bit_errors;
extern void cvmx_interrupt_rsl_enable(void);
#endif

extern struct plat_smp_ops octeon_smp_ops;

extern void pci_console_init(const char *arg);
static unsigned long long MAX_MEMORY = 512ull << 20;

struct octeon_boot_descriptor *octeon_boot_desc_ptr;

struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);

static unsigned long long RESERVE_LOW_MEM = 0ull;
#ifdef CONFIG_KEXEC
#ifdef CONFIG_SMP
/*
 * Wait until the relocation code is prepared, then send the secondary
 * CPUs to spin until the kernel has been relocated.
 */
static void octeon_kexec_smp_down(void *ignored)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	set_cpu_online(cpu, false);
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	relocated_kexec_smp_wait(NULL);
}
#endif
#define OCTEON_DDR0_BASE    (0x0ULL)
#define OCTEON_DDR0_SIZE    (0x010000000ULL)
#define OCTEON_DDR1_BASE    (0x410000000ULL)
#define OCTEON_DDR1_SIZE    (0x010000000ULL)
#define OCTEON_DDR2_BASE    (0x020000000ULL)
#define OCTEON_DDR2_SIZE    (0x3e0000000ULL)
#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)

static struct kimage *kimage_ptr;
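
/*
 * Rebuild the bootloader's bootmem descriptor so that the kexec'd kernel
 * sees all of DRAM (minus the reserved low region) as free, mirroring
 * what the bootloader itself sets up at a cold boot.
 */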
static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
{
	uint64_t addr;
	struct cvmx_bootmem_desc *bootmem_desc;

	bootmem_desc = cvmx_bootmem_get_desc();

	if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
		mem_size = OCTEON_MAX_PHY_MEM_SIZE;
		pr_err("Error: requested memory too large, truncating to maximum size\n");
	}

	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;

	addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes);
	bootmem_desc->head_addr = 0;

	if (mem_size <= OCTEON_DDR0_SIZE) {
		__cvmx_bootmem_phy_free(addr,
				mem_size - RESERVE_LOW_MEM -
				low_reserved_bytes, 0);
		return;
	}

	__cvmx_bootmem_phy_free(addr,
			OCTEON_DDR0_SIZE - RESERVE_LOW_MEM -
			low_reserved_bytes, 0);
	mem_size -= OCTEON_DDR0_SIZE;

	if (mem_size > OCTEON_DDR1_SIZE) {
		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
				mem_size - OCTEON_DDR1_SIZE, 0);
	} else {
		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
	}
}
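
/*
 * Prepare a kexec image for booting: if one of the segments starts with
 * the string "kexec", treat it as the new command line and split it into
 * the argc/argv form that the bootloader normally hands to the kernel.
 */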
static int octeon_kexec_prepare(struct kimage *image)
{
	int i;
	char *bootloader = "kexec";

	octeon_boot_desc_ptr->argc = 0;
	for (i = 0; i < image->nr_segments; i++) {
		if (!strncmp(bootloader, (char *)image->segment[i].buf,
				strlen(bootloader))) {
			/*
			 * convert command line string to array
			 * of parameters (as bootloader does).
			 */
			int argc = 0, offt;
			char *str = (char *)image->segment[i].buf;
			char *ptr = strchr(str, ' ');
			while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
				*ptr = '\0';
				if (ptr[1] != ' ') {
					offt = (int)(ptr - str + 1);
					octeon_boot_desc_ptr->argv[argc] =
						image->segment[i].mem + offt;
					argc++;
				}
				ptr = strchr(ptr + 1, ' ');
			}
			octeon_boot_desc_ptr->argc = argc;
			break;
		}
	}

	/*
	 * Information about segments will be needed during pre-boot memory
	 * initialization.
	 */
	kimage_ptr = image;
	return 0;
}
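
/*
 * Shared shutdown work for both normal kexec and crash kexec: quiet the
 * watchdogs, rebuild or trim the bootmem descriptor, and hand the boot
 * descriptor to the next kernel through the kexec argument registers.
 */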
static void octeon_generic_shutdown(void)
{
	int i;
#ifdef CONFIG_SMP
	int cpu;
#endif
	struct cvmx_bootmem_desc *bootmem_desc;
	void *named_block_array_ptr;

	bootmem_desc = cvmx_bootmem_get_desc();
	named_block_array_ptr =
		cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);

#ifdef CONFIG_SMP
	/* disable watchdogs */
	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
	if (kimage_ptr != kexec_crash_image) {
		memset(named_block_array_ptr, 0x0,
		       CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
		       sizeof(struct cvmx_bootmem_named_block_desc));
		/*
		 * Mark all memory (except low 0x100000 bytes) as free.
		 * It is the same thing that bootloader does.
		 */
		kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
				   0x100000);
		/*
		 * Allocate all segments to avoid their corruption during boot.
		 */
		for (i = 0; i < kimage_ptr->nr_segments; i++)
			cvmx_bootmem_alloc_address(
				kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
				kimage_ptr->segment[i].mem - PAGE_SIZE,
				PAGE_SIZE);
	} else {
		/*
		 * Do not mark all memory as free. Free only named sections
		 * leaving the rest of memory unchanged.
		 */
		struct cvmx_bootmem_named_block_desc *ptr =
			(struct cvmx_bootmem_named_block_desc *)
			named_block_array_ptr;

		for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
			if (ptr[i].size)
				cvmx_bootmem_free_named(ptr[i].name);
	}
	kexec_args[2] = 1UL; /* running on octeon_main_processor */
	kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#ifdef CONFIG_SMP
	secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
	secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#endif
}
static void octeon_shutdown(void)
{
	octeon_generic_shutdown();
#ifdef CONFIG_SMP
	smp_call_function(octeon_kexec_smp_down, NULL, 0);
	smp_wmb();
	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}

static void octeon_crash_shutdown(struct pt_regs *regs)
{
	octeon_generic_shutdown();
	default_machine_crash_shutdown(regs);
}

#endif /* CONFIG_KEXEC */
#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif

/*
 * The crashkernel cmdline parameter is parsed _after_ memory setup, so
 * we also parse it here (workaround for EHB5200).
 */
static uint64_t crashk_size, crashk_base;
static int octeon_uart;

extern asmlinkage void handle_int(void);
extern asmlinkage void plat_irq_dispatch(void);
/**
 * Return non-zero if we are currently running in the Octeon simulator
 */
int octeon_is_simulation(void)
{
	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);
/**
 * Return true if Octeon is in PCI Host mode. This means
 * Linux can control the PCI bus.
 *
 * Returns non-zero if Octeon is in host mode.
 */
int octeon_is_pci_host(void)
{
	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
}
/**
 * Get the clock rate of Octeon
 *
 * Returns Clock rate in HZ
 */
uint64_t octeon_get_clock_rate(void)
{
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

	return sysinfo->cpu_clock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);

static u64 octeon_io_clock_rate;

u64 octeon_get_io_clock_rate(void)
{
	return octeon_io_clock_rate;
}
EXPORT_SYMBOL(octeon_get_io_clock_rate);
/**
 * Write to the LCD display connected to the bootbus. This display
 * exists on most Cavium evaluation boards. If it doesn't exist, then
 * this function doesn't do anything.
 *
 * @s:	    String to write
 */
void octeon_write_lcd(const char *s)
{
	if (octeon_bootinfo->led_display_base_addr) {
		void __iomem *lcd_address =
			ioremap_nocache(octeon_bootinfo->led_display_base_addr,
					8);
		int i;

		for (i = 0; i < 8; i++, s++) {
			if (*s)
				iowrite8(*s, lcd_address + i);
			else
				iowrite8(' ', lcd_address + i);
		}
		iounmap(lcd_address);
	}
}
/**
 * Return the console uart passed by the bootloader
 *
 * Returns uart (0 or 1)
 */
int octeon_get_boot_uart(void)
{
	int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
	uart = 1;
#else
	uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
		1 : 0;
#endif
	return uart;
}

/**
 * Get the coremask Linux was booted on.
 *
 * Returns Core mask
 */
int octeon_get_boot_coremask(void)
{
	return octeon_boot_desc_ptr->core_mask;
}
/**
 * Check the hardware BIST results for a CPU
 */
void octeon_check_cpu_bist(void)
{
	const int coreid = cvmx_get_core_num();
	unsigned long long mask;
	unsigned long long bist_val;

	/* Check BIST results for COP0 registers */
	mask = 0x1f00000000ull;
	bist_val = read_octeon_c0_icacheerr();
	if (bist_val & mask)
		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
		       coreid, bist_val);

	bist_val = read_octeon_c0_dcacheerr();
	if (bist_val & 1)
		pr_err("Core%d L1 Dcache parity error: CacheErr(dcache) = 0x%llx\n",
		       coreid, bist_val);

	mask = 0xfc00000000000000ull;
	bist_val = read_c0_cvmmemctl();
	if (bist_val & mask)
		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
		       coreid, bist_val);

	write_octeon_c0_dcacheerr(0);
}
/**
 * Reboot Octeon
 *
 * @command: Command to pass to the bootloader. Currently ignored.
 */
static void octeon_restart(char *command)
{
	/* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif

	mb();
	while (1)
		cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}
/**
 * Permanently stop a core.
 *
 * @arg: Ignored.
 */
static void octeon_kill_core(void *arg)
{
	if (octeon_is_simulation())
		/* A break instruction causes the simulator to stop a core */
		asm volatile ("break" ::: "memory");

	local_irq_disable();
	/* Disable watchdog on this core. */
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
	/* Spin in a low power mode. */
	while (true)
		asm volatile ("wait" ::: "memory");
}
/**
 * Halt the system
 */
static void octeon_halt(void)
{
	smp_call_function(octeon_kill_core, NULL, 0);

	switch (octeon_bootinfo->board_type) {
	case CVMX_BOARD_TYPE_NAO38:
		/* Driving a 1 to GPIO 12 shuts off this board */
		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
		break;
	default:
		octeon_write_lcd("PowerOff");
		break;
	}

	octeon_kill_core(NULL);
}
/**
 * Handle all the error condition interrupts that might occur.
 */
#ifdef CONFIG_CAVIUM_DECODE_RSL
static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
{
	cvmx_interrupt_rsl_decode();
	return IRQ_HANDLED;
}
#endif
/**
 * Return a string representing the system type
 */
const char *octeon_board_type_string(void)
{
	static char name[80];

	sprintf(name, "%s (%s)",
		cvmx_board_type_to_string(octeon_bootinfo->board_type),
		octeon_model_get_string(read_c0_prid()));
	return name;
}

const char *get_system_type(void)
	__attribute__ ((alias("octeon_board_type_string")));
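
/*
 * Per-core user I/O setup: program CP0_CVMMEMCTL (write-buffer, XKPHYS
 * access and CVMSEG controls) and set defaults for the FAU and POW
 * hardware timeouts. The CVMSEG size notice is printed on core 0 only.
 */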
void octeon_user_io_init(void)
{
	union octeon_cvmemctl cvmmemctl;
	union cvmx_iob_fau_timeout fau_timeout;
	union cvmx_pow_nw_tim nm_tim;

	/* Get the current settings for CP0_CVMMEMCTL_REG */
	cvmmemctl.u64 = read_c0_cvmmemctl();
	/* R/W If set, marked write-buffer entries time out the same
	 * as other entries; if clear, marked write-buffer entries
	 * use the maximum timeout. */
	cvmmemctl.s.dismarkwblongto = 1;
	/* R/W If set, a merged store does not clear the write-buffer
	 * entry timeout state. */
	cvmmemctl.s.dismrgclrwbto = 0;
	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
	 * word location for an IOBDMA. The other 8 bits come from the
	 * SCRADDR field of the IOBDMA. */
	cvmmemctl.s.iobdmascrmsb = 0;
	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
	 * clear, SYNCWS and SYNCS only order unmarked
	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
	 * set. */
	cvmmemctl.s.syncwsmarked = 0;
	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
	cvmmemctl.s.dissyncws = 0;
	/* R/W If set, no stall happens on write buffer full. */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		cvmmemctl.s.diswbfst = 1;
	else
		cvmmemctl.s.diswbfst = 0;
	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with VA<48>==0 */
	cvmmemctl.s.xkmemenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==0 */
	cvmmemctl.s.xkmemenau = 0;

	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenau = 0;

	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
	 * when this is set) RW, reset to 0. */
	cvmmemctl.s.allsyncw = 0;

	/* R/W If set, no stores merge, and all stores reach the
	 * coherent bus in order. */
	cvmmemctl.s.nomerge = 0;
	/* R/W Selects the bit in the counter used for DID time-outs:
	 * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
	 * between 1x and 2x this interval. For example, with
	 * DIDTTO=3, expiration interval is between 16K and 32K. */
	cvmmemctl.s.didtto = 0;
	/* R/W If set, the (mem) CSR clock never turns off. */
	cvmmemctl.s.csrckalwys = 0;
	/* R/W If set, mclk never turns off. */
	cvmmemctl.s.mclkalwys = 0;
	/* R/W Selects the bit in the counter used for write buffer
	 * flush time-outs: (WBFLT+11) is the bit position in an
	 * internal counter used to determine expiration. The write
	 * buffer expires between 1x and 2x this interval. For
	 * example, with WBFLT = 0, a write buffer expires between 2K
	 * and 4K cycles after the write buffer entry is allocated. */
	cvmmemctl.s.wbfltime = 0;
	/* R/W If set, do not put Istream in the L2 cache. */
	cvmmemctl.s.istrnol2 = 0;

	/*
	 * R/W The write buffer threshold. As per erratum Core-14752
	 * for CN63XX, a sc/scd might fail if the write buffer is
	 * full. Lowering WBTHRESH greatly lowers the chances of the
	 * write buffer ever being full and triggering the erratum.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
		cvmmemctl.s.wbthresh = 4;
	else
		cvmmemctl.s.wbthresh = 10;

	/* R/W If set, CVMSEG is available for loads/stores in
	 * kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
	cvmmemctl.s.cvmsegenak = 1;
#else
	cvmmemctl.s.cvmsegenak = 0;
#endif
	/* R/W If set, CVMSEG is available for loads/stores in
	 * supervisor mode. */
	cvmmemctl.s.cvmsegenas = 0;
	/* R/W If set, CVMSEG is available for loads/stores in user
	 * mode. */
	cvmmemctl.s.cvmsegenau = 0;
	/* R/W Size of local memory in cache blocks, 54 (6912 bytes)
	 * is max legal value. */
	cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;

	write_c0_cvmmemctl(cvmmemctl.u64);

	if (smp_processor_id() == 0)
		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);

	/* Set a default for the hardware timeouts */
	fau_timeout.u64 = 0;
	fau_timeout.s.tout_val = 0xfff;
	/* Disable tagwait FAU timeout */
	fau_timeout.s.tout_enb = 0;
	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);

	nm_tim.u64 = 0;
	/* 4096 cycles */
	nm_tim.s.nw_tim = 3;
	cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);

	write_octeon_c0_icacheerr(0);
	write_c0_derraddr1(0);
}
/**
 * Early entry point for arch setup
 */
void __init prom_init(void)
{
	struct cvmx_sysinfo *sysinfo;
	const char *arg;
	char *p;
	int i;
	int argc;
#ifdef CONFIG_CAVIUM_RESERVE32
	int64_t addr = -1;
#endif
	/*
	 * The bootloader passes a pointer to the boot descriptor in
	 * $a3, this is available as fw_arg3.
	 */
	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
	octeon_bootinfo =
		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));

	sysinfo = cvmx_sysinfo_get();
	memset(sysinfo, 0, sizeof(*sysinfo));
	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
	sysinfo->phy_mem_desc_ptr =
		cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
	sysinfo->core_mask = octeon_bootinfo->core_mask;
	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
	sysinfo->board_type = octeon_bootinfo->board_type;
	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
	       sizeof(sysinfo->mac_addr_base));
	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
	memcpy(sysinfo->board_serial_number,
	       octeon_bootinfo->board_serial_number,
	       sizeof(sysinfo->board_serial_number));
	sysinfo->compact_flash_common_base_addr =
		octeon_bootinfo->compact_flash_common_base_addr;
	sysinfo->compact_flash_attribute_base_addr =
		octeon_bootinfo->compact_flash_attribute_base_addr;
	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* I/O clock runs at a different rate than the CPU. */
		union cvmx_mio_rst_boot rst_boot;

		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
	} else {
		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
	}

	/*
	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
	 */
	if (!octeon_is_simulation() &&
	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
		cvmx_write_csr(CVMX_LED_EN, 0);
		cvmx_write_csr(CVMX_LED_PRT, 0);
		cvmx_write_csr(CVMX_LED_DBG, 0);
		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
		cvmx_write_csr(CVMX_LED_EN, 1);
	}
#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * We need to temporarily allocate all memory in the reserve32
	 * region. This makes sure the kernel doesn't allocate this
	 * memory when it is getting memory from the
	 * bootloader. Later, after the memory allocations are
	 * complete, the reserve32 will be freed.
	 *
	 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
	 * is in case we later use hugetlb entries with it.
	 */
	addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
						  0, 0, 2 << 20,
						  "CAVIUM_RESERVE32", 0);
	if (addr < 0)
		pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
	else
		octeon_reserve32_memory = addr;
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
	} else {
		uint32_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
		/* TLB refill */
		cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
		/* General exception */
		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
		/* Interrupt handler */
		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
	}
#endif
	octeon_check_cpu_bist();

	octeon_uart = octeon_get_boot_uart();

#ifdef CONFIG_SMP
	octeon_write_lcd("LinuxSMP");
#else
	octeon_write_lcd("Linux");
#endif

#ifdef CONFIG_CAVIUM_GDB
	/*
	 * When debugging the linux kernel, force the cores to enter
	 * the debug exception handler to break in.
	 */
	if (octeon_get_boot_debug_flag()) {
		cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
		cvmx_read_csr(CVMX_CIU_DINT);
	}
#endif
	octeon_setup_delays();

	/*
	 * BIST should always be enabled when doing a soft reset. L2
	 * cache locking for instance is not cleared unless BIST is
	 * enabled. Unfortunately, due to chip erratum G-200 for
	 * CN38XX and CN31XX, BIST must be disabled on these parts.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
	    OCTEON_IS_MODEL(OCTEON_CN31XX))
		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
	else
		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);

	/* Default to 64MB in the simulator to speed things up */
	if (octeon_is_simulation())
		MAX_MEMORY = 64ull << 20;
	arg = strstr(arcs_cmdline, "mem=");
	if (arg) {
		MAX_MEMORY = memparse(arg + 4, &p);
		if (MAX_MEMORY == 0)
			MAX_MEMORY = 32ull << 30;
		if (*p == '@')
			RESERVE_LOW_MEM = memparse(p + 1, &p);
	}
	arcs_cmdline[0] = 0;
	argc = octeon_boot_desc_ptr->argc;
	for (i = 0; i < argc; i++) {
		const char *arg =
			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
		if ((strncmp(arg, "MEM=", 4) == 0) ||
		    (strncmp(arg, "mem=", 4) == 0)) {
			MAX_MEMORY = memparse(arg + 4, &p);
			if (MAX_MEMORY == 0)
				MAX_MEMORY = 32ull << 30;
			if (*p == '@')
				RESERVE_LOW_MEM = memparse(p + 1, &p);
		} else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
			__cvmx_interrupt_ecc_report_single_bit_errors = 1;
			pr_notice("Reporting of single bit ECC errors is turned on.\n");
#endif
#ifdef CONFIG_KEXEC
		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
			crashk_size = memparse(arg + 12, &p);
			if (*p == '@')
				crashk_base = memparse(p + 1, &p);
			strcat(arcs_cmdline, " ");
			strcat(arcs_cmdline, arg);
			/*
			 * To do: switch parsing to new style, something like:
			 * parse_crashkernel(arg, sysinfo->system_dram_size,
			 *		     &crashk_size, &crashk_base);
			 */
#endif
		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
			   sizeof(arcs_cmdline) - 1) {
			strcat(arcs_cmdline, " ");
			strcat(arcs_cmdline, arg);
		}
	}
	if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
		strcat(arcs_cmdline, " console=ttyS0,115200");
#else
		if (octeon_uart == 1)
			strcat(arcs_cmdline, " console=ttyS1,115200");
		else
			strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
	}

	if (octeon_is_simulation()) {
		/*
		 * The simulator uses a mtdram device pre-filled with
		 * the filesystem. Also specify the calibration delay
		 * to avoid calculating it every time.
		 */
		strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824");
	}
	mips_hpt_frequency = octeon_get_clock_rate();

	octeon_init_cvmcount();

	_machine_restart = octeon_restart;
	_machine_halt = octeon_halt;

#ifdef CONFIG_KEXEC
	_machine_kexec_shutdown = octeon_shutdown;
	_machine_crash_shutdown = octeon_crash_shutdown;
	_machine_kexec_prepare = octeon_kexec_prepare;
#endif

	octeon_user_io_init();
	register_smp_ops(&octeon_smp_ops);
}
/* Exclude a single page from the regions obtained in plat_mem_setup. */
#ifndef CONFIG_CRASH_DUMP
static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
{
	if (addr > *mem && addr < *mem + *size) {
		u64 inc = addr - *mem;

		add_memory_region(*mem, inc, BOOT_MEM_RAM);
		*mem += inc;
		*size -= inc;
	}

	if (addr == *mem && *size > PAGE_SIZE) {
		*mem += PAGE_SIZE;
		*size -= PAGE_SIZE;
	}
}
#endif /* CONFIG_CRASH_DUMP */
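
/*
 * Pull memory out of the bootloader's cvmx bootmem allocator in 4MB
 * chunks and hand it to the kernel's boot memory map, carving out the
 * crashkernel region and the PCIe BAR1 hole along the way.
 */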
void __init plat_mem_setup(void)
{
	uint64_t mem_alloc_size;
	uint64_t total;
	uint64_t crashk_end;
#ifndef CONFIG_CRASH_DUMP
	int64_t memory;
	uint64_t kernel_start;
	uint64_t kernel_size;
#endif

	total = 0;
	crashk_end = 0;

	/*
	 * The Mips memory init uses the first memory location for
	 * some memory vectors. When SPARSEMEM is in use, it doesn't
	 * verify that the size is big enough for the final
	 * vectors. Making the smallest chunk 4MB seems to be enough
	 * to consistently work.
	 */
	mem_alloc_size = 4 << 20;
	if (mem_alloc_size > MAX_MEMORY)
		mem_alloc_size = MAX_MEMORY;
/* Crashkernel ignores bootmem list. It relies on the mem=X@Y option */
#ifdef CONFIG_CRASH_DUMP
	add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM);
	total += MAX_MEMORY;
#else
#ifdef CONFIG_KEXEC
	if (crashk_size > 0) {
		add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM);
		crashk_end = crashk_base + crashk_size;
	}
#endif
	/*
	 * When allocating memory, we want incrementing addresses from
	 * bootmem_alloc so the code in add_memory_region can merge
	 * regions next to each other.
	 */
	cvmx_bootmem_lock();
	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
		&& (total < MAX_MEMORY)) {
		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
						__pa_symbol(&__init_end), -1,
						0x100000,
						CVMX_BOOTMEM_FLAG_NO_LOCKING);
		if (memory >= 0) {
			u64 size = mem_alloc_size;
#ifdef CONFIG_KEXEC
			uint64_t end;
#endif

			/*
			 * exclude a page at the beginning and end of
			 * the 256MB PCIe 'hole' so the kernel will not
			 * try to allocate multi-page buffers that
			 * span the discontinuity.
			 */
			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
					    &memory, &size);
			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
					    CVMX_PCIE_BAR1_PHYS_SIZE,
					    &memory, &size);
#ifdef CONFIG_KEXEC
			end = memory + mem_alloc_size;

			/*
			 * This function automatically merges address regions
			 * next to each other if they are received in
			 * incrementing order.
			 */
			if (memory < crashk_base && end > crashk_end) {
				/* region is fully in */
				add_memory_region(memory,
						  crashk_base - memory,
						  BOOT_MEM_RAM);
				total += crashk_base - memory;
				add_memory_region(crashk_end,
						  end - crashk_end,
						  BOOT_MEM_RAM);
				total += end - crashk_end;
				continue;
			}

			if (memory >= crashk_base && end <= crashk_end)
				/*
				 * Entire memory region is within the new
				 * kernel's memory, ignore it.
				 */
				continue;

			if (memory > crashk_base && memory < crashk_end &&
			    end > crashk_end) {
				/*
				 * Overlap with the beginning of the region,
				 * reserve the beginning.
				 */
				mem_alloc_size -= crashk_end - memory;
				memory = crashk_end;
			} else if (memory < crashk_base && end > crashk_base &&
				   end < crashk_end)
				/*
				 * Overlap with the beginning of the region,
				 * chop off the end.
				 */
				mem_alloc_size -= end - crashk_base;
#endif
			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
			total += mem_alloc_size;
			/* Recovering mem_alloc_size */
			mem_alloc_size = 4 << 20;
		} else {
			break;
		}
	}
	cvmx_bootmem_unlock();
	/* Add the memory region for the kernel. */
	kernel_start = (unsigned long) _text;
	kernel_size = ALIGN(_end - _text, 0x100000);

	/* Adjust for physical offset. */
	kernel_start &= ~0xffffffff80000000ULL;
	add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * Now that we've allocated the kernel memory it is safe to
	 * free the reserved region. We free it here so that builtin
	 * drivers can use the memory.
	 */
	if (octeon_reserve32_memory)
		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */

	if (total == 0)
		panic("Unable to allocate memory from cvmx_bootmem_phy_alloc\n");
}
/**
 * Emit one character to the boot UART. Exported for use by the
 * watchdog timer.
 */
int prom_putchar(char c)
{
	uint64_t lsrval;

	/* Spin until there is room */
	do {
		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
	} while ((lsrval & 0x20) == 0);

	/* Write the byte */
	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
	return 1;
}
EXPORT_SYMBOL(prom_putchar);
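
/*
 * Late boot cleanup: verify that the Core-14449 workaround is compiled in
 * on CN63XX pass 1 parts by probing for the expected PREF instruction, and
 * hook up the RSL error-decoding interrupt when it is configured.
 */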
void prom_free_prom_memory(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
		/* Check for presence of Core-14449 fix. */
		u32 insn;
		u32 *foo;

		foo = &insn;

		asm volatile("# before" : : : "memory");
		prefetch(foo);
		asm volatile(
			".set push\n\t"
			".set noreorder\n\t"
			"bal 1f\n\t"
			"nop\n"
			"1:\tlw %0,-12($31)\n\t"
			".set pop\n\t"
			: "=r" (insn) : : "$31", "memory");

		if ((insn >> 26) != 0x33)
			panic("No PREF instruction at Core-14449 probe point.");

		if (((insn >> 16) & 0x1f) != 28)
			panic("Core-14449 WAR not in place (%04x).\n"
			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
			      insn);
	}
#ifdef CONFIG_CAVIUM_DECODE_RSL
	cvmx_interrupt_rsl_enable();

	/* Add an interrupt handler for general failures. */
	if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
			"RML/RSL", octeon_rlm_interrupt)) {
		panic("Unable to request_irq(OCTEON_IRQ_RML)");
	}
#endif
}
int octeon_prune_device_tree(void);

extern const char __dtb_octeon_3xxx_begin;
extern const char __dtb_octeon_3xxx_end;
extern const char __dtb_octeon_68xx_begin;
extern const char __dtb_octeon_68xx_end;
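
/*
 * Pick the device tree to use: prefer an FDT handed over by a new enough
 * bootloader, otherwise fall back to the built-in tree for the detected
 * SoC family and prune it to match the actual hardware.
 */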
void __init device_tree_init(void)
{
	int dt_size;
	struct boot_param_header *fdt;
	bool do_prune;

	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
		if (fdt_check_header(fdt))
			panic("Corrupt Device Tree passed to kernel.");
		dt_size = be32_to_cpu(fdt->totalsize);
		do_prune = false;
	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		fdt = (struct boot_param_header *)&__dtb_octeon_68xx_begin;
		dt_size = &__dtb_octeon_68xx_end - &__dtb_octeon_68xx_begin;
		do_prune = true;
	} else {
		fdt = (struct boot_param_header *)&__dtb_octeon_3xxx_begin;
		dt_size = &__dtb_octeon_3xxx_end - &__dtb_octeon_3xxx_begin;
		do_prune = true;
	}

	/* Copy the default tree from init memory. */
	initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
	if (initial_boot_params == NULL)
		panic("Could not allocate initial_boot_params\n");
	memcpy(initial_boot_params, fdt, dt_size);

	if (do_prune) {
		octeon_prune_device_tree();
		pr_info("Using internal Device Tree.\n");
	} else {
		pr_info("Using passed Device Tree.\n");
	}
	unflatten_device_tree();
}
static int __initdata disable_octeon_edac_p;

static int __init disable_octeon_edac(char *str)
{
	disable_octeon_edac_p = 1;
	return 0;
}
early_param("disable_octeon_edac", disable_octeon_edac);
static char *edac_device_names[] = {
	"octeon_l2c_edac",
	"octeon_pc_edac",
};
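
/*
 * Register the Octeon EDAC platform devices (one per entry in
 * edac_device_names, plus one octeon_lmc_edac instance per memory
 * controller) unless EDAC was disabled on the command line.
 */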
static int __init edac_devinit(void)
{
	struct platform_device *dev;
	int i, err = 0;
	int num_lmc;
	char *name;

	if (disable_octeon_edac_p)
		return 0;

	for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
		name = edac_device_names[i];
		dev = platform_device_register_simple(name, -1, NULL, 0);
		if (IS_ERR(dev)) {
			pr_err("Registration of %s failed!\n", name);
			err = PTR_ERR(dev);
		}
	}

	num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
		(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
	for (i = 0; i < num_lmc; i++) {
		dev = platform_device_register_simple("octeon_lmc_edac",
						      i, NULL, 0);
		if (IS_ERR(dev)) {
			pr_err("Registration of octeon_lmc_edac %d failed!\n",
			       i);
			err = PTR_ERR(dev);
		}
	}

	return err;
}
device_initcall(edac_devinit);