arch/mips/cavium-octeon/setup.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 Cavium Networks
 * Copyright (C) 2008 Wind River Systems
 */
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h>	/* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/system.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>
#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
extern int __cvmx_interrupt_ecc_report_single_bit_errors;
extern void cvmx_interrupt_rsl_enable(void);
#endif

extern struct plat_smp_ops octeon_smp_ops;

#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif

#ifdef CONFIG_CAVIUM_RESERVE32
extern uint64_t octeon_reserve32_memory;
#endif
static unsigned long long MAX_MEMORY = 512ull << 20;

struct octeon_boot_descriptor *octeon_boot_desc_ptr;

struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);

#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif

static int octeon_uart;

extern asmlinkage void handle_int(void);
extern asmlinkage void plat_irq_dispatch(void);
/**
 * Return non zero if we are currently running in the Octeon simulator
 *
 * Returns
 */
int octeon_is_simulation(void)
{
        return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);
/**
 * Return true if Octeon is in PCI Host mode. This means
 * Linux can control the PCI bus.
 *
 * Returns Non zero if Octeon is in host mode.
 */
int octeon_is_pci_host(void)
{
#ifdef CONFIG_PCI
        return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
#else
        return 0;
#endif
}
/**
 * Get the clock rate of Octeon
 *
 * Returns Clock rate in HZ
 */
uint64_t octeon_get_clock_rate(void)
{
        if (octeon_is_simulation())
                octeon_bootinfo->eclock_hz = 6000000;
        return octeon_bootinfo->eclock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);
/**
 * Write to the LCD display connected to the bootbus. This display
 * exists on most Cavium evaluation boards. If it doesn't exist, then
 * this function doesn't do anything.
 *
 * @s:      String to write
 */
void octeon_write_lcd(const char *s)
{
        if (octeon_bootinfo->led_display_base_addr) {
                void __iomem *lcd_address =
                        ioremap_nocache(octeon_bootinfo->led_display_base_addr,
                                        8);
                int i;
                for (i = 0; i < 8; i++, s++) {
                        if (*s)
                                iowrite8(*s, lcd_address + i);
                        else
                                iowrite8(' ', lcd_address + i);
                }
                iounmap(lcd_address);
        }
}
/**
 * Return the console uart passed by the bootloader
 *
 * Returns uart (0 or 1)
 */
int octeon_get_boot_uart(void)
{
        int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
        uart = 1;
#else
        uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
                1 : 0;
#endif
        return uart;
}
/**
 * Get the coremask Linux was booted on.
 *
 * Returns Core mask
 */
int octeon_get_boot_coremask(void)
{
        return octeon_boot_desc_ptr->core_mask;
}
/**
 * Check the hardware BIST results for a CPU
 */
void octeon_check_cpu_bist(void)
{
        const int coreid = cvmx_get_core_num();
        unsigned long long mask;
        unsigned long long bist_val;

        /* Check BIST results for COP0 registers */
        mask = 0x1f00000000ull;
        bist_val = read_octeon_c0_icacheerr();
        if (bist_val & mask)
                pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
                       coreid, bist_val);

        bist_val = read_octeon_c0_dcacheerr();
        if (bist_val & 1)
                pr_err("Core%d L1 Dcache parity error: "
                       "CacheErr(dcache) = 0x%llx\n",
                       coreid, bist_val);

        mask = 0xfc00000000000000ull;
        bist_val = read_c0_cvmmemctl();
        if (bist_val & mask)
                pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
                       coreid, bist_val);

        write_octeon_c0_dcacheerr(0);
}
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
/**
 * Called on every core to setup the wired tlb entry needed
 * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
 *
 */
static void octeon_hal_setup_per_cpu_reserved32(void *unused)
{
        /*
         * The config has selected to wire the reserve32 memory for all
         * userspace applications. We need to put a wired TLB entry in for each
         * 512MB of reserve32 memory. We only handle double 256MB pages here,
         * so reserve32 must be a multiple of 512MB.
         */
        uint32_t size = CONFIG_CAVIUM_RESERVE32;
        uint32_t entrylo0 =
                0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
        uint32_t entrylo1 = entrylo0 + (256 << 14);
        uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
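        /*
         * Note on the encoding (assuming the standard MIPS EntryLo/EntryHi
         * layout): the PFN starts at bit 6 of EntryLo, so "phys >> 6" equals
         * (phys >> 12) << 6, and the 0x7 sets the G, V and D bits.  Each
         * wired pair maps 2 x 256MB, so entrylo1 is entrylo0 advanced by
         * 256MB ((256 << 20) >> 6 == 256 << 14), and entryhi places the
         * virtual window directly below the 2GB mark.
         */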
        while (size >= 512) {
#if 0
                pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
                        smp_processor_id(), entryhi);
#endif
                add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
                entrylo0 += 512 << 14;
                entrylo1 += 512 << 14;
                entryhi += 512 << 20;
                size -= 512;
        }
}
#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */
/**
 * Called to release the named block which was used to make sure
 * that nobody used the memory for something else during
 * init. Now we'll free it so userspace apps can use this
 * memory region with bootmem_alloc.
 *
 * This function is called only once from prom_free_prom_memory().
 */
void octeon_hal_setup_reserved32(void)
{
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
        on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1);
#endif
}
/**
 * Reboot Octeon
 *
 * @command: Command to pass to the bootloader. Currently ignored.
 */
static void octeon_restart(char *command)
{
        /* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
        cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif

        mb();
        while (1)
                cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}
/**
 * Permanently stop a core.
 *
 * @arg: Ignored.
 */
static void octeon_kill_core(void *arg)
{
        mb();
        if (octeon_is_simulation()) {
                /* The simulator needs the watchdog to stop for dead cores */
                cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);

                /* A break instruction causes the simulator to stop a core */
                asm volatile ("sync\nbreak");
        }
}
/**
 * Halt the system
 */
static void octeon_halt(void)
{
        smp_call_function(octeon_kill_core, NULL, 0);

        switch (octeon_bootinfo->board_type) {
        case CVMX_BOARD_TYPE_NAO38:
                /* Driving a 1 to GPIO 12 shuts off this board */
                cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
                cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
                break;
        default:
                octeon_write_lcd("PowerOff");
                break;
        }

        octeon_kill_core(NULL);
}
#if 0
/**
 * Platform time init specifics.
 * Returns
 */
void __init plat_time_init(void)
{
        /* Nothing special here, but we are required to have one */
}
#endif
/**
 * Handle all the error condition interrupts that might occur.
 *
 */
#ifdef CONFIG_CAVIUM_DECODE_RSL
static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
{
        cvmx_interrupt_rsl_decode();
        return IRQ_HANDLED;
}
#endif
/**
 * Return a string representing the system type
 *
 * Returns
 */
const char *octeon_board_type_string(void)
{
        static char name[80];
        sprintf(name, "%s (%s)",
                cvmx_board_type_to_string(octeon_bootinfo->board_type),
                octeon_model_get_string(read_c0_prid()));
        return name;
}

const char *get_system_type(void)
        __attribute__ ((alias("octeon_board_type_string")));
void octeon_user_io_init(void)
{
        union octeon_cvmemctl cvmmemctl;
        union cvmx_iob_fau_timeout fau_timeout;
        union cvmx_pow_nw_tim nm_tim;
        uint64_t cvmctl;

        /* Get the current settings for CP0_CVMMEMCTL_REG */
        cvmmemctl.u64 = read_c0_cvmmemctl();
        /* R/W If set, marked write-buffer entries time out the same
         * as other entries; if clear, marked write-buffer entries
         * use the maximum timeout. */
        cvmmemctl.s.dismarkwblongto = 1;
        /* R/W If set, a merged store does not clear the write-buffer
         * entry timeout state. */
        cvmmemctl.s.dismrgclrwbto = 0;
        /* R/W Two bits that are the MSBs of the resultant CVMSEG LM
         * word location for an IOBDMA. The other 8 bits come from the
         * SCRADDR field of the IOBDMA. */
        cvmmemctl.s.iobdmascrmsb = 0;
        /* R/W If set, SYNCWS and SYNCS only order marked stores; if
         * clear, SYNCWS and SYNCS only order unmarked
         * stores. SYNCWSMARKED has no effect when DISSYNCWS is
         * set. */
        cvmmemctl.s.syncwsmarked = 0;
        /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
        cvmmemctl.s.dissyncws = 0;
        /* R/W If set, no stall happens on write buffer full. */
        if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
                cvmmemctl.s.diswbfst = 1;
        else
                cvmmemctl.s.diswbfst = 0;
        /* R/W If set (and SX set), supervisor-level loads/stores can
         * use XKPHYS addresses with VA<48>==0 */
        cvmmemctl.s.xkmemenas = 0;

        /* R/W If set (and UX set), user-level loads/stores can use
         * XKPHYS addresses with VA<48>==0 */
        cvmmemctl.s.xkmemenau = 0;

        /* R/W If set (and SX set), supervisor-level loads/stores can
         * use XKPHYS addresses with VA<48>==1 */
        cvmmemctl.s.xkioenas = 0;

        /* R/W If set (and UX set), user-level loads/stores can use
         * XKPHYS addresses with VA<48>==1 */
        cvmmemctl.s.xkioenau = 0;

        /* R/W If set, all stores act as SYNCW (NOMERGE must be set
         * when this is set) RW, reset to 0. */
        cvmmemctl.s.allsyncw = 0;

        /* R/W If set, no stores merge, and all stores reach the
         * coherent bus in order. */
        cvmmemctl.s.nomerge = 0;
        /* R/W Selects the bit in the counter used for DID time-outs:
         * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
         * between 1x and 2x this interval. For example, with
         * DIDTTO=3, expiration interval is between 16K and 32K. */
        cvmmemctl.s.didtto = 0;
        /* R/W If set, the (mem) CSR clock never turns off. */
        cvmmemctl.s.csrckalwys = 0;
        /* R/W If set, mclk never turns off. */
        cvmmemctl.s.mclkalwys = 0;
        /* R/W Selects the bit in the counter used for write buffer
         * flush time-outs: (WBFLT + 11) is the bit position in an
         * internal counter used to determine expiration. The write
         * buffer expires between 1x and 2x this interval. For
         * example, with WBFLT = 0, a write buffer expires between 2K
         * and 4K cycles after the write buffer entry is allocated. */
        cvmmemctl.s.wbfltime = 0;
        /* R/W If set, do not put Istream in the L2 cache. */
        cvmmemctl.s.istrnol2 = 0;
        /* R/W The write buffer threshold. */
        cvmmemctl.s.wbthresh = 10;
        /* R/W If set, CVMSEG is available for loads/stores in
         * kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        cvmmemctl.s.cvmsegenak = 1;
#else
        cvmmemctl.s.cvmsegenak = 0;
#endif
        /* R/W If set, CVMSEG is available for loads/stores in
         * supervisor mode. */
        cvmmemctl.s.cvmsegenas = 0;
        /* R/W If set, CVMSEG is available for loads/stores in user
         * mode. */
        cvmmemctl.s.cvmsegenau = 0;
        /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
         * is max legal value. */
        cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;


        if (smp_processor_id() == 0)
                pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
        write_c0_cvmmemctl(cvmmemctl.u64);

        /* Move the performance counter interrupts to IRQ 6 */
        cvmctl = read_c0_cvmctl();
        cvmctl &= ~(7 << 7);
        cvmctl |= 6 << 7;
        write_c0_cvmctl(cvmctl);

        /* Set a default for the hardware timeouts */
        fau_timeout.u64 = 0;
        fau_timeout.s.tout_val = 0xfff;
        /* Disable tagwait FAU timeout */
        fau_timeout.s.tout_enb = 0;
        cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);

        nm_tim.u64 = 0;
        /* 4096 cycles */
        nm_tim.s.nw_tim = 3;
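        /*
         * Assuming the POW new-work timer counts in 1024-cycle units, a
         * field value of 3 programs (3 + 1) * 1024 = 4096 cycles.
         */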
        cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);

        write_octeon_c0_icacheerr(0);
        write_c0_derraddr1(0);
}
/**
 * Early entry point for arch setup
 */
void __init prom_init(void)
{
        struct cvmx_sysinfo *sysinfo;
        const int coreid = cvmx_get_core_num();
        int i;
        int argc;
        struct uart_port octeon_port;
#ifdef CONFIG_CAVIUM_RESERVE32
        int64_t addr = -1;
#endif
        /*
         * The bootloader passes a pointer to the boot descriptor in
         * $a3, this is available as fw_arg3.
         */
        octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
        octeon_bootinfo =
                cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
        cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
        /*
         * Only enable the LED controller if we're running on a CN38XX, CN58XX,
         * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
         */
        if (!octeon_is_simulation() &&
            octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
                cvmx_write_csr(CVMX_LED_EN, 0);
                cvmx_write_csr(CVMX_LED_PRT, 0);
                cvmx_write_csr(CVMX_LED_DBG, 0);
                cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
                cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
                cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
                cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
                cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
                cvmx_write_csr(CVMX_LED_EN, 1);
        }
#ifdef CONFIG_CAVIUM_RESERVE32
        /*
         * We need to temporarily allocate all memory in the reserve32
         * region. This makes sure the kernel doesn't allocate this
         * memory when it is getting memory from the
         * bootloader. Later, after the memory allocations are
         * complete, the reserve32 will be freed.
         */
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
        if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
                pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
                       "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
                       "is set\n");
        else
                addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
                                                          0, 0, 512 << 20,
                                                          "CAVIUM_RESERVE32", 0);
#else
        /*
         * Allocate memory for RESERVED32 aligned on 2MB boundary. This
         * is in case we later use hugetlb entries with it.
         */
        addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
                                                  0, 0, 2 << 20,
                                                  "CAVIUM_RESERVE32", 0);
#endif
        if (addr < 0)
                pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
        else
                octeon_reserve32_memory = addr;
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
        if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
                pr_info("Skipping L2 locking due to reduced L2 cache size\n");
        } else {
                uint32_t ebase = read_c0_ebase() & 0x3ffff000;
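                /*
                 * Assuming the exception base sits in KSEG0, the mask drops
                 * the KSEG0 upper bits and the CPUNum field of CP0 EBase,
                 * leaving a 4KB-aligned physical address for the L2 locking
                 * calls below.
                 */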
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
                /* TLB refill */
                cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
                /* General exception */
                cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
                /* Interrupt handler */
                cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
                cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
                cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
                cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
        }
#endif
        sysinfo = cvmx_sysinfo_get();
        memset(sysinfo, 0, sizeof(*sysinfo));
        sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
        sysinfo->phy_mem_desc_ptr =
                cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
        sysinfo->core_mask = octeon_bootinfo->core_mask;
        sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
        sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
        sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
        sysinfo->board_type = octeon_bootinfo->board_type;
        sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
        sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
        memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
               sizeof(sysinfo->mac_addr_base));
        sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
        memcpy(sysinfo->board_serial_number,
               octeon_bootinfo->board_serial_number,
               sizeof(sysinfo->board_serial_number));
        sysinfo->compact_flash_common_base_addr =
                octeon_bootinfo->compact_flash_common_base_addr;
        sysinfo->compact_flash_attribute_base_addr =
                octeon_bootinfo->compact_flash_attribute_base_addr;
        sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
        sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
        sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
        octeon_check_cpu_bist();

        octeon_uart = octeon_get_boot_uart();

        /*
         * Disable All CIU Interrupts. The ones we need will be
         * enabled later. Read the SUM register so we know the write
         * completed.
         */
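        /*
         * Each core has two enable sets in the CIU, one feeding IP2 and one
         * feeding IP3, which is why the register index below is coreid * 2
         * and coreid * 2 + 1 (assumption based on the usual Octeon CIU
         * layout).
         */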
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
        cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));

#ifdef CONFIG_SMP
        octeon_write_lcd("LinuxSMP");
#else
        octeon_write_lcd("Linux");
#endif

#ifdef CONFIG_CAVIUM_GDB
        /*
         * When debugging the linux kernel, force the cores to enter
         * the debug exception handler to break in.
         */
        if (octeon_get_boot_debug_flag()) {
                cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
                cvmx_read_csr(CVMX_CIU_DINT);
        }
#endif
        /*
         * BIST should always be enabled when doing a soft reset. L2
         * Cache locking for instance is not cleared unless BIST is
         * enabled. Unfortunately due to a chip errata G-200 for
         * CN38XX and CN31XX, BIST must be disabled on these parts.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
            OCTEON_IS_MODEL(OCTEON_CN31XX))
                cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
        else
                cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);

        /* Default to 64MB in the simulator to speed things up */
        if (octeon_is_simulation())
                MAX_MEMORY = 64ull << 20;
        arcs_cmdline[0] = 0;
        argc = octeon_boot_desc_ptr->argc;
        for (i = 0; i < argc; i++) {
                const char *arg =
                        cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
                if ((strncmp(arg, "MEM=", 4) == 0) ||
                    (strncmp(arg, "mem=", 4) == 0)) {
                        sscanf(arg + 4, "%llu", &MAX_MEMORY);
                        MAX_MEMORY <<= 20;
                        if (MAX_MEMORY == 0)
                                MAX_MEMORY = 32ull << 30;
                } else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
                        __cvmx_interrupt_ecc_report_single_bit_errors = 1;
                        pr_notice("Reporting of single bit ECC errors is "
                                  "turned on\n");
#endif
                } else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
                           sizeof(arcs_cmdline) - 1) {
                        strcat(arcs_cmdline, " ");
                        strcat(arcs_cmdline, arg);
                }
        }

        if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_GDB_CONSOLE
                strcat(arcs_cmdline, " console=gdb");
#else
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
                strcat(arcs_cmdline, " console=ttyS0,115200");
#else
                if (octeon_uart == 1)
                        strcat(arcs_cmdline, " console=ttyS1,115200");
                else
                        strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
#endif
        }
        if (octeon_is_simulation()) {
                /*
                 * The simulator uses a mtdram device pre filled with
                 * the filesystem. Also specify the calibration delay
                 * to avoid calculating it every time.
                 */
                strcat(arcs_cmdline, " rw root=1f00"
                       " lpj=60176 slram=root,0x40000000,+1073741824");
        }

        mips_hpt_frequency = octeon_get_clock_rate();

        octeon_init_cvmcount();

        _machine_restart = octeon_restart;
        _machine_halt = octeon_halt;
        memset(&octeon_port, 0, sizeof(octeon_port));
        /*
         * For early_serial_setup we don't set the port type or
         * UPF_FIXED_TYPE.
         */
        octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
        octeon_port.iotype = UPIO_MEM;
        /* I/O addresses are every 8 bytes */
        octeon_port.regshift = 3;
        /* Clock rate of the chip */
        octeon_port.uartclk = mips_hpt_frequency;
        octeon_port.fifosize = 64;
        octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
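        /*
         * 0x0001180000000800 is the physical base of the first on-chip
         * UART's registers; the second UART's registers start 1024 bytes
         * above it (assumption based on the MIO UART register layout).
         */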
        octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
        octeon_port.serial_in = octeon_serial_in;
        octeon_port.serial_out = octeon_serial_out;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
        octeon_port.line = 0;
#else
        octeon_port.line = octeon_uart;
#endif
        octeon_port.irq = 42 + octeon_uart;
        early_serial_setup(&octeon_port);

        octeon_user_io_init();
        register_smp_ops(&octeon_smp_ops);
}
void __init plat_mem_setup(void)
{
        uint64_t mem_alloc_size;
        uint64_t total;
        int64_t memory;

        total = 0;

        /* First add the init memory we will be returning. */
        memory = __pa_symbol(&__init_begin) & PAGE_MASK;
        mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
        if (mem_alloc_size > 0) {
                add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
                total += mem_alloc_size;
        }

        /*
         * The Mips memory init uses the first memory location for
         * some memory vectors. When SPARSEMEM is in use, it doesn't
         * verify that the size is big enough for the final
         * vectors. Making the smallest chunk 4MB seems to be enough
         * to consistently work.
         */
        mem_alloc_size = 4 << 20;
        if (mem_alloc_size > MAX_MEMORY)
                mem_alloc_size = MAX_MEMORY;

        /*
         * When allocating memory, we want incrementing addresses from
         * bootmem_alloc so the code in add_memory_region can merge
         * regions next to each other.
         */
        cvmx_bootmem_lock();
        while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
               && (total < MAX_MEMORY)) {
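                /*
                 * The three cases below differ only in the address window
                 * handed to cvmx_bootmem_phy_alloc(size, min, max, align,
                 * flags): a 64-bit kernel may take memory anywhere above
                 * the kernel image, a 32-bit HIGHMEM kernel stays below
                 * 2GB, and a plain 32-bit kernel stays below 512MB.
                 */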
#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
                                                __pa_symbol(&__init_end), -1,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
#elif defined(CONFIG_HIGHMEM)
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
#else
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
#endif
                if (memory >= 0) {
                        /*
                         * This function automatically merges address
                         * regions next to each other if they are
                         * received in incrementing order.
                         */
                        add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
                        total += mem_alloc_size;
                } else {
                        break;
                }
        }
        cvmx_bootmem_unlock();

#ifdef CONFIG_CAVIUM_RESERVE32
        /*
         * Now that we've allocated the kernel memory it is safe to
         * free the reserved region. We free it here so that builtin
         * drivers can use the memory.
         */
        if (octeon_reserve32_memory)
                cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */

        if (total == 0)
                panic("Unable to allocate memory from "
                      "cvmx_bootmem_phy_alloc\n");
}
int prom_putchar(char c)
{
        uint64_t lsrval;

        /* Spin until there is room */
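        /*
         * Bit 0x20 of the line status register is the 16550 "transmitter
         * holding register empty" flag (standard 16550 semantics assumed),
         * so we wait for it before pushing the next byte.
         */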
        do {
                lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
        } while ((lsrval & 0x20) == 0);

        /* Write the byte */
        cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
        return 1;
}
void prom_free_prom_memory(void)
{
#ifdef CONFIG_CAVIUM_DECODE_RSL
        cvmx_interrupt_rsl_enable();

        /* Add an interrupt handler for general failures. */
        if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
                        "RML/RSL", octeon_rlm_interrupt)) {
                panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
        }
#endif

        /* This call is here so that it is performed after any TLB
           initializations. It needs to be after these in case the
           CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */
        octeon_hal_setup_reserved32();
}
static struct octeon_cf_data octeon_cf_data;

static int __init octeon_cf_device_init(void)
{
        union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
        unsigned long base_ptr, region_base, region_size;
        struct platform_device *pd;
        struct resource cf_resources[3];
        unsigned int num_resources;
        int i;
        int ret = 0;

        /* Setup octeon-cf platform device if present. */
        base_ptr = 0;
        if (octeon_bootinfo->major_version == 1
            && octeon_bootinfo->minor_version >= 1) {
                if (octeon_bootinfo->compact_flash_common_base_addr)
                        base_ptr =
                                octeon_bootinfo->compact_flash_common_base_addr;
        } else {
                base_ptr = 0x1d000800;
        }

        if (!base_ptr)
                return ret;

        /* Find CS0 region. */
        for (i = 0; i < 8; i++) {
                mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
                region_base = mio_boot_reg_cfg.s.base << 16;
                region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
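                /*
                 * The boot-bus BASE and SIZE fields appear to be in 64KB
                 * units (hence the << 16), with SIZE stored minus one; that
                 * assumption is what the region math above relies on.
                 */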
                if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
                    && base_ptr < region_base + region_size)
                        break;
        }
        if (i >= 7) {
                /* i and i + 1 are CS0 and CS1, both must be less than 8. */
                goto out;
        }
        octeon_cf_data.base_region = i;
        octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
        octeon_cf_data.base_region_bias = base_ptr - region_base;
        memset(cf_resources, 0, sizeof(cf_resources));
        num_resources = 0;
        cf_resources[num_resources].flags = IORESOURCE_MEM;
        cf_resources[num_resources].start = region_base;
        cf_resources[num_resources].end = region_base + region_size - 1;
        num_resources++;


        if (!(base_ptr & 0xfffful)) {
                /*
                 * Boot loader signals availability of DMA (true_ide
                 * mode) by setting low order bits of base_ptr to
                 * zero.
                 */

                /* Assume that CS1 immediately follows. */
                mio_boot_reg_cfg.u64 =
                        cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
                region_base = mio_boot_reg_cfg.s.base << 16;
                region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
                if (!mio_boot_reg_cfg.s.en)
                        goto out;

                cf_resources[num_resources].flags = IORESOURCE_MEM;
                cf_resources[num_resources].start = region_base;
                cf_resources[num_resources].end = region_base + region_size - 1;
                num_resources++;
                octeon_cf_data.dma_engine = 0;
                cf_resources[num_resources].flags = IORESOURCE_IRQ;
                cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA;
                cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA;
                num_resources++;
        } else {
                octeon_cf_data.dma_engine = -1;
        }

        pd = platform_device_alloc("pata_octeon_cf", -1);
        if (!pd) {
                ret = -ENOMEM;
                goto out;
        }
        pd->dev.platform_data = &octeon_cf_data;

        ret = platform_device_add_resources(pd, cf_resources, num_resources);
        if (ret)
                goto fail;

        ret = platform_device_add(pd);
        if (ret)
                goto fail;

        return ret;
fail:
        platform_device_put(pd);
out:
        return ret;
}
device_initcall(octeon_cf_device_init);