/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */
#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
	.import init_task,data
	.import init_stack,data
	.import fault_vector_20,code	/* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
	.import fault_vector_11,code	/* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/

ENTRY(parisc_kernel_start)
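	/* Entered from the boot loader in physical (real) mode: address
	 * translation is still off, so everything below works on physical
	 * addresses (hence the PA() wrappers) until the rfi at the end
	 * switches to virtual mode. */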
	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7
	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
$bss_loop:
	cmpb,<<,n	%r3,%r4,$bss_loop
	stw,ma		%r0,4(%r3)
	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma		%arg0,4(%r1)
	stw,ma		%arg1,4(%r1)
	stw,ma		%arg2,4(%r1)
	stw,ma		%arg3,4(%r1)
#if defined(CONFIG_PA20)
	/* check for 64-bit capable CPU as required by current kernel */
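	/* The width test below writes 32 into the shift-amount register
	 * (%cr11, SAR) and reads it back: the SAR is 6 bits wide on PA2.0
	 * but only 5 bits wide on PA1.1, so a 32-bit-only CPU reads back 0
	 * and falls through into the panic path. */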
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$cpu_ok
	load32		PA(msg1),%arg0
	ldi		msg1_end-msg1,%arg1
$iodc_panic:
	copy		%arg0, %r10
	copy		%arg1, %r11
	load32		PA(init_stack),%sp
#define MEM_CONS 0x3A0
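	/* 0x3A0 is the offset of the firmware console device descriptor in
	 * page zero; the loads below pick up the HPA, SPA, layers and IODC
	 * entry point that the ENTRY_IO_COUT call needs in order to print
	 * msg1 through firmware before giving up. */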
	ldw		MEM_CONS+32(%r0),%arg0	// HPA
	ldi		ENTRY_IO_COUT,%arg1
	ldw		MEM_CONS+36(%r0),%arg2	// SPA
	ldw		MEM_CONS+8(%r0),%arg3	// layers
	load32		PA(__bss_start),%r1
	stw		%r1,-52(%sp)		// arg4
	stw		%r0,-56(%sp)		// arg5
	stw		%r10,-60(%sp)		// arg6 = ptr to text
	stw		%r11,-64(%sp)		// arg7 = len
	stw		%r0,-68(%sp)		// arg8
	load32		PA(.iodc_panic_ret), %rp
	ldw		MEM_CONS+40(%r0),%r1	// ENTRY_IODC
	bv,n		(%r1)
.iodc_panic_ret:
	b .				/* wait endless with ... */
	or		%r10,%r10,%r10	/* qemu idle sleep */
msg1:	.ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
msg1_end:

$cpu_ok:
#endif /* CONFIG_PA20 */

	.level	PA_ASM_LEVEL
	/* Initialize startup VM. Just map first 16/32 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */
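	/* %cr24 and %cr25 hold the kernel and user page-directory roots
	 * used later by the low-level TLB/fault handlers; this early both
	 * simply point at swapper_pg_dir. */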
#if CONFIG_PGTABLE_LEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd		%r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
	/* Fill in pmd with enough pte directories */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1
1:
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
	ldo		ASM_PMD_ENTRY_SIZE(%r4),%r4	/* delay slot: advance pmd entry ptr */
#else
	ldo		ASM_PGD_ENTRY_SIZE(%r4),%r4	/* delay slot: advance pgd entry ptr */
#endif
	/* Now initialize the PTEs themselves.  We use RWX for
	 * everything ... it will get remapped correctly later */
	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1	/* PTE area */

$pgt_fill_loop:
	STREGM		%r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3	/* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop
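	/* At this point the first (1 << KERNEL_INITIAL_ORDER) bytes of
	 * physical memory (the 16/32 MB mentioned above) are mapped with
	 * kernel RWX permissions, one PTE per page frame; finer-grained
	 * protections are applied later once the real VM is up. */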
	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2
	/* And the RFI Target address too */
	load32		start_parisc,%r11
	/* And the initial task pointer */
	load32		init_task,%r6
	mtctl		%r6,%cr30
	/* And the stack pointer too */
	load32		init_stack,%sp
	tophys_r1	%sp
#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
	.import _mcount,data
	/* initialize mcount FPTR */
	/* Get the global data pointer */
	loadgp
	load32		PA(_mcount), %r10
	std		%dp,0x18(%r10)
#endif
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
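	/* Page zero keeps the firmware (PDC) entry point at these offsets:
	 * the low 32 bits at 0x388 and the upper 32 bits at 0x35C.  They
	 * are combined below into the PDCE_PROC address used for all later
	 * firmware calls. */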
#ifdef CONFIG_64BIT
	/* Get PDCE_PROC for monarch CPU. */
	ldw		MEM_PDC_LO(%r0),%r3
	ldw		MEM_PDC_HI(%r0),%r10
	depd		%r10, 31, 32, %r3	/* move to upper word */
#endif
#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here because
	** of 64-bit function ptrs and the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */
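	/* Slave CPUs are held in firmware until they are woken; once
	 * released they branch to whatever address the OS has stored at
	 * MEM_RENDEZ (0x10) in page zero, so from now on any woken slave
	 * lands in smp_slave_stext below. */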
#ifdef CONFIG_HOTPLUG_CPU
	/* common_stext is far away in another section... jump there */
	load32		PA(common_stext), %rp
	bv,n		(%rp)

	/* common_stext and smp_slave_stext need to be in the text section */
	.text
#endif
	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**	%r11 must contain RFI target address.
	**	%r25/%r26 args to pass to target function
	**	%r2  in case rfi target decides it didn't like something
	**
	**  2.0w:
	**	%r3  PDCE_PROC address
	**	%r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
	*/
common_stext:
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/
#ifdef CONFIG_64BIT
	mfctl		%cr30,%r6	/* PCX-W2 firmware bug */

	/* Save the rfi target address */
	STREG		%r11, TASK_PT_GR11(%r6)
	/* Switch to wide mode; Superdome doesn't support narrow PDC
	** calls.
	*/
1:	mfia		%rp		/* clear upper part of pcoq */
	ldo		2f-1b(%rp),%rp
	depdi		0,31,32,%rp
	bv		(%rp)
	ssm		PSW_SM_W,%r0	/* delay slot: set the PSW W bit */
2:
	/* Set Wide mode as the "Default" (eg for traps)
	** First trap occurs *right* after (or part of) rfi for slave CPUs.
	** Someday, palo might not do this for the Monarch either.
	*/
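	/* The PDC_PSW/PDC_PSW_SET_DEFAULTS call below asks firmware to make
	 * the W (wide) bit part of the default PSW it installs on traps and
	 * interruptions; %r3 still holds the PDCE_PROC entry point fetched
	 * from page zero above. */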
	ldo		PDC_PSW(%r0),%arg0		/* 21 */
	ldo		PDC_PSW_SET_DEFAULTS(%r0),%arg1	/* 2 */
	ldo		PDC_PSW_WIDE_BIT(%r0),%arg2	/* 2 */
	load32		PA(stext_pdc_ret), %rp
	bv		(%r3)
	copy		%r0,%arg3
stext_pdc_ret:
	LDREG		TASK_PT_GR11(%r6), %r11
	tovirt_r1	%r6
	mtctl		%r6,%cr30	/* restore task thread info */
#endif /* CONFIG_64BIT */
#ifndef CONFIG_64BIT
	/* clear all BTLBs */
	ldi		PDC_BLOCK_TLB,%arg0
	load32		PA(stext_pdc_btlb_ret), %rp
	ldw		MEM_PDC_LO(%r0),%r3
	bv		(%r3)
	ldi		PDC_BTLB_PURGE_ALL,%arg1
stext_pdc_btlb_ret:
#endif
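	/* Block TLBs (large, firmware-managed TLB entries) are only dealt
	 * with on 32-bit PA machines here; the firmware call above purges
	 * any entries the boot loader may have left behind before the
	 * kernel takes over TLB management. */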
	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3
	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13
	/* Initialize the global data pointer */
	loadgp
	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10
$is_pa20:
	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14	/* %cr14 is the interruption vector address (IVA) */
	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */

	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */
	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */
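	/* %cr17/%cr18 are the interruption instruction address queues
	 * (space and offset, two entries deep).  rfi reloads the PC queues
	 * from them and the PSW from %ipsw, so after the rfi below the CPU
	 * resumes at %r11 with address translation enabled. */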
	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw

	tovirt_r1	%sp
	/* Jump through hyperspace to Virt Mode */
	rfi
	nop
#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
	break	1,1	/* Break if returned from start_secondary */
	nop
	nop
#endif /*!CONFIG_64BIT*/
/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
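/* Slave CPUs arrive here in physical mode because smp_slave_stext is the
 * address the monarch stored at MEM_RENDEZ in page zero earlier. */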
ENTRY(smp_slave_stext)
	/*
	** Initialize Space registers
	*/
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7
#ifdef CONFIG_64BIT
	/*
	 * Enable Wide mode early, in case the task_struct for the idle
	 * task in smp_init_current_idle_task was allocated above 4GB.
	 */
1:	mfia		%rp		/* clear upper part of pcoq */
	ldo		2f-1b(%rp),%rp
	depdi		0,31,32,%rp
	bv		(%rp)
	ssm		PSW_SM_W,%r0
2:
#endif
	/* Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%r6
	LDREG		0(%r6),%r6
	mtctl		%r6,%cr30
	tophys_r1	%r6
	LDREG		TASK_STACK(%r6),%sp
	tophys_r1	%sp
	ldo		FRAME_SIZE(%sp),%sp
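	/* smp_init_current_idle_task is filled in by the monarch with a
	 * pointer to this CPU's idle task; the slave derives its initial
	 * kernel stack from it (TASK_STACK) and bumps %sp by one standard
	 * frame.  tophys_r1 is needed because we still run physically. */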
	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */
#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy		%arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif
	/* Load RFI target address. */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop
#endif /* CONFIG_SMP */
#ifndef CONFIG_64BIT
	.section .data..ro_after_init

	.align	4
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/