1 /* This file is subject to the terms and conditions of the GNU General Public
2 * License. See the file "COPYING" in the main directory of this archive
5 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
6 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
7 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
8 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
9 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
10 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
12 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
15 #include <asm/asm-offsets.h>
19 #include <asm/assembly.h>
20 #include <asm/pgtable.h>
22 #include <linux/linkage.h>
23 #include <linux/init.h>
38 .import init_thread_union,data
39 .import fault_vector_20,code /* IVA parisc 2.0 32 bit */
41 .import fault_vector_11,code /* IVA parisc 1.1 32 bit */
42 .import $global$ /* forward declaration */
43 #endif /*!CONFIG_64BIT*/
44 ENTRY(parisc_kernel_start)
	/*
	 * Kernel entry point, reached from the boot loader in physical
	 * (real) mode with address translation disabled.  Sets up a
	 * minimal boot VM mapping, saves boot-loader arguments, then
	 * transfers to virtual-mode C code (start_parisc) via rfi.
	 * NOTE(review): this chunk is fragmentary — several instructions
	 * and local labels referenced below are not visible here.
	 */
48 /* Make sure sr4-sr7 are set to zero for the kernel address space */
54 /* Clear BSS (shouldn't the boot loader do this?) */
56 .import __bss_start,data
57 .import __bss_stop,data
59 load32 PA(__bss_start),%r3
60 load32 PA(__bss_stop),%r4
	/* Unsigned compare-and-branch: keep looping while %r3 < %r4.
	 * NOTE(review): the store that zeroes memory and advances %r3
	 * inside $bss_loop is not visible in this fragment — confirm. */
62 cmpb,<<,n %r3,%r4,$bss_loop
65 /* Save away the arguments the boot loader passed in (32 bit args) */
66 load32 PA(boot_args),%r1
72 /* Initialize startup VM. Just map first 16/32 MB of memory */
73 load32 PA(swapper_pg_dir),%r4
74 mtctl %r4,%cr24 /* Initialize kernel root pointer */
75 mtctl %r4,%cr25 /* Initialize user root pointer */
	/* 3-level page tables: build the pgd entry pointing at the pmd,
	 * then advance %r4 to the first pmd entry to fill. */
77 #if CONFIG_PGTABLE_LEVELS == 3
80 shrd %r5,PxD_VALUE_SHIFT,%r3
81 ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
82 stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
83 ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
85 /* 2-level page table, so pmd == pgd */
86 ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
89 /* Fill in pmd with enough pte directories */
91 SHRREG %r1,PxD_VALUE_SHIFT,%r3
92 ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
94 ldi ASM_PT_INITIAL,%r1
	/* Advance %r3 by one page-table's worth of PFNs per iteration;
	 * entry-size step differs between 3- and 2-level configs. */
98 ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
100 #if CONFIG_PGTABLE_LEVELS == 3
101 ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
103 ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
107 /* Now initialize the PTEs themselves. We use RWX for
108 * everything ... it will get remapped correctly later */
109 ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
110 load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	/* PTE fill loop: store a pte, bump the PFN, decrement the count
	 * in %r11 and loop while it is still > 0. */
114 STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
115 ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
116 addib,> -1,%r11,$pgt_fill_loop
119 /* Load the return address...er...crash 'n burn */
122 /* And the RFI Target address too */
123 load32 start_parisc,%r11
125 /* And the initial task pointer */
126 load32 init_thread_union,%r6
129 /* And the stack pointer too */
130 ldo THREAD_SZ_ALGN(%r6),%sp
133 /* Set the smp rendezvous address into page zero.
134 ** It would be safer to do this in init_smp_config() but
135 ** it's just way easier to deal with here because
136 ** of 64-bit function ptrs and the address is local to this file.
138 load32 PA(smp_slave_stext),%r10
139 stw %r10,0x10(%r0) /* MEM_RENDEZ */
140 stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
	/*
	 * NOTE(review): the common_stext label itself is not visible in
	 * this fragment; the register contract below documents what the
	 * caller (monarch path above, or slave path at the bottom) must
	 * have set up before falling into this common code.
	 */
146 ** Code Common to both Monarch and Slave processors.
150 ** %r11 must contain RFI target address.
151 ** %r25/%r26 args to pass to target function
152 ** %r2 in case rfi target decides it didn't like something
155 ** %r3 PDCE_PROC address
156 ** %r11 RFI target address
158 ** Caller must init: SR4-7, %sp, %r10, %cr24/25,
164 /* Clear PDC entry point - we won't use it */
165 stw %r0,0x10(%r0) /* MEM_RENDEZ */
166 stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
167 #endif /*CONFIG_SMP*/
	/* Stash the rfi target in the task's pt_regs save area: the PDC
	 * firmware call below is allowed to clobber caller registers. */
172 /* Save the rfi target address */
173 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
175 std %r11, TASK_PT_GR11(%r10)
176 /* Switch to wide mode. Superdome doesn't support narrow PDC
179 1: mfia %rp /* clear upper part of pcoq */
185 /* Set Wide mode as the "Default" (eg for traps)
186 ** First trap occurs *right* after (or part of) rfi for slave CPUs.
187 ** Someday, palo might not do this for the Monarch either.
	/* Fetch the 64-bit PDC firmware entry point from page zero:
	 * low word at 0x388, high word at 0x35C, merged into %r3. */
190 #define MEM_PDC_LO 0x388
191 #define MEM_PDC_HI 0x35C
192 ldw MEM_PDC_LO(%r0),%r3
193 ldw MEM_PDC_HI(%r0),%r6
194 depd %r6, 31, 32, %r3 /* move to upper word */
196 mfctl %cr30,%r6 /* PCX-W2 firmware bug */
	/* PDC_PSW / PDC_PSW_SET_DEFAULTS(WIDE_BIT): ask firmware to make
	 * wide mode the default PSW for traps/interruptions. */
198 ldo PDC_PSW(%r0),%arg0 /* 21 */
199 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
200 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
201 load32 PA(stext_pdc_ret), %rp
206 mtctl %r6,%cr30 /* restore task thread info */
208 /* restore rfi target address*/
209 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
211 ldd TASK_PT_GR11(%r10), %r11
215 /* PARANOID: clear user scratch/user space SR's */
221 /* Initialize Protection Registers */
227 /* Initialize the global data pointer */
230 /* Set up our interrupt table. HPMCs might not work after this!
232 * We need to install the correct iva for PA1.1 or PA2.0. The
233 * following short sequence of instructions can determine this
234 * (without being illegal on a PA1.1 machine).
	/* %r10 != 0 means a PA2.0 CPU was detected; otherwise fall
	 * through and use the PA1.1 fault vector. */
242 comib,<>,n 0,%r10,$is_pa20
243 ldil L%PA(fault_vector_11),%r10
245 ldo R%PA(fault_vector_11)(%r10),%r10
248 .level LEVEL /* restore 1.1 || 2.0w */
249 #endif /*!CONFIG_64BIT*/
250 load32 PA(fault_vector_20),%r10
255 b aligned_rfi /* Prepare to RFI! Man all the cannons! */
	/* aligned_rfi sequence: quiesce the PSW, then load the new space
	 * and offset queues before the (not visible here) rfi. */
262 rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */
263 /* Don't need NOPs, have 8 compliant insn before rfi */
265 mtctl %r0,%cr17 /* Clear IIASQ tail */
266 mtctl %r0,%cr17 /* Clear IIASQ head */
268 /* Load RFI target into PC queue */
269 mtctl %r11,%cr18 /* IIAOQ head */
271 mtctl %r11,%cr18 /* IIAOQ tail */
	/* KERNEL_PSW is the PSW to be installed for virtual-mode entry;
	 * NOTE(review): the mtctl/rfi that consume %r10 are not visible
	 * in this fragment. */
273 load32 KERNEL_PSW,%r10
276 /* Jump through hyperspace to Virt Mode */
284 .import smp_init_current_idle_task,data
285 .import smp_callin,code
291 break 1,1 /* Break if returned from start_secondary */
295 #endif /*!CONFIG_64BIT*/
297 /***************************************************************************
298 * smp_slave_stext is executed by all non-monarch Processors when the Monarch
299 * pokes the slave CPUs in smp.c:smp_boot_cpus().
301 * Once here, registers values are initialized in order to branch to virtual
302 * mode. Once all available/eligible CPUs are in virtual mode, all are
303 * released and start out by executing their own idle task.
304 *****************************************************************************/
310 ** Initialize Space registers
317 /* Initialize the SP - monarch sets up smp_init_current_idle_task */
	/* Dereference twice: pointer cell -> task struct -> thread info;
	 * then give the slave its own stack above the thread area. */
318 load32 PA(smp_init_current_idle_task),%sp
319 LDREG 0(%sp),%sp /* load task address */
321 LDREG TASK_THREAD_INFO(%sp),%sp
322 mtctl %sp,%cr30 /* store in cr30 */
323 ldo THREAD_SZ_ALGN(%sp),%sp
325 /* point CPU to kernel page tables */
326 load32 PA(swapper_pg_dir),%r4
327 mtctl %r4,%cr24 /* Initialize kernel root pointer */
328 mtctl %r4,%cr25 /* Initialize user root pointer */
331 /* Setup PDCE_PROC entry */
334 /* Load RFI *return* address in case smp_callin bails */
335 load32 smp_callin_rtn,%r2
338 /* Load RFI target address. */
339 load32 smp_callin,%r11
341 /* ok...common code can handle the rest */
346 #endif /* CONFIG_SMP */
348 ENDPROC(parisc_kernel_start)
351 .section .data..read_mostly
	/*
	 * 32-bit only: declaration of $global$, the global data pointer
	 * anchor imported as a forward declaration near the top of the
	 * file.  NOTE(review): the $global$ label and its storage word
	 * are not visible in this fragment — presumably they sit between
	 * these directives; confirm against the full file.
	 */
354 .export $global$,data
356 .type $global$,@object
360 #endif /*!CONFIG_64BIT*/