/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

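/*
 * Worked example (illustrative values, not from the original source):
 * with PAGE_OFFSET = 0xC0000000 and TEXT_OFFSET = 0x8000,
 * KERNEL_RAM_VADDR = 0xC0008000, so the classic (non-LPAE) tables sit
 * at 0xC0008000 - 0x4000 = 0xC0004000, i.e. the 16K directly below
 * the kernel image.
 */
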
	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm

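/*
 * Sketch of what the macro computes (assumed example values, not from
 * the original source): \rd = \phys + TEXT_OFFSET - PG_DIR_SIZE, the
 * physical counterpart of swapper_pg_dir.  With RAM at 0x80000000 and
 * classic tables this is 0x80008000 - 0x4000 = 0x80004000.
 */
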
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB( it	lo )				@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

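	/*
	 * Note (descriptive comment, not in the original): ID_MMFR0[3:0]
	 * is the VMSA support field; a value of 5 or more indicates
	 * VMSAv7 with the long-descriptor translation table format that
	 * LPAE requires, so anything lower is rejected here.
	 */
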
#ifndef CONFIG_XIP_KERNEL
	adr_l	r8, _text			@ __pa(_text)
	sub	r8, r8, #TEXT_OFFSET		@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 *  r1 - machine type
	 *  r2 - boot data (atags/dt) pointer
	 *  r4 - translation table base (low word)
	 *  r5 - translation table base (high word, if LPAE)
	 *  r8 - translation table base 1 (pfn if LPAE)
	 *  r9 - cpuid
	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, r1, r2, r4, and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0				@ high TTBR0
	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg

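/*
 * Note on the call above (descriptive comment, not in the original):
 * PROCINFO_INITFUNC is stored as an offset relative to the proc_info
 * entry itself, so adding r10 (the physical address of that entry)
 * turns it into a callable physical address while the MMU is still off.
 */
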
/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr_l	r5, __turn_mmu_on		@ _pa(__turn_mmu_on)
	adr_l	r6, __turn_mmu_on_end		@ _pa(__turn_mmu_on_end)
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

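	/*
	 * Why this is needed (descriptive comment, not in the original):
	 * when __turn_mmu_on writes the control register, the PC still
	 * holds a physical address, so the section(s) covering that code
	 * must be mapped virtual == physical or the very next instruction
	 * fetch would fault.
	 */
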
	/*
	 * Map our RAM from the start to the end of the kernel .bss section.
	 */
	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
	ldr	r6, =(_end - 1)
	orr	r3, r8, r7
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b

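	/*
	 * Worked example (illustrative values, not from the original
	 * source): with classic tables, SECTION_SHIFT = 20 and
	 * PMD_ORDER = 2, so a virtual address selects the table entry at
	 * r4 + (va >> 18); for PAGE_OFFSET = 0xC0000000 the first kernel
	 * entry sits at offset 0x3000, and each entry maps one 1MB section.
	 */
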
#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	cmp	r2, #0
	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
	addne	r3, r3, r4
	orrne	r6, r7, r0, lsl #SECTION_SHIFT
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg

#if defined(CONFIG_SMP)
	.text
	.arm
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)				@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr_l	r3, secondary_data
	mov_l	r12, __secondary_switched
	ldrd	r4, r5, [r3, #0]		@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
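	/*
	 * Note (descriptive comment, not in the original): ldrd fills r4
	 * from the lower address and r5 from the higher one, but on BE8
	 * the 64-bit pgdir value keeps its most significant word first in
	 * memory, so the words arrive swapped; the three XORs above undo
	 * that without needing a scratch register.
	 */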
	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

ENTRY(__secondary_switched)
	ldr_l	r7, secondary_data + 12		@ get secondary_data.stack
	mov	sp, r7
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

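/*
 * Layout assumed above (descriptive comment, not in the original):
 * struct secondary_data packs pgdir at offset 0 (a 64-bit value),
 * swapper_pg_dir at offset 8 and the stack pointer at offset 12,
 * matching the ldrd/ldr/ldr_l offsets used by the secondary boot path.
 */
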
#endif /* defined(CONFIG_SMP) */

/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0)
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

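/*
 * Note on __enable_mmu above (descriptive comment, not in the
 * original): CR_A/CR_C/CR_Z/CR_I are the SCTLR alignment-check,
 * D-cache, branch-predictor and I-cache enable bits from <asm/cp15.h>;
 * the conditional blocks simply mask off the features the kernel
 * configuration asked to keep disabled before r0 is written to the
 * control register in __turn_mmu_on.
 */
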
/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	instr_sync
	mov	r3, r3
	mov	r3, r13
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection

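/*
 * Note on __turn_mmu_on above (interpretive comment, not in the
 * original): the mov r0, r0 and mov r3, r3 instructions are deliberate
 * NOPs that keep the pipeline behaviour predictable around the SCTLR
 * write, and reading back the ID register is the classic CP15 idiom to
 * ensure the write has taken effect before jumping through r13 to the
 * new virtual address.
 */
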
#ifdef CONFIG_SMP_ON_UP
	__HEAD
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

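	/*
	 * Note (descriptive comment, not in the original): MPIDR[31] = 1
	 * means the multiprocessing extensions are implemented and
	 * MPIDR[30] = 0 means the core is not flagged uniprocessor, so
	 * only the value 0x80000000 under the 0xc0000000 mask indicates
	 * a genuine SMP part.
	 */
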
	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,
					@ so it's an SMP device
	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ so it's an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

__fixup_smp_on_up:
	adr_l	r4, __smpalt_begin
	adr_l	r5, __smpalt_end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)
#endif

	.text
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr
	ldmia	r4, {r0, r6}
 ARM(	str	r6, [r0, r4]	)
 THUMB(	add	r0, r0, r4	)
	add	r4, r4, #8
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r0.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

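/*
 * Record format assumed above (descriptive comment, not in the
 * original): each entry between __smpalt_begin and __smpalt_end is an
 * 8-byte pair emitted by the ALT_SMP/ALT_UP macros - a place-relative
 * offset followed by the UP replacement instruction - which is why the
 * loop loads two words, patches at r0 + r4, and advances r4 by 8.
 */
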
ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#include "head-common.S"