/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/linkage.h>

#define DEBUG
#define JTAG

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG
#if defined(CONFIG_DEBUG_DC21285_PORT)
		.macro	loadsp, rb
		mov	\rb, #0x42000000
		.endm
		.macro	writeb, rb
		str	\rb, [r3, #0x160]
		.endm
#elif defined(CONFIG_FOOTBRIDGE)
		.macro	loadsp, rb
		mov	\rb, #0x7c000000
		.endm
		.macro	writeb, rb
		strb	\rb, [r3, #0x3f8]
		.endm
#elif defined(CONFIG_ARCH_RPC)
		.macro	loadsp, rb
		mov	\rb, #0x03000000
		orr	\rb, \rb, #0x00010000
		.endm
		.macro	writeb, rb
		strb	\rb, [r3, #0x3f8 << 2]
		.endm
#elif defined(CONFIG_ARCH_INTEGRATOR)
		.macro	loadsp, rb
		mov	\rb, #0x16000000
		.endm
		.macro	writeb, rb
		strb	\rb, [r3, #0]
		.endm
#elif defined(CONFIG_ARCH_PXA) /* Xscale-type */
		.macro	loadsp, rb
		mov	\rb, #0x40000000
		orr	\rb, \rb, #0x00100000
		.endm
		.macro	writeb, rb
		strb	\rb, [r3, #0]
		.endm
#elif defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb
		mov	\rb, #0x80000000	@ physical base address
#  if defined(CONFIG_DEBUG_LL_SER3)
		add	\rb, \rb, #0x00050000	@ Ser3
#  else
		add	\rb, \rb, #0x00010000	@ Ser1
#  endif
		.endm
		.macro	writeb, rb
		str	\rb, [r3, #0x14]	@ UTDR
		.endm
#else
#error no serial architecture defined
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r8, 8		/* decompressed kernel end  */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
start:
#ifndef CONFIG_LAB
		.type	start,#function
		.rept	8
		mov	r0, r0
		.endr

		b	1f
		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
1:		mov	r7, r1			@ save architecture ID
		mov	r8, #0			@ save r0

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
		swi	0x123456		@ angel_SWI_ARM
not_angel:
		mrs	r2, cpsr		@ turn off interrupts to
		orr	r2, r2, #0xc0		@ prevent angel from running
		msr	cpsr_c, r2
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */
#endif /* !CONFIG_LAB */
		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7 and r8.
		 */

		.text
#ifdef DEBUG
		kputc #'H'
#endif
		adr	r0, LC0
		mov	r1, #8
		bl	phex
		adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip, sp}
		subs	r0, r0, r1		@ calculate the delta offset

						@ if delta is zero, we're
		beq	not_relocated		@ running at the address we
						@ were linked at.
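
		/*
		 * The first word of LC0 is LC0's own link-time address;
		 * the adr above gave its run-time address, so r0 now
		 * holds the offset the image was moved by, which is
		 * applied to the pointers fixed up below.
		 */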

		/*
		 * We're running at a different address.  We need to fix
		 * up various pointers:
		 *   r5 - zImage base address
		 *   r6 - GOT start
		 *   ip - GOT end
		 */
		add	r5, r5, r0
		add	r6, r6, r0
		add	ip, ip, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 *   r2 - BSS start
		 *   r3 - BSS end
		 *   sp - stack pointer
		 */
		add	r2, r2, r0
		add	r3, r3, r0
		add	sp, sp, r0
		mov	r9,r0
		mov	r4,r1
		mov	r5,ip
		kputc #'G'
		mov	r0,r9
		mov	r1,r4
		mov	ip,r5

		/*
		 * Relocate all entries in the GOT table.
		 */
1:		mov	r9,r0
		kputc	#'R'
		mov	r0,r9
		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b
		kputc #'G'
#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

#ifdef DEBUG
		kputc #'C'	@ say that BSS is clear
#endif

		/*
		 * The C runtime environment should now be set up
		 * sufficiently.  Turn the cache on, set up some
		 * pointers, and start decompressing.
		 */
		bl	cache_on

		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
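
		/*
		 * r1/r2 bound the scratch heap passed to the C code:
		 * per misc.c, decompress_kernel() takes (output start,
		 * free memory start, free memory end, architecture ID)
		 * in r0-r3, so the decompressor workspace is the 64k
		 * above the stack.
		 */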

/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2 -> OK
 *   r4 + image length <= r5 -> OK
 */
		cmp	r4, r2
		bhs	wont_overwrite
		add	r0, r4, #4096*1024	@ 4MB largest kernel size
		cmp	r0, r5
		bls	wont_overwrite

		mov	r5, r2			@ decompress after malloc space
		mov	r0, r5
		mov	r3, r7
		bl	decompress_kernel

		add	r0, r0, #127
		bic	r0, r0, #127		@ align the kernel length

/*
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8-r14 = unused
 */
		add	r1, r5, r0		@ end of decompressed kernel
		adr	r2, reloc_start
		ldr	r3, LC1
		add	r3, r2, r3
1:		ldmia	r2!, {r8 - r13}		@ copy relocation code
		stmia	r1!, {r8 - r13}
		ldmia	r2!, {r8 - r13}
		stmia	r1!, {r8 - r13}
		cmp	r2, r3
		blo	1b

		bl	cache_clean_flush
		add	pc, r5, r0		@ call relocation code

/*
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 *
 * r4     = kernel execution address
 * r7     = architecture ID
 */
wont_overwrite:	mov	r0, r4
		mov	r3, r7
		bl	decompress_kernel
		b	call_kernel

		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_load_addr		@ r4
		.word	_start			@ r5
		.word	_got_start		@ r6
		.word	_got_end		@ ip
		.word	user_stack+4096		@ sp
LC1:		.word	reloc_end - reloc_start
		.size	LC0, . - LC0

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r6 = processor ID
 *  r7 = architecture number
 *  r8 = run-time address of "start"
 * On exit,
 *  r1, r2, r3, r8, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn
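
		/*
		 * r3 is the byte offset of the method slot within each
		 * proc_types entry: 8 = 'cache on', 12 = 'cache off',
		 * 16 = 'cache flush'.  call_cache_fn adds it to the
		 * address of the matching entry.
		 */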

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
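/*
 * Each entry is an ARM first-level section descriptor: 0x12 sets
 * the section type bits (plus bit 4), 3 << 10 sets the AP field
 * for full read/write access, and 0x0c is the C (cacheable) and
 * B (bufferable) bits toggled in the loop below.  One section
 * maps 1MB, hence the 1048576 step.
 */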
		mov	r0, r3
		mov	r8, r0, lsr #18
		mov	r8, r8, lsl #18		@ start of RAM
		add	r9, r8, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12
		orr	r1, r1, #3 << 10
		add	r2, r3, #16384
1:		cmp	r1, r8			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r9			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		mov	r1, #0x1e
		orr	r1, r1, #3 << 10
		mov	r2, pc, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr

__armv4_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
		bl	__common_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mov	pc, r12

__arm6_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	r0, #0x30
		bl	__common_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, r12

__common_cache_on:
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mov	pc, lr

/*
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 *
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8-r14 = unused
 */
		.align	5
reloc_start:	add	r8, r5, r0
		debug_reloc_start
		mov	r1, r4
1:
		.rept	4
		ldmia	r5!, {r0, r2, r3, r9 - r13}	@ relocate kernel
		stmia	r1!, {r0, r2, r3, r9 - r13}
		.endr

		cmp	r5, r8
		blo	1b
		debug_reloc_end

call_kernel:	bl	cache_clean_flush
		bl	cache_off

		mov	r0, #0
		mov	r1, r7			@ restore architecture number
		mov	pc, r4			@ call kernel

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r6  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
		mrc	p15, 0, r6, c0, c0	@ get processor ID
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r6		@ (real ^ match)
		tst	r1, r2			@       & mask
		addeq	pc, r12, r3		@ call cache function
		add	r12, r12, #4*5
		b	1b

/*
 * Table for cache operations.  This is basically:
 *   - CPU ID match
 *   - CPU ID mask
 *   - 'cache on' method instruction
 *   - 'cache off' method instruction
 *   - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
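
/*
 * Worked example: an SA-1110 whose ID register reads 0x6901b119
 * (revision field 9) hits the 0x6901b110/0xfffffff0 entry below,
 * since (0x6901b119 ^ 0x6901b110) & 0xfffffff0 = 0x9 & 0xfffffff0 = 0.
 */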
		.type	proc_types,#object
proc_types:
		.word	0x41560600		@ ARM6/610
		.word	0xffffffe0
		b	__arm6_cache_off	@ works, but slow
		b	__arm6_cache_off
		mov	pc, lr
@		b	__arm6_cache_on		@ untested
@		b	__arm6_cache_off
@		b	__armv3_cache_flush

		.word	0x00000000		@ old ARM ID
		.word	0x0000f000
		mov	pc, lr
		mov	pc, lr
		mov	pc, lr

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		b	__arm7_cache_off
		b	__arm7_cache_off
		mov	pc, lr

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		b	__armv4_cache_on
		b	__armv4_cache_off
		mov	pc, lr

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
		mov	pc, lr
		mov	pc, lr

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
		mov	pc, lr
		mov	pc, lr

		.size	proc_types, . - proc_types

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On entry,  r6 = processor ID
 * On exit,   r0, r1, r2, r3, r12 corrupted
 * This routine must preserve: r4, r6, r7
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
		mov	pc, lr

__arm6_cache_off:
		mov	r0, #0x00000030		@ ARM6 control reg.
		b	__armv3_cache_off

__arm7_cache_off:
		mov	r0, #0x00000070		@ ARM7 control reg.
		b	__armv3_cache_off

__armv3_cache_off:
		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, lr

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On entry,
 *  r6 = processor ID
 * On exit,
 *  r1, r2, r3, r11, r12 corrupted
 * This routine must preserve:
 *  r0, r4, r5, r6, r7
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r6			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
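		/*
		 * The loop below is a software clean: reading one word
		 * per line over a region twice the D-cache size
		 * displaces, and thus writes back, every dirty line.
		 * This works on caches (e.g. StrongARM) that have no
		 * clean-entire-D-cache operation.
		 */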
no_cache_id:
		bic	r1, pc, #63		@ align to longest cache line
		add	r2, r1, r2
1:		ldr	r3, [r1], r11		@ s/w flush D cache
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

phex:		adr	r3, phexbuf
		mov	r2, #0
		mov	r1, #8
		strb	r2, [r3, r1]	@ store a null at the appropriate place
1:		subs	r1, r1, #1	@ in the red yet?
		movmi	r0, r3		@ yeah? get our address
		bmi	puts		@ and pass it to puts, have a nice day
		and	r2, r0, #15	@ no? not done? get the low nybble
		mov	r0, r0, lsr #4	@ shift
		cmp	r2, #10		@ add hexadecimalness
		addge	r2, r2, #7	@ ""
		add	r2, r2, #'0'	@ ""
		strb	r2, [r3, r1]	@ store it
		b	1b		@ go loop around again

#ifndef JTAG
puts:		loadsp	r3
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
#else /* JTAG */
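/*
 * JTAG build: send characters over the debug comms channel
 * instead of a UART.  The mrc below reads the channel status
 * into the CPSR flags (Rd = pc) and spins while the write
 * buffer is still full (V set); the mcr then writes one byte.
 */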
puts:
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		mrc	p14, 0, r15, c14, c0, 0
		bvs	2b
		mcr	p14, 0, r2, c8, c0, 0
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
#endif
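
/*
 * putc borrows the body of puts: the character to send sits in
 * r2, and r0 = 0 makes puts' end-of-string test succeed once the
 * character has gone out, so it returns after a single byte.
 */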
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3
		b	2b
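
/*
 * memdump prints 64 words (256 bytes) starting at the address in
 * r0: eight rows of eight words, each row prefixed with its
 * address, with an extra space after the fourth word of a row.
 */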

memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

reloc_end:

		.align
		.section ".stack", "w"
user_stack:	.space	4096