@ libgcc routines for ARM cpu.
@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
/* ------------------------------------------------------------------------ */
/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error __USER_LABEL_PREFIX__ not defined
#endif
/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
#define __PLT__	/* Not supported in Thumb assembler (for now).  */
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
/* Function end macros.  Variants for interworking.  */

@ This selects the minimum architecture level required.
#define __ARM_ARCH__ 3
#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
	|| defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# define __ARM_ARCH__ 4
#endif
#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
	|| defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
#endif
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
	|| defined(__ARM_ARCH_6ZK__)
# define __ARM_ARCH__ 6
#endif

#ifndef __ARM_ARCH__
#error Unable to determine architecture.
#endif
/* How to return from a function call depends on the architecture variant.  */

#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

# define RET		bx	lr
# define RETc(x)	bx##x	lr

/* Special precautions for interworking on armv4t.  */
# if (__ARM_ARCH__ == 4)

/* Always use bx, not ldr pc.  */
#  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
#   define __INTERWORKING__
#  endif /* __THUMB__ || __THUMB_INTERWORK__ */

/* Include thumb stub before arm mode code.  */
#  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#   define __INTERWORKING_STUBS__
#  endif /* __thumb__ && !__THUMB_INTERWORK__ */

# endif /* __ARM_ARCH == 4 */

#else

# define RET		mov	pc, lr
# define RETc(x)	mov##x	pc, lr

#endif
.macro	cfi_pop		advance, reg, cfa_offset
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0xc0 | \reg)	/* DW_CFA_restore */
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
.endm
.macro	cfi_push	advance, reg, offset, cfa_offset
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0x80 | \reg)	/* DW_CFA_offset */
	.uleb128 (\offset / -4)
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
.endm
.macro cfi_start	start_label, end_label
	.pushsection	.debug_frame
LSYM(Lstart_frame):
	.4byte	LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
LSYM(Lstart_cie):
	.4byte	0xffffffff	@ CIE Identifier Tag
	.byte	0x1	@ CIE Version
	.ascii	"\0"	@ CIE Augmentation
	.uleb128 0x1	@ CIE Code Alignment Factor
	.sleb128 -4	@ CIE Data Alignment Factor
	.byte	0xe	@ CIE RA Column
	.byte	0xc	@ DW_CFA_def_cfa
	.uleb128 0xd
	.uleb128 0x0
	.align 2
LSYM(Lend_cie):
	.4byte	LSYM(Lend_fde)-LSYM(Lstart_fde)	@ FDE Length
LSYM(Lstart_fde):
	.4byte	LSYM(Lstart_frame)	@ FDE CIE offset
	.4byte	\start_label	@ FDE initial location
	.4byte	\end_label-\start_label	@ FDE address range
	.popsection
.endm
.macro cfi_end	end_label
	.pushsection	.debug_frame
/* Don't pass dirn, it's there just to get token pasting right.  */

.macro	RETLDM	regs=, cond=, unwind=, dirn=ia
#if defined (__INTERWORKING__)
	.ifc "\regs",""
	ldr\cond	lr, [sp], #8
	.else
	ldm\cond\dirn	sp!, {\regs, lr}
	.endif
	.ifnc "\unwind", ""
	/* Mark LR as restored.  */
97:	cfi_pop 97b - \unwind, 0xe, 0x0
	.endif
	bx\cond	lr
#else
	.ifc "\regs",""
	ldr\cond	pc, [sp], #8
	.else
	ldm\cond\dirn	sp!, {\regs, pc}
	.endif
#endif
.endm
.macro ARM_LDIV0 name
	str	lr, [sp, #-8]!
98:	cfi_push 98b - __\name, 0xe, -0x8, 0x8
	bl	SYM (__div0) __PLT__
	mov	r0, #0			@ About as wrong as it could be.
	RETLDM	unwind=98b
.endm
.macro THUMB_LDIV0 name
98:	cfi_push 98b - __\name, 0xe, -0x4, 0x8
	bl	SYM (__div0)
	mov	r0, #0			@ About as wrong as it could be.
#if defined (__INTERWORKING__)
	pop	{ r1, r2 }
	bx	r2
#else
	pop	{ r1, pc }
#endif
.endm
.macro DIV_FUNC_END name
	cfi_start	__\name, LSYM(Lend_div0)
	cfi_end	LSYM(Lend_div0)
	SIZE	(__\name)
.endm
.macro THUMB_FUNC_START name
/* Function start macros.  Variants for ARM and Thumb.  */

#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
.macro FUNC_START name
/* Special function that will always be coded in ARM assembly, even if
   in Thumb-only compilation.  */

#if defined(__INTERWORKING_STUBS__)
.macro	ARM_FUNC_START name
/* A hook to tell gdb that we've switched to ARM mode.  Also used to call
   directly from other local arm routines.  */

#define EQUIV .thumb_set

/* Branch directly to a function declared with ARM_FUNC_START.
   Must be called in arm mode.  */
.macro	ARM_FUNC_START name
.macro	FUNC_ALIAS new old
	.globl	SYM (__\new)
#if defined (__thumb__)
	.thumb_set	SYM (__\new), SYM (__\old)
#else
	.set	SYM (__\new), SYM (__\old)
#endif
.endm
.macro	ARM_FUNC_ALIAS new old
	.globl	SYM (__\new)
	EQUIV	SYM (__\new), SYM (__\old)
#if defined (__INTERWORKING_STUBS__)
	.set	SYM (_L__\new), SYM (_L__\old)
#endif
.endm
/* Register aliases.  */

work		.req	r4	@ XXXX is this safe ?
/* ------------------------------------------------------------------------ */
/* Bodies of the division and modulo routines.  */
/* ------------------------------------------------------------------------ */
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

	clz	\curbit, \dividend
	clz	\result, \divisor
	sub	\curbit, \result, \curbit
	rsbs	\curbit, \curbit, #31
	addne	\curbit, \curbit, \curbit, lsl #1
	mov	\result, #0
	addne	pc, pc, \curbit, lsl #2
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	adc	\result, \result, \result
	subcs	\dividend, \dividend, \divisor, lsl #shift
#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

#if __ARM_ARCH__ >= 5

	clz	\curbit, \divisor
	clz	\result, \dividend
	sub	\result, \curbit, \result
	mov	\curbit, #1
	mov	\divisor, \divisor, lsl \result
	mov	\curbit, \curbit, lsl \result
	mov	\result, #0
#else /* __ARM_ARCH__ < 5 */

	@ Initially shift the divisor left 3 bits if possible,
	@ set curbit accordingly.  This allows for curbit to be located
	@ at the left end of each 4-bit nibble in the division loop
	@ to save one loop in most cases.
	tst	\divisor, #0xe0000000
	moveq	\divisor, \divisor, lsl #3
	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	movlo	\curbit, \curbit, lsl #4
	blo	1b
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	movlo	\curbit, \curbit, lsl #1
	blo	1b
#endif /* __ARM_ARCH__ < 5 */
1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	orrhs	\result,   \result,   \curbit
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	orrhs	\result,   \result,   \curbit,  lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	orrhs	\result,   \result,   \curbit,  lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	orrhs	\result,   \result,   \curbit,  lsr #3
	cmp	\dividend, #0			@ Early termination?
	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
	movne	\divisor,  \divisor, lsr #4
	bne	1b
#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
/* ------------------------------------------------------------------------ */
.macro ARM_DIV2_ORDER divisor, order
#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	rsb	\order, \order, #31

#else

	cmp	\divisor, #(1 << 16)
	movhs	\divisor, \divisor, lsr #16
	movhs	\order, #16
	movlo	\order, #0

	cmp	\divisor, #(1 << 8)
	movhs	\divisor, \divisor, lsr #8
	addhs	\order, \order, #8

	cmp	\divisor, #(1 << 4)
	movhs	\divisor, \divisor, lsr #4
	addhs	\order, \order, #4

	cmp	\divisor, #(1 << 2)
	addhi	\order, \order, #3
	addls	\order, \order, \divisor, lsr #1

#endif
.endm
/* ------------------------------------------------------------------------ */
.macro ARM_MOD_BODY dividend, divisor, order, spare
#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	rsbs	\order, \order, #31
	addne	pc, pc, \order, lsl #3
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	subcs	\dividend, \dividend, \divisor, lsl #shift
#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	mov	\divisor, \divisor, lsl \order
#else /* __ARM_ARCH__ < 5 */

	mov	\order, #0

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	addlo	\order, \order, #4
	blo	1b
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	addlo	\order, \order, #1
	blo	1b
#endif /* __ARM_ARCH__ < 5 */

	@ Perform all needed subtractions to keep only the remainder.
	@ Do comparisons in batches of 4 first.
	subs	\order, \order, #3		@ yes, 3 is intended here
1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	mov	\divisor, \divisor, lsr #4
	subges	\order, \order, #4
	bge	1b
	@ Either 1, 2 or 3 comparison/subtractions are left.
	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor, \divisor, lsr #1
3:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor, \divisor, lsr #1
4:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
/* ------------------------------------------------------------------------ */
.macro THUMB_DIV_MOD_BODY modulo

	@ Load the constant 0x10000000 into our work register.
	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
	cmp	divisor, dividend
	@ Set work to 0x80000000

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
	cmp	divisor, dividend
	@ Test for possible subtractions ...

	@ ... On the final pass, this may subtract too much from the dividend,
	@ so keep track of which subtractions are done, we can fix them up
	@ afterwards.
	cmp	dividend, divisor
	sub	dividend, dividend, divisor
	lsr	work, divisor, #1
	sub	dividend, dividend, work
	lsr	work, divisor, #2
	sub	dividend, dividend, work
	lsr	work, divisor, #3
	sub	dividend, dividend, work
	@ ... and note which bits are done in the result.  On the final pass,
	@ this may subtract too much from the dividend, but the result will be ok,
	@ since the "bit" will have been shifted out at the bottom.
	cmp	dividend, divisor
	sub	dividend, dividend, divisor
	orr	result, result, curbit
	lsr	work, divisor, #1
	sub	dividend, dividend, work
	lsr	work, divisor, #2
	sub	dividend, dividend, work
	lsr	work, divisor, #3
	sub	dividend, dividend, work
	cmp	dividend, #0			@ Early termination?
	lsr	curbit, #4			@ No, any more bits to do?
	@ Any subtractions that we should not have done will be recorded in
	@ the top three bits of "overdone".  Exactly which were not needed
	@ are governed by the position of the bit, stored in ip.
	beq	LSYM(Lgot_result)
	@ If we terminated early, because dividend became zero, then the
	@ bit in ip will not be in the bottom nibble, and we should not
	@ perform the additions below.  We must test for this though
	@ (rather than relying upon the TSTs to prevent the additions) since
	@ the bit in ip could be in the top two bits which might then match
	@ with one of the smaller RORs.
	beq	LSYM(Lgot_result)
	lsr	work, divisor, #3
	lsr	work, divisor, #2
	beq	LSYM(Lgot_result)
	lsr	work, divisor, #1
/* ------------------------------------------------------------------------ */
/* Start of the Real Functions */
/* ------------------------------------------------------------------------ */

	FUNC_ALIAS aeabi_uidiv udivsi3
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)

#else /* ARM version.  */
	ARM_DIV_BODY r0, r1, r2, r3

12:	ARM_DIV2_ORDER r1, r2

#endif /* ARM version */
	FUNC_START aeabi_uidivmod

	stmfd	sp!, { r0, r1, lr }
	bl	SYM(__udivsi3)
	ldmfd	sp!, { r1, r2, lr }
	mul	r3, r2, r0
	sub	r1, r1, r3
	RET

	FUNC_END aeabi_uidivmod
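/* In C terms the wrapper above behaves like this sketch (quotient is
   returned in r0 and remainder in r1 per the run-time ABI; the struct
   name here is illustrative only):

   extern unsigned __udivsi3 (unsigned, unsigned);	// this file's routine

   typedef struct { unsigned quot, rem; } uidivmod_t;
   static uidivmod_t uidivmod_sketch (unsigned num, unsigned den)
   {
     unsigned quot = __udivsi3 (num, den);	// the bl above
     uidivmod_t r = { quot, num - quot * den };	// the mul/sub above
     return r;
   }
*/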
#endif /* L_udivsi3 */
/* ------------------------------------------------------------------------ */
	cmp	dividend, divisor

#else /* ARM version.  */
	subs	r2, r1, #1			@ compare divisor with 1
	cmpne	r0, r1				@ compare dividend with divisor
	tsthi	r1, r2				@ see if divisor is power of 2
	ARM_MOD_BODY r0, r1, r2, r3
#endif /* ARM version.  */

#endif /* L_umodsi3 */
/* ------------------------------------------------------------------------ */
	FUNC_ALIAS aeabi_idiv divsi3

	eor	work, divisor		@ Save the sign of the result.
	neg	divisor, divisor	@ Loops below use unsigned.
	neg	dividend, dividend
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)
#else /* ARM version.  */

	eor	ip, r0, r1			@ save the sign of the result.
	rsbmi	r1, r1, #0			@ loops below use unsigned.
	subs	r2, r1, #1			@ division by 1 or -1 ?
	rsbmi	r3, r0, #0			@ positive dividend value
	tst	r1, r2				@ divisor is power of 2 ?
	ARM_DIV_BODY r3, r1, r0, r2
10:	teq	ip, r0				@ same sign ?
	moveq	r0, ip, asr #31

12:	ARM_DIV2_ORDER r1, r2

#endif /* ARM version */
	FUNC_START aeabi_idivmod

	stmfd	sp!, { r0, r1, lr }
	bl	SYM(__divsi3)
	ldmfd	sp!, { r1, r2, lr }
	mul	r3, r2, r0
	sub	r1, r1, r3
	RET

	FUNC_END aeabi_idivmod
#endif /* L_divsi3 */
/* ------------------------------------------------------------------------ */
	neg	divisor, divisor	@ Loops below use unsigned.

	@ Need to save the sign of the dividend, unfortunately, we need
	@ work later on.  Must do this after saving the original value of
	@ the work register, because we will pop this value off first.
	neg	dividend, dividend
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)
	neg	dividend, dividend
#else /* ARM version.  */

	rsbmi	r1, r1, #0			@ loops below use unsigned.
	movs	ip, r0				@ preserve sign of dividend
	rsbmi	r0, r0, #0			@ if negative make positive
	subs	r2, r1, #1			@ compare divisor with 1
	cmpne	r0, r1				@ compare dividend with divisor
	tsthi	r1, r2				@ see if divisor is power of 2
	ARM_MOD_BODY r0, r1, r2, r3
#endif /* ARM version */

#endif /* L_modsi3 */
/* ------------------------------------------------------------------------ */
	FUNC_ALIAS aeabi_idiv0 div0
	FUNC_ALIAS aeabi_ldiv0 div0

#endif /* L_divmodsi_tools */
/* ------------------------------------------------------------------------ */
@ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls

/* Constant taken from <asm/signal.h>.  */

	bl	SYM (raise) __PLT__
#endif /* L_dvmd_lnx */
/* ------------------------------------------------------------------------ */
/* Dword shift operations.  */

/* All the following Dword shift variants rely on the fact that
	shft xxx, Reg
   is in fact done as
	shft xxx, (Reg & 255)
   so for Reg value in (32...63) and (-1...-31) we will get zero (in the
   case of logical shifts) or the sign (for asr).  */
	FUNC_ALIAS aeabi_llsr lshrdi3

	movmi	al, al, lsr r2
	movpl	al, ah, lsr r3
	orrmi	al, al, ah, lsl ip
	FUNC_ALIAS aeabi_lasr ashrdi3

	@ If r2 is negative at this point the following step would OR
	@ the sign bit into all of AL.  That's not what we want...
	movmi	al, al, lsr r2
	movpl	al, ah, asr r3
	orrmi	al, al, ah, lsl ip
	FUNC_ALIAS aeabi_llsl ashldi3

	movmi	ah, ah, lsl r2
	movpl	ah, al, lsl r3
	orrmi	ah, ah, al, lsr ip
/* ------------------------------------------------------------------------ */
/* These next two sections are here despite the fact that they contain Thumb
   assembler because their presence allows interworked code to be linked even
   when the GCC library is this one.  */

/* Do not build the interworking functions when the target architecture does
   not support Thumb instructions.  (This can be a multilib option).  */
#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__ \
	|| defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
	|| __ARM_ARCH__ >= 6

#if defined L_call_via_rX
/* These labels & instructions are used by the Arm/Thumb interworking code.
   The address of function to be called is loaded into a register and then
   one of these labels is called via a BL instruction.  This puts the
   return address into the link register with the bottom bit set, and the
   code here switches to the correct mode before executing the function.  */
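/* For example, a caller built without BLX support loads the target
   address into, say, r3 and executes "bl _call_via_r3"; the stub's BX
   then enters ARM or Thumb state according to the bottom bit of r3.  */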
.macro call_via register
	THUMB_FUNC_START _call_via_\register

	bx	\register
	nop

	SIZE	(_call_via_\register)
.endm
#endif /* L_call_via_rX */
#if defined L_interwork_call_via_rX
/* These labels & instructions are used by the Arm/Thumb interworking code,
   when the target address is in an unknown instruction set.  The address
   of function to be called is loaded into a register and then one of these
   labels is called via a BL instruction.  This puts the return address
   into the link register with the bottom bit set, and the code here
   switches to the correct mode before executing the function.  Unfortunately
   the target code cannot be relied upon to return via a BX instruction, so
   instead we have to store the return address on the stack and allow the
   called function to return here instead.  Upon return we recover the real
   return address and use a BX to get back to Thumb mode.

   There are three variations of this code.  The first,
   _interwork_call_via_rN(), will push the return address onto the
   stack and pop it in _arm_return().  It should only be used if all
   arguments are passed in registers.

   The second, _interwork_r7_call_via_rN(), instead stores the return
   address at [r7, #-4].  It is the caller's responsibility to ensure
   that this address is valid and contains no useful data.

   The third, _interwork_r11_call_via_rN(), works in the same way but
   uses r11 instead of r7.  It is useful if the caller does not really
   need a frame pointer.  */
LSYM(Lstart_arm_return):
	cfi_start	LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
	cfi_push	0, 0xe, -0x8, 0x8
	nop	@ This nop is for the benefit of debuggers, so that
		@ backtraces will use the correct unwind information.
	RETLDM	unwind=LSYM(Lstart_arm_return)
	cfi_end	LSYM(Lend_arm_return)
	.globl _arm_return_r7
	.globl _arm_return_r11
.macro interwork_with_frame frame, register, name, return
	THUMB_FUNC_START \name

	streq	lr, [\frame, #-4]
	adreq	lr, _arm_return_\frame
.macro interwork register
	THUMB_FUNC_START _interwork_call_via_\register

	.globl LSYM(Lchange_\register)
LSYM(Lchange_\register):
	streq	lr, [sp, #-8]!
	adreq	lr, _arm_return

	SIZE	(_interwork_call_via_\register)
	interwork_with_frame r7,\register,_interwork_r7_call_via_\register
	interwork_with_frame r11,\register,_interwork_r11_call_via_\register
.endm
/* The LR case has to be handled a little differently...  */

	THUMB_FUNC_START _interwork_call_via_lr

	stmeqdb	r13!, {lr, pc}
	adreq	lr, _arm_return

	SIZE	(_interwork_call_via_lr)
#endif /* L_interwork_call_via_rX */

#endif /* Arch supports thumb.  */
#include "ieee754-df.S"
#include "ieee754-sf.S"

#endif /* __symbian__ */