/* Startup code for ZPU
   Copyright (C) 2005 Free Software Foundation, Inc.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2, or (at your option) any
   later version.

   In addition to the permissions in the GNU General Public License, the
   Free Software Foundation gives you unlimited permission to link the
   compiled version of this file with other programs, and to distribute
   those programs without any restriction coming from the use of this
   file. (The General Public License restrictions do apply in other
   respects; for example, they cover modification of the file, and
   distribution when not linked into another program.)

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
; .section ".fixed_vectors","ax"
; KLUDGE!!! we remove the executable bit to avoid relaxation
	.section ".fixed_vectors","a"

; we need to align these code sections to 32 bytes, which
; means we must not use any assembler instructions that are relaxed
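; (relaxation lets the assembler resize im/branch sequences while it
; resolves symbols; that would break the fixed 32-byte spacing of the
; vectors below, hence the non-executable section flags)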
	im _memreg+4		; save R1
	im _memreg+8		; save R2
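	; (_memreg is the ZPU toolchain's small block of pseudo-registers
	; kept in RAM; R1 and R2 here are the words at offsets 4 and 8
	; within it)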
	; destroy arguments on stack
	; poke the result into the right slot
	; create mask of lowest bit in A
	add		; accumulate in C
	; shift A right 1 bit
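	/* The comments above describe a shift-and-add multiply loop. A
	   minimal C model of the technique (illustrative name, not the
	   exact stack code):

	       unsigned emulate_mult(unsigned a, unsigned b)
	       {
	           unsigned c = 0;
	           while (a != 0) {
	               unsigned mask = -(a & 1); // all ones iff A's low bit set
	               c += b & mask;            // accumulate in C
	               a >>= 1;                  // shift A right 1 bit
	               b <<= 1;
	           }
	           return c;
	       }
	*/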
	; intSp must be 0 when we jump to _premain

	.globl _zpu_interrupt_vector
_zpu_interrupt_vector:
	jmp ___zpu_interrupt_vector
/* instruction emulation code */

	; by not masking out bit 0, we cause a memory access error
	; on unaligned access

	; shift right by (addr&3) * 8 bits
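	/* Two tricks are at work here. For halfword accesses, only bit 1 is
	   cleared from the address, so a surviving bit 0 makes the aligned
	   word load fault on odd addresses. The byte extraction could be
	   modelled in C as below (illustrative name; assumes the ZPU's
	   big-endian byte numbering, where byte 0 occupies the top bits):

	       unsigned char emulate_loadb(unsigned long addr)
	       {
	           unsigned long word = *(unsigned long *)(addr & ~0x3UL);
	           return (unsigned char)(word >> ((3 - (addr & 3)) * 8));
	       }
	*/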
	; by not masking out bit 0, we cause a memory access error
	; on unaligned access
	; 0x80000000 will overflow when negated, so we need to mask
	; the result above with the compare-positive-to-negative case
	; handle case where we are comparing a negative number
	; and a positive number. This can underflow, e.g. consider 0x80000000 < 0x1000
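	/* One way to realise what the two comments above describe, as a C
	   sketch (illustrative name, not the exact stack code): the sign bit
	   of a-b is only trusted when a and b have the same sign; the
	   negative-vs-positive case is decided directly.

	       int emulate_lessthan(int a, int b)       // signed a < b
	       {
	           unsigned ua = (unsigned)a, ub = (unsigned)b;
	           unsigned lt = (ua & ~ub)              // a negative, b positive
	                       | (~(ua ^ ub) & (ua - ub)); // same sign: diff exact
	           return (int)(lt >> 31);
	       }
	*/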
	/* low: -1 if the low-bit diff is negative, 0 otherwise:
	   neg (not(x&1) and (y&1))
	   x&1   y&1   neg (not(x&1) and (y&1))

	/* high: the upper 31-bit diff is only wrong when diff is 0 and low = -1
	   high = (x>>1) - (y>>1) + low

	   low  = neg(not 0 and 1) = 1111 (-1)
	   high = 000 + neg(111) + 1111 = 000 + 1001 + 1111 = 1000

	   low  = neg(not 1 and 0) = 0
	   high = 111 + neg(000) + 0 = 0111

	; if they are equal, then the last bit decides...

	/* test if negative: result = flip(diff) & 1 */
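	/* Putting the pieces together, a C model of the whole ulessthan
	   trick (illustrative name; the final >>31 plays the role of
	   "flip(diff) & 1", i.e. extracting the sign bit):

	       int emulate_ulessthan(unsigned x, unsigned y)
	       {
	           int low  = -(int)((~x & y) & 1);     // -1 iff x&1==0 and y&1==1
	           int high = (int)(x >> 1) - (int)(y >> 1) + low; // no overflow
	           return (int)(((unsigned)high >> 31) & 1);
	       }
	*/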
	; destroy a&b which are on stack
	; handle signed value
	not		; now we have an integer on the stack with the sign
			; bits in the right position
	; mask these bits with the sign bit.
	; stuff in the sign bits...
	; store result into the correct stack slot
	; move up return value
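	/* The comments above read like the sign-extension step of an
	   arithmetic right shift built on a logical one; a guess at the
	   technique in C (illustrative name):

	       int emulate_ashiftright(int x, unsigned n)   // n in 0..31
	       {
	           unsigned r    = (unsigned)x >> n;  // logical shift
	           unsigned ones = ~(~0u >> n);       // vacated sign-bit positions
	           if (x < 0)
	               r |= ones;                     // stuff in the sign bits
	           return (int)r;
	       }
	*/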
	; store return address
	pushsp		; flush internal stack
	; mask away destination
	; fetch boolean & neg mask
	; calc address & mask for branch
	; subtract 1 to find PC of branch instruction
	; fetch boolean & neg mask
	; calc address & mask for branch
	; find address of branch instruction
	; address of poppcrel
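	/* A branchless sketch of the target computation these comments hint
	   at (illustrative names; assumes the condition has already been
	   normalised to 0 or 1):

	       unsigned long branch_target(unsigned long ret_addr,
	                                   unsigned long offset,
	                                   unsigned long cond01)
	       {
	           unsigned long mask = -(cond01 & 1);  // 0 -> 0, 1 -> all ones
	           // EMULATE pushes PC+1, so subtracting 1 recovers the PC of
	           // the branch instruction that the offset is relative to
	           return ret_addr + ((offset - 1) & mask);
	       }
	*/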
	storesp 12	; return address
	pushsp		; this will flush the internal stack.

	; NB! this is not an EMULATE instruction. It is a varargs fn.

	.byte (.LmoreMult-.Lbranch)&0x7f+0x80
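	; (the .byte above hand-assembles a ZPU "im" instruction: the top
	; bit, 0x80, is the im opcode and the low seven bits carry the
	; offset to .LmoreMult, so the assembler cannot relax or resize it)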
	.globl ___zpu_interrupt_vector
	.weak ___zpu_interrupt_vector

___zpu_interrupt_vector: