arch/.unmaintained/m68k-emul/utility/smult64.s
/*
    Copyright © 1995-2001, The AROS Development Team. All rights reserved.
    $Id$

    Desc: Utility 64 bit multiplication routines. m68k version.
    Lang: english
*/

#include "machine.i"
/* SMult64()/UMult64():
    These are the signed/unsigned 64 bit multiplication routines.
    There are two possibilities here because, as of the 060, the
    32*32->64 bit result instructions are no longer supported, and I
    haven't quite figured out how to do this using the 32 bit ops yet
    (can't be that hard though).

    Still, emulating is faster than an unsupported integer instruction
    exception.
*/
	.text
	.balign	16

	.globl	AROS_SLIB_ENTRY(SMult64,Utility)
	.globl	AROS_SLIB_ENTRY(SMult64_020,Utility)

	/* Required by SMult64() */
	.globl	AROS_SLIB_ENTRY(UMult64,Utility)

	.type	AROS_SLIB_ENTRY(SMult64,Utility),@function
	.type	AROS_SLIB_ENTRY(SMult64_020,Utility),@function
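
/*
    Register convention, as implied by the code in this file: the two
    32 bit factors are passed in d0 and d1, and the 64 bit product comes
    back with its high 32 bits in d0 and its low 32 bits in d1.

    On CPUs that still have the 64 bit result form (020/030/040), one
    muls.l does the whole job: muls.l <ea>,Dh:Dl multiplies Dl by the
    source operand and leaves the 64 bit product in Dh:Dl.
*/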
AROS_SLIB_ENTRY(SMult64_020,Utility):
	muls.l	%d0,%d0:%d1
	rts
/* How do I do this? Again, consider:
	(a*2^16 + b) * (c*2^16 + d)
	    = a*c*2^32 + (a*d + b*c)*2^16 + b*d

    I tried to think of a way of doing this with the mulu.l instruction,
    but I couldn't, so I'll just use mulu.w. It's quicker than an
    unsupported integer instruction exception anyway :)
*/
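
/*
    A minimal sketch of that decomposition, written against the register
    convention above.  It is illustrative only: it is NOT the UMult64
    this file actually branches to (that routine is defined elsewhere),
    and the label name and the choice of d2-d4 as scratch registers are
    assumptions made for the example.  It only uses mulu.w (16*16->32),
    so it would also run on a plain 68000.
*/
umult64_sketch:				/* d0 = a:b, d1 = c:d -> d0:d1 = product */
	movem.l	%d2-%d4,-(%sp)
	move.l	%d0,%d2
	move.l	%d1,%d3
	swap	%d2			/* d2.w = a                              */
	swap	%d3			/* d3.w = c                              */
	move.l	%d2,%d4
	mulu.w	%d3,%d4			/* d4 = a*c (bits 32..63)                */
	mulu.w	%d1,%d2			/* d2 = a*d                              */
	mulu.w	%d0,%d3			/* d3 = b*c                              */
	mulu.w	%d0,%d1			/* d1 = b*d (bits 0..31)                 */
	add.l	%d3,%d2			/* d2 = a*d + b*c, may carry out         */
	moveq	#0,%d3
	addx.l	%d3,%d3			/* d3 = carry of the middle sum          */
	swap	%d3
	add.l	%d3,%d4			/* fold that carry into bit 48           */
	move.l	%d2,%d0
	clr.w	%d0
	swap	%d0			/* d0 = middle sum >> 16                 */
	swap	%d2
	clr.w	%d2			/* d2 = middle sum << 16                 */
	add.l	%d2,%d1			/* low 32 bits done, X = carry           */
	addx.l	%d0,%d4			/* high 32 bits done                     */
	move.l	%d4,%d0
	movem.l	(%sp)+,%d2-%d4
	rts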
/* Have to change the sign... */
AROS_SLIB_ENTRY(SMult64,Utility):
	move.l	%d2,-(%a7)
	moveq	#0,%d2
	tst.l	%d0
	jbpl	.ispos1
	neg.l	%d0
	addq.l	#1,%d2
.ispos1:
	tst.l	%d1
	jbpl	.ispos2
	neg.l	%d1
	subq.l	#1,%d2

	/* Ok, so if d2 != 0, then the sign was changed */
.ispos2:
	bsr.s	AROS_SLIB_ENTRY(UMult64,Utility)
	tst.l	%d2			/* was the sign changed? */
	jbeq	.ispos
	moveq	#0,%d2

	/* Ok we have to change the sign, 2's comp = 1's comp + 1 */
	not.l	%d0
	not.l	%d1
	/* Add 1 to low order 32 bits */
	addq.l	#1,%d1
	/* Add 0 and the carry to the high 32 bits */
	addx.l	%d2,%d0
.ispos:
	move.l	(%sp)+,%d2
	rts
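
/*
    Worked example of the negation path above: for (-3) * 5 the sign
    tracking leaves d2 = 1, and UMult64 returns 0x00000000:0000000F (15).
    Inverting both halves gives 0xFFFFFFFF:FFFFFFF0, adding 1 to the low
    half gives 0xFFFFFFF1 with no carry, so the addx adds only the zeroed
    d2 to the high half: the final d0:d1 = 0xFFFFFFFF:FFFFFFF1 = -15.
*/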