/*	$NetBSD: fpu_arith.h,v 1.4 2005/12/11 12:17:52 christos Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_arith.h	8.1 (Berkeley) 6/11/93
 */
/*
 * Extended-precision arithmetic.
 *
 * We hold the notion of a `carry register', which may or may not be a
 * machine carry bit or register.  On the SPARC, it is just the machine's
 * carry bit.
 *
 * In the worst case, you can compute the carry from x+y as
 *	(unsigned)(x + y) < (unsigned)x
 * and from x+y+c as
 *	((unsigned)(x + y + c) <= (unsigned)x && (y|c) != 0)
 * for example.
 */
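/*
 * A minimal sketch of those portable tests in plain C (illustrative
 * only; the names sum, carry_out, sumc and carry_outc, and the
 * assumption of a 32-bit u_int, are not used elsewhere in this file):
 *
 *	u_int sum = x + y;
 *	int carry_out = sum < x;                      carry out of x + y
 *
 *	u_int sumc = x + y + c;
 *	int carry_outc = sumc <= x && (y | c) != 0;   carry out of x + y + c
 *
 * The soft-carry macros below avoid these comparisons by widening to
 * quad_t and testing the upper 32 bits of the result instead.
 */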
#ifndef FPE_USE_ASM

/* set up for extended-precision arithmetic */
#define FPU_DECL_CARRY quad_t fpu_carry, fpu_tmp;

/*
 * We have three kinds of add:
 *	add with carry:                                   r = x + y + c
 *	add (ignoring current carry) and set carry:     c'r = x + y + 0
 *	add with carry and set carry:                   c'r = x + y + c
 * The macros use `C' for `use carry' and `S' for `set carry'.
 * Note that the state of the carry is undefined after ADDC and SUBC,
 * so if all you have for these is `add with carry and set carry',
 * that is OK.
 *
 * The same goes for subtract, except that we compute x - y - c.
 *
 * Finally, we have a way to get the carry into a `regular' variable,
 * or set it from a value.  SET_CARRY turns 0 into no-carry, nonzero
 * into carry; GET_CARRY sets its argument to 0 or 1.
 */
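/*
 * As an illustration (a sketch only; the operand names x0/x1, y0/y1,
 * r0/r1 and the variable overflow are made up here), a two-word add
 * r1:r0 = x1:x0 + y1:y0 adds the low words first and then folds the
 * resulting carry into the high words:
 *
 *	FPU_DECL_CARRY
 *	FPU_ADDS(r0, x0, y0);		set carry from the low words
 *	FPU_ADDCS(r1, x1, y1);		add high words plus that carry
 *	FPU_GET_CARRY(overflow);	1 if the sum exceeded two words
 */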
#define FPU_ADDC(r, x, y) \
	(r) = (x) + (y) + (!!fpu_carry)
#define FPU_ADDS(r, x, y) \
    { \
	fpu_tmp = (quad_t)(x) + (quad_t)(y); \
	(r) = (u_int)fpu_tmp; \
	fpu_carry = ((fpu_tmp & 0xffffffff00000000LL) != 0); \
    }
#define FPU_ADDCS(r, x, y) \
    { \
	fpu_tmp = (quad_t)(x) + (quad_t)(y) + (!!fpu_carry); \
	(r) = (u_int)fpu_tmp; \
	fpu_carry = ((fpu_tmp & 0xffffffff00000000LL) != 0); \
    }
#define FPU_SUBC(r, x, y) \
	(r) = (x) - (y) - (!!fpu_carry)
#define FPU_SUBS(r, x, y) \
    { \
	fpu_tmp = (quad_t)(x) - (quad_t)(y); \
	(r) = (u_int)fpu_tmp; \
	fpu_carry = ((fpu_tmp & 0xffffffff00000000LL) != 0); \
    }
#define FPU_SUBCS(r, x, y) \
    { \
	fpu_tmp = (quad_t)(x) - (quad_t)(y) - (!!fpu_carry); \
	(r) = (u_int)fpu_tmp; \
	fpu_carry = ((fpu_tmp & 0xffffffff00000000LL) != 0); \
    }

#define FPU_GET_CARRY(r) (r) = (!!fpu_carry)
#define FPU_SET_CARRY(v) fpu_carry = ((v) != 0)

#else

/* set up for extended-precision arithmetic */
#define FPU_DECL_CARRY register int fpu_tmp;

/*
 * We have three kinds of add:
 *	add with carry:                                   r = x + y + c
 *	add (ignoring current carry) and set carry:     c'r = x + y + 0
 *	add with carry and set carry:                   c'r = x + y + c
 * The macros use `C' for `use carry' and `S' for `set carry'.
 * Note that the state of the carry is undefined after ADDC and SUBC,
 * so if all you have for these is `add with carry and set carry',
 * that is OK.
 *
 * The same goes for subtract, except that we compute x - y - c.
 *
 * Finally, we have a way to get the carry into a `regular' variable,
 * or set it from a value.  SET_CARRY turns 0 into no-carry, nonzero
 * into carry; GET_CARRY sets its argument to 0 or 1.
 */
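/*
 * In this variant the `carry register' is the m68k X (extend)
 * condition-code bit: addl and subl set X from the carry or borrow
 * they generate, while addxl and subxl fold the current X bit into
 * the operation and set it again.  The movel instructions used to
 * fetch operands and store results do not disturb X.
 */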
#define FPU_ADDC(r, x, y) \
    { \
	__asm volatile("movel %1,%0" : "=d"(fpu_tmp) : "g"(x)); \
	__asm volatile("addxl %1,%0" : "=d"(fpu_tmp) : "d"(y)); \
	__asm volatile("movel %1,%0" : "=g"(r) : "r"(fpu_tmp)); \
    }
#define FPU_ADDS(r, x, y) \
    { \
	__asm volatile("movel %1,%0" : "=d"(fpu_tmp) : "g"(x)); \
	__asm volatile("addl %1,%0" : "=d"(fpu_tmp) : "g"(y)); \
	__asm volatile("movel %1,%0" : "=g"(r) : "r"(fpu_tmp)); \
    }
#define FPU_ADDCS(r, x, y) FPU_ADDC(r, x, y)

#define FPU_SUBC(r, x, y) \
    { \
	__asm volatile("movel %1,%0" : "=d"(fpu_tmp) : "g"(x)); \
	__asm volatile("subxl %1,%0" : "=d"(fpu_tmp) : "d"(y)); \
	__asm volatile("movel %1,%0" : "=g"(r) : "r"(fpu_tmp)); \
    }
#define FPU_SUBS(r, x, y) \
    { \
	__asm volatile("movel %1,%0" : "=d"(fpu_tmp) : "g"(x)); \
	__asm volatile("subl %1,%0" : "=d"(fpu_tmp) : "g"(y)); \
	__asm volatile("movel %1,%0" : "=g"(r) : "r"(fpu_tmp)); \
    }
#define FPU_SUBCS(r, x, y) FPU_SUBC(r, x, y)
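
/*
 * FPU_GET_CARRY relies on moveq clearing its register without
 * touching X, so the following addxl of the register to itself
 * leaves 0 + 0 + X, i.e. 0 or 1.  FPU_SET_CARRY subtracts v from a
 * zeroed register; the subtraction borrows, and hence sets X,
 * exactly when v is nonzero.
 */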
#define FPU_GET_CARRY(r) \
    { \
	__asm volatile("moveq #0,%0" : "=d"(r)); \
	__asm volatile("addxl %0,%0" : "+d"(r)); \
    }

#define FPU_SET_CARRY(v) \
    { \
	__asm volatile("moveq #0,%0" : "=d"(fpu_tmp)); \
	__asm volatile("subl %1,%0" : "=d"(fpu_tmp) : "g"(v)); \
    }

#endif /* FPE_USE_ASM */