arch/m68k/math-emu/fp_arith.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*

   fp_arith.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines.

   Somewhat based on the AlphaLinux floating point emulator, by David
   Mosberger-Tang.

 */

#include "fp_emu.h"
#include "multi_arith.h"
#include "fp_arith.h"
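/* Default constant operands: fp_QNaN is a quiet NaN (maximum exponent,
   all-ones mantissa), fp_Inf is positive infinity (maximum exponent,
   zero mantissa). */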
const struct fp_ext fp_QNaN = {
	.exp = 0x7fff,
	.mant = { .m64 = ~0 }
};

const struct fp_ext fp_Inf = {
	.exp = 0x7fff,
};
/* let's start with the easy ones */

struct fp_ext *fp_fabs(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fabs\n");

	fp_monadic_check(dest, src);

	dest->sign = 0;

	return dest;
}

struct fp_ext *fp_fneg(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fneg\n");

	fp_monadic_check(dest, src);

	dest->sign = !dest->sign;

	return dest;
}
/* Now, the slightly harder ones */

/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
   FDSUB, and FCMP instructions. */

struct fp_ext *fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
	int diff;

	dprint(PINSTR, "fadd\n");

	fp_dyadic_check(dest, src);

	if (IS_INF(dest)) {
		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		fp_copy_ext(dest, src);
		return dest;
	}

	if (IS_ZERO(dest)) {
		if (IS_ZERO(src)) {
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;
			}
		} else
			fp_copy_ext(dest, src);
		return dest;
	}

	dest->lowmant = src->lowmant = 0;

	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);
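	/* The exponents now agree, so the mantissas can be combined
	   directly: equal signs add, opposite signs subtract the smaller
	   magnitude from the larger (flipping the result's sign when the
	   destination held the smaller magnitude). */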
	if (dest->sign == src->sign) {
		if (fp_addmant(dest, src))
			if (!fp_addcarry(dest))
				return dest;
	} else {
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

	return dest;
}
/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
   instructions.

   Remember that the arguments are in assembler-syntax order! */

struct fp_ext *fp_fsub(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fsub ");

	src->sign = !src->sign;
	return fp_fadd(dest, src);
}
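/* fp_fcmp: Implements the kernel of the FCMP instruction.  The
   subtraction is done into a scratch operand (FPDATA->temp[1]), so the
   destination register itself is left untouched and only the condition
   codes derived from the result are affected. */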
struct fp_ext *fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fcmp ");

	FPDATA->temp[1] = *dest;
	src->sign = !src->sign;
	return fp_fadd(&FPDATA->temp[1], src);
}

struct fp_ext *fp_ftst(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "ftst\n");

	(void)dest;

	return src;
}
struct fp_ext *fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;
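	/* Both exponents carry the 0x3fff extended-precision bias, so the
	   sum above is biased twice.  Subtracting 0x3ffe (bias - 1) removes
	   one bias and pre-adjusts for the product of two mantissas in
	   [1, 2) landing in [2, 4); the exp-- below undoes that adjustment
	   when the product stays below 2. */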
	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes the
	   shift of the result below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if ((long)temp.m32[0] > 0) {
		exp--;
		fp_putmant128(dest, &temp, 1);
	} else
		fp_putmant128(dest, &temp, 0);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
   FSGLDIV instructions.

   Note that the order of the operands is counter-intuitive: instead
   of src / dest, the result is actually dest / src. */

struct fp_ext *fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fdiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;
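	/* Subtracting the two biased exponents cancels both biases, so one
	   0x3fff bias is added back.  The quotient of two normalized
	   mantissas lies in (0.5, 2); the exp-- below corrects the case
	   where it comes out below 1. */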
	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes lots
	   of things below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if (!temp.m32[0]) {
		exp--;
		fp_putmant128(dest, &temp, 32);
	} else
		fp_putmant128(dest, &temp, 31);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
struct fp_ext *fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;

	dprint(PINSTR, "fsglmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

	/* do a 32-bit multiply */
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);
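	/* FSGLMUL only carries single precision: the 0xffffff00 masks keep
	   the upper 24 bits of each mantissa, and the 32x32 -> 64-bit
	   multiply above replaces the full 64x64 product used by fp_fmul. */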
	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
struct fp_ext *fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;
	unsigned long quot, rem;

	dprint(PINSTR, "fsgldiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;
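	/* FSGLDIV likewise works at single precision, hence the 24-bit
	   masks.  In the divide below, a dividend mantissa >= the divisor's
	   means the quotient is >= 1, so one divisor is subtracted up front
	   and the leading bit is set explicitly; otherwise the quotient is
	   < 1 and the exponent is decremented instead. */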
	/* do the 32-bit divide */
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;	/* only for rounding */
		exp--;
	}

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
/* fp_roundint: Internal rounding function for use by several of these
   emulated instructions.

   This one rounds off the fractional part using the rounding mode
   specified. */

static void fp_roundint(struct fp_ext *dest, int mode)
{
	union fp_mant64 oldmant;
	unsigned long mask;

	if (!fp_normalize_ext(dest))
		return;

	/* infinities and zeroes */
	if (IS_INF(dest) || IS_ZERO(dest))
		return;

	/* first truncate the lower bits */
	oldmant = dest->mant;
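	/* With the 0x3fff exponent bias, exp <= 0x3ffe means |x| < 1 (no
	   integer bits at all), 0x3fff...0x401e means the integer part fits
	   in the upper mantissa word, 0x401f...0x403e means fraction bits
	   remain only in the lower word, and anything larger is already
	   integral. */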
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->mant.m64 = 0;
		break;
	case 0x3fff ... 0x401e:
		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
		dest->mant.m32[1] = 0;
		if (oldmant.m64 == dest->mant.m64)
			return;
		break;
	case 0x401f ... 0x403e:
		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
		if (oldmant.m32[1] == dest->mant.m32[1])
			return;
		break;
	default:
		return;
	}
	fp_set_sr(FPSR_EXC_INEX2);
	/* We might want to normalize upwards here... however, since
	   we know that this is only called on the output of fp_fdiv,
	   or with the input to fp_fint or fp_fintrz, and the inputs
	   to all these functions are either normal or denormalized
	   (no subnormals allowed!), there's really no need.

	   In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
	   0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the
	   smallest possible normal dividend and the largest possible normal
	   divisor will still produce a normal quotient, therefore, (normal
	   << 64) / normal is normal in all cases) */

	switch (mode) {
	case FPCR_ROUND_RN:
		switch (dest->exp) {
		case 0 ... 0x3ffd:
			return;
		case 0x3ffe:
			/* As noted above, the input is always normal, so the
			   guard bit (bit 63) is always set.  Therefore, the
			   only case in which we will NOT round to 1.0 is when
			   the input is exactly 0.5. */
			if (oldmant.m64 == (1ULL << 63))
				return;
			break;
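		/* Where <mask> is used below, it is the guard bit (the first
		   bit below the integer part): round up when it is set,
		   unless this is an exact halfway case and the integer LSB
		   is already even (round-half-to-even). */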
		case 0x3fff ... 0x401d:
			mask = 1 << (0x401d - dest->exp);
			if (!(oldmant.m32[0] & mask))
				return;
			if (oldmant.m32[0] & (mask << 1))
				break;
			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
					!oldmant.m32[1])
				return;
			break;
		case 0x401e:
			if (oldmant.m32[1] & 0x80000000)
				return;
			if (oldmant.m32[0] & 1)
				break;
			if (!(oldmant.m32[1] << 1))
				return;
			break;
		case 0x401f ... 0x403d:
			mask = 1 << (0x403d - dest->exp);
			if (!(oldmant.m32[1] & mask))
				return;
			if (oldmant.m32[1] & (mask << 1))
				break;
			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
				return;
			break;
		default:
			return;
		}
		break;
	case FPCR_ROUND_RZ:
		return;
	default:
		if (dest->sign ^ (mode - FPCR_ROUND_RM))
			break;
		return;
	}
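	/* Getting here means the value is rounded away from zero: either
	   round-to-nearest decided to round up, or the RM/RP direction
	   matches the operand's sign (which relies on FPCR_ROUND_RP
	   following FPCR_ROUND_RM).  Add one unit in the last integral
	   place; a carry out of the mantissa leaves 0x80000000... and
	   bumps the exponent instead. */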
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->exp = 0x3fff;
		dest->mant.m64 = 1ULL << 63;
		break;
	case 0x3fff ... 0x401e:
		mask = 1 << (0x401e - dest->exp);
		if (dest->mant.m32[0] += mask)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	case 0x401f ... 0x403e:
		mask = 1 << (0x403e - dest->exp);
		if (dest->mant.m32[1] += mask)
			break;
		if (dest->mant.m32[0] += 1)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	}
}
/* modrem_kernel: Implementation of the FREM and FMOD instructions
   (which are exactly the same, except for the rounding used on the
   intermediate value) */

static struct fp_ext *modrem_kernel(struct fp_ext *dest, struct fp_ext *src,
				    int mode)
{
	struct fp_ext tmp;

	fp_dyadic_check(dest, src);

	/* Infinities and zeros */
	if (IS_INF(dest) || IS_ZERO(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest) || IS_INF(src))
		return dest;

	/* FIXME: there is almost certainly a smarter way to do this */
	fp_copy_ext(&tmp, dest);
	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
	fp_roundint(&tmp, mode);
	fp_fmul(&tmp, src);
	fp_fsub(dest, &tmp);

	/* set the quotient byte */
	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
	return dest;
}
/* fp_fmod: Implements the kernel of the FMOD instruction.

   Again, the argument order is backwards.  The result, as defined in
   the Motorola manuals, is:

   fmod(src,dest) = (dest - (src * floor(dest / src))) */
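/* For example, with dest = 7.0 and src = 2.0, dest / src = 3.5 is chopped
   toward zero to 3.0, so fmod returns 7.0 - 2.0 * 3.0 = 1.0. */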
struct fp_ext *fp_fmod(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fmod\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RZ);
}
/* fp_frem: Implements the kernel of the FREM instruction.

   frem(src,dest) = (dest - (src * round(dest / src)))
 */
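/* With the same operands (dest = 7.0, src = 2.0), dest / src = 3.5 rounds
   to the nearest even integer, 4.0, so frem returns 7.0 - 2.0 * 4.0 = -1.0. */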
struct fp_ext *fp_frem(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "frem\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RN);
}
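/* fp_fint / fp_fintrz: Round the operand to an integral value, using the
   current FPCR rounding mode for FINT and round-to-zero for FINTRZ. */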
struct fp_ext *fp_fint(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fint\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPDATA->rnd);

	return dest;
}

struct fp_ext *fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fintrz\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPCR_ROUND_RZ);

	return dest;
}
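/* fp_fscale: Implements the kernel of the FSCALE instruction, which scales
   the destination by a power of two, dest * 2^(int(src)), by adjusting the
   exponent directly. */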
struct fp_ext *fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
	int scale, oldround;

	dprint(PINSTR, "fscale\n");

	fp_dyadic_check(dest, src);

	/* Infinities */
	if (IS_INF(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/* zeroes */
	if (IS_ZERO(src) || IS_ZERO(dest))
		return dest;

	/* Source exponent out of range */
	if (src->exp >= 0x400c) {
		fp_set_ovrflw(dest);
		return dest;
	}

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	/* new exponent */
	scale += dest->exp;

	if (scale >= 0x7fff) {
		fp_set_ovrflw(dest);
	} else if (scale <= 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -scale);
	} else
		dest->exp = scale;

	return dest;
}