/* 32 and 64-bit millicode, original author Hewlett-Packard
   adapted for gcc by Paul Bame <bame@debian.org>
   and Alan Modra <alan@linuxcare.com.au>.

   Copyright 2001, 2002, 2003 Free Software Foundation, Inc.

   This file is part of GCC and is released under the terms
   of the GNU General Public License as published by the Free Software
   Foundation; either version 2, or (at your option) any later version.
   See the file COPYING in the top-level GCC source directory for a copy
/* Hardware General Registers.  */

/* Hardware Space Registers.  */

/* Hardware Floating Point Registers.  */

/* Hardware Control Registers.  */
sar:	.reg	%cr11	/* Shift Amount Register */

/* Software Architecture General Registers.  */
rp:	.reg	r2	/* return pointer */
mrp:	.reg	r2	/* millicode return pointer */
mrp:	.reg	r31	/* millicode return pointer */
ret0:	.reg	r28	/* return value */
ret1:	.reg	r29	/* return value (high part of double) */
sp:	.reg	r30	/* stack pointer */
dp:	.reg	r27	/* data pointer */
arg0:	.reg	r26	/* argument */
arg1:	.reg	r25	/* argument or high part of double argument */
arg2:	.reg	r24	/* argument */
arg3:	.reg	r23	/* argument or high part of double argument */

/* Software Architecture Space Registers.  */
/*		sr0	; return link from BLE */
sret:	.reg	sr1	/* return value */
sarg:	.reg	sr1	/* argument */
/*		sr4	; PC SPACE tracker */
/*		sr5	; process private data */
/* Frame Offsets (millicode convention!)  Used when calling other
   millicode routines.  Stack unwinding is dependent upon these
   definitions.  */
r31_slot:	.equ	-20	/* "current RP" slot */
sr0_slot:	.equ	-16	/* "static link" slot */
mrp_slot:	.equ	-16	/* "current RP" slot */
psp_slot:	.equ	-8	/* "previous SP" slot */
mrp_slot:	.equ	-20	/* "current RP" slot (replacing "r31_slot") */
#define DEFINE(name,value)name:	.EQU	value
#define RDEFINE(name,value)name:	.REG	value

#define MILLI_BE(lbl)	BE    lbl(sr7,r0)
#define MILLI_BEN(lbl)	BE,n  lbl(sr7,r0)
#define MILLI_BLE(lbl)	BLE   lbl(sr7,r0)
#define MILLI_BLEN(lbl)	BLE,n lbl(sr7,r0)
#define MILLIRETN	BE,n  0(sr0,mrp)
#define MILLIRET	BE    0(sr0,mrp)
#define MILLI_RETN	BE,n  0(sr0,mrp)
#define MILLI_RET	BE    0(sr0,mrp)

#define MILLI_BE(lbl)	B     lbl
#define MILLI_BEN(lbl)	B,n   lbl
#define MILLI_BLE(lbl)	BL    lbl,mrp
#define MILLI_BLEN(lbl)	BL,n  lbl,mrp
#define MILLIRETN	BV,n  0(mrp)
#define MILLIRET	BV    0(mrp)
#define MILLI_RETN	BV,n  0(mrp)
#define MILLI_RET	BV    0(mrp)

#define CAT(a,b)	a##b

#define CAT(a,b)	a/**/b

#define SUBSPA_MILLI	 .section .text
#define SUBSPA_MILLI_DIV .section .text.div,"ax",@progbits!  .align 16
#define SUBSPA_MILLI_MUL .section .text.mul,"ax",@progbits!  .align 16
#define SUBSPA_DATA	 .section .data
#define GLOBAL		 $global$
#define GSYM(sym)	 !sym:
#define LSYM(sym)	 !CAT(.L,sym:)
#define LREF(sym)	 CAT(.L,sym)
/* This used to be .milli, but since link32 places differently named
   sections in different segments, millicode ends up a long way away
   from .text (1meg?).  This way they will be a lot closer.

   The SUBSPA_MILLI_* specify locality sets for certain millicode
   modules in order to ensure that modules that call one another are
   placed close together.  Without locality sets this is unlikely to
   happen because of the Dynamite linker library search algorithm.  We
   want these modules close together so that short calls always reach
   (we don't want to require long calls or use long call stubs).  */
#define SUBSPA_MILLI	 .subspa .text
#define SUBSPA_MILLI_DIV .subspa .text$dv,align=16
#define SUBSPA_MILLI_MUL .subspa .text$mu,align=16
#define ATTR_MILLI	 .attr code,read,execute
#define SUBSPA_DATA	 .subspa .data
#define ATTR_DATA	 .attr init_data,read,write

#define SUBSPA_MILLI	 .subspa $MILLICODE$,QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=8
#define SUBSPA_MILLI_DIV SUBSPA_MILLI
#define SUBSPA_MILLI_MUL SUBSPA_MILLI
#define SUBSPA_DATA	 .subspa $BSS$,quad=1,align=8,access=0x1f,sort=80,zero
#define GLOBAL		 $global$
#define SPACE_DATA	 .space $PRIVATE$,spnum=1,sort=16

#define GSYM(sym)	 !sym
#define LSYM(sym)	 !CAT(L$,sym)
#define LREF(sym)	 CAT(L$,sym)
	.export	$$dyncall,millicode
	bb,>=,n	%r22,30,LREF(1)		; branch if not plabel address
	depi	0,31,2,%r22		; clear the two least significant bits
	ldw	4(%r22),%r19		; load new LTP value
	ldw	0(%r22),%r22		; load address of target
#if defined(LINUX) || defined(NETBSD)
	bv	%r0(%r22)		; branch to the real target
	ldsid	(%sr0,%r22),%r1		; get the "space ident" selected by r22
	mtsp	%r1,%sr0		; move that space identifier into sr0
	be	0(%sr0,%r22)		; branch to the real target
	stw	%r2,-24(%r30)		; save return address into frame marker
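/* A hedged C sketch of the plabel test above (illustrative only; the
   struct and function names are invented).  A function pointer with
   bit 30 set (value & 2) points at a two-word plabel descriptor rather
   than directly at code:

     struct plabel { unsigned int target; unsigned int ltp; };

     unsigned int resolve(unsigned int fptr, unsigned int *ltp)
     {
       if (fptr & 2) {                 // plabel address?
         struct plabel *p = (struct plabel *)(fptr & ~3u);
         *ltp = p->ltp;                // new linkage table pointer (r19)
         return p->target;             // real code address
       }
       return fptr;                    // already a plain code address
     }
*/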
/* ROUTINES:	$$divI, $$divoI

   Single precision divide for signed binary integers.

   The quotient is truncated towards zero.
   The sign of the quotient is the XOR of the signs of the dividend and
   divisor.
   Divide by zero is trapped.
   Divide of -2**31 by -1 is trapped for $$divoI but not for $$divI.

   .	sr0 == return space when called externally

   OTHER REGISTERS AFFECTED:

   .	Causes a trap under the following conditions:
   .		divisor is zero (traps with ADDIT,= 0,25,0)
   .		dividend==-2**31 and divisor==-1 and routine is $$divoI
   .		(traps with ADDO 26,25,0)
   .	Changes memory at the following places:

   .	Suitable for internal or external millicode.
   .	Assumes the special millicode register conventions.

   .	Branches to other millicode routines using BE
   .		$$div_# for # being 2,3,4,5,6,7,8,9,10,12,14,15

   .	For selected divisors, calls a divide by constant routine written by
   .	Karl Pettis.  Eligible divisors are 1..15 excluding 11 and 13.

   .	The only overflow case is -2**31 divided by -1.
   .	Both routines return -2**31 but only $$divoI traps.  */
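/* The overflow case in a hedged C illustration (function name invented):
   the true quotient of -2**31 / -1 is 2**31, which does not fit in a
   signed 32-bit word, so both routines hand back -2**31 and only
   $$divoI raises the trap.

     #include <limits.h>
     int divI_overflows(int dividend, int divisor)
     {
       return dividend == INT_MIN && divisor == -1;  // only possible overflow
     }
*/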
	RDEFINE(retreg,ret1)	/* r29 */

	.import	$$divI_2,millicode
	.import	$$divI_3,millicode
	.import	$$divI_4,millicode
	.import	$$divI_5,millicode
	.import	$$divI_6,millicode
	.import	$$divI_7,millicode
	.import	$$divI_8,millicode
	.import	$$divI_9,millicode
	.import	$$divI_10,millicode
	.import	$$divI_12,millicode
	.import	$$divI_14,millicode
	.import	$$divI_15,millicode
	.export	$$divI,millicode
	.export	$$divoI,millicode
	comib,=,n	-1,arg1,LREF(negative1)	/* when divisor == -1 */
	ldo		-1(arg1),temp		/* is there at most one bit set? */
	and,<>		arg1,temp,r0		/* if not, don't use power of 2 divide */
	addi,>		0,arg1,r0		/* if divisor > 0, use power of 2 divide */

	addi,>=		0,arg0,retreg		/* if numerator is negative, add the */
	add		arg0,temp,retreg	/* (denominator - 1) to correct for shifts */
	extru,=		arg1,15,16,temp		/* test denominator with 0xffff0000 */
	extrs		retreg,15,16,retreg	/* retreg = retreg >> 16 */
	or		arg1,temp,arg1		/* arg1 = arg1 | (arg1 >> 16) */
	ldi		0xcc,temp1		/* setup 0xcc in temp1 */
	extru,=		arg1,23,8,temp		/* test denominator with 0xff00 */
	extrs		retreg,23,24,retreg	/* retreg = retreg >> 8 */
	or		arg1,temp,arg1		/* arg1 = arg1 | (arg1 >> 8) */
	ldi		0xaa,temp		/* setup 0xaa in temp */
	extru,=		arg1,27,4,r0		/* test denominator with 0xf0 */
	extrs		retreg,27,28,retreg	/* retreg = retreg >> 4 */
	and,=		arg1,temp1,r0		/* test denominator with 0xcc */
	extrs		retreg,29,30,retreg	/* retreg = retreg >> 2 */
	and,=		arg1,temp,r0		/* test denominator with 0xaa */
	extrs		retreg,30,31,retreg	/* retreg = retreg >> 1 */

	addi,<		0,arg1,r0		/* if arg1 >= 0, it's not power of 2 */
	b,n		LREF(regular_seq)
	sub		r0,arg1,temp		/* make denominator positive */
	comb,=,n	arg1,temp,LREF(regular_seq)	/* test against 0x80000000 and 0 */
	ldo		-1(temp),retreg		/* is there at most one bit set? */
	and,=		temp,retreg,r0		/* if so, the denominator is power of 2 */
	b,n		LREF(regular_seq)
	sub		r0,arg0,retreg		/* negate numerator */
	comb,=,n	arg0,retreg,LREF(regular_seq)	/* test against 0x80000000 */
	copy		retreg,arg0		/* set up arg0, arg1 and temp */
	copy		temp,arg1		/* before branching to pow2 */
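/* A minimal C sketch of the power-of-2 path above (assuming d = 2**k and
   32-bit ints; not part of the millicode itself).  Truncated signed
   division by 2**k needs the (d-1) bias before the arithmetic shift so
   negative dividends round toward zero instead of toward -infinity:

     int div_pow2(int x, int k)          // 0 <= k < 31
     {
       if (x < 0)
         x += (1 << k) - 1;              // add (denominator - 1)
       return x >> k;                    // arithmetic shift, as extrs does
     }
*/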
	comib,>>=,n	15,arg1,LREF(small_divisor)
	add,>=		0,arg0,retreg		/* move dividend, if retreg < 0, */
	subi		0,retreg,retreg		/* make it positive */
	sub		0,arg1,temp		/* clear carry, */
						/* negate the divisor */
	ds		0,temp,0		/* set V-bit to the comple- */
						/* ment of the divisor sign */
	add		retreg,retreg,retreg	/* shift msb bit into carry */
	ds		r0,arg1,temp		/* 1st divide step, if no carry */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 2nd divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 3rd divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 4th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 5th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 6th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 7th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 8th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 9th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 10th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 11th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 12th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 13th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 14th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 15th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 16th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 17th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 18th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 19th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 20th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 21st divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 22nd divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 23rd divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 24th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 25th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 26th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 27th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 28th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 29th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 30th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 31st divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 32nd divide step, */
	addc		retreg,retreg,retreg	/* shift last retreg bit into retreg */
	xor,>=		arg0,arg1,0		/* get correct sign of quotient */
	sub		0,retreg,retreg		/* based on operand signs */
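/* What the 32 unrolled ds/addc pairs compute, rendered as a hedged C
   loop (plain restoring division; the hardware DS step is the
   conditional-subtract equivalent, so this is a model, not the code):

     unsigned int udiv_steps(unsigned int n, unsigned int d)  // 0 < d < 2**31
     {
       unsigned int q = 0, r = 0;
       for (int i = 31; i >= 0; i--) {   // one pass per ds/addc pair
         r = (r << 1) | ((n >> i) & 1);  // shift next dividend bit in
         q <<= 1;
         if (r >= d) { r -= d; q |= 1; } // the divide step
       }
       return q;
     }
*/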
/* Clear the upper 32 bits of the arg1 register.  We are working with
   small divisors (and 32-bit integers).  We must not be misled
   by "1" bits left in the upper 32 bits.  */
/* table for divisor == 0,1, ... ,15 */
	addit,=	0,arg1,r0	/* trap if divisor == 0 */
	MILLIRET		/* divisor == 1 */
	MILLI_BEN($$divI_2)	/* divisor == 2 */
	MILLI_BEN($$divI_3)	/* divisor == 3 */
	MILLI_BEN($$divI_4)	/* divisor == 4 */
	MILLI_BEN($$divI_5)	/* divisor == 5 */
	MILLI_BEN($$divI_6)	/* divisor == 6 */
	MILLI_BEN($$divI_7)	/* divisor == 7 */
	MILLI_BEN($$divI_8)	/* divisor == 8 */
	MILLI_BEN($$divI_9)	/* divisor == 9 */
	MILLI_BEN($$divI_10)	/* divisor == 10 */
	b	LREF(normal)	/* divisor == 11 */
	MILLI_BEN($$divI_12)	/* divisor == 12 */
	b	LREF(normal)	/* divisor == 13 */
	MILLI_BEN($$divI_14)	/* divisor == 14 */
	MILLI_BEN($$divI_15)	/* divisor == 15 */
	sub	0,arg0,retreg	/* result is negation of dividend */
	addo	arg0,arg1,r0	/* trap iff dividend==0x80000000 && divisor==-1 */
/*
   .	Single precision divide for unsigned integers.

   .	Quotient is truncated towards zero.
   .	Traps on divide by zero.

   .	sr0 == return space when called externally

   OTHER REGISTERS AFFECTED:

   .	Causes a trap under the following conditions:
   .	Changes memory at the following places:

   .	Does not create a stack frame.
   .	Suitable for internal or external millicode.
   .	Assumes the special millicode register conventions.

   .	Branches to other millicode routines using BE:
   .		$$divU_# for 3,5,6,7,9,10,12,14,15

   .	For selected small divisors calls the special divide by constant
   .	routines written by Karl Pettis.  These are: 3,5,6,7,9,10,12,14,15.  */
	RDEFINE(retreg,ret1)	/* r29 */

	.export	$$divU,millicode
	.import	$$divU_3,millicode
	.import	$$divU_5,millicode
	.import	$$divU_6,millicode
	.import	$$divU_7,millicode
	.import	$$divU_9,millicode
	.import	$$divU_10,millicode
	.import	$$divU_12,millicode
	.import	$$divU_14,millicode
	.import	$$divU_15,millicode
/* The subtract is not nullified since it does no harm and can be used
   by the two cases that branch back to "normal".  */
	ldo	-1(arg1),temp		/* is there at most one bit set? */
	and,=	arg1,temp,r0		/* if so, denominator is power of 2 */

	addit,=	0,arg1,0		/* trap for zero dvr */

	extru,=	arg1,15,16,temp		/* test denominator with 0xffff0000 */
	extru	retreg,15,16,retreg	/* retreg = retreg >> 16 */
	or	arg1,temp,arg1		/* arg1 = arg1 | (arg1 >> 16) */
	ldi	0xcc,temp1		/* setup 0xcc in temp1 */
	extru,=	arg1,23,8,temp		/* test denominator with 0xff00 */
	extru	retreg,23,24,retreg	/* retreg = retreg >> 8 */
	or	arg1,temp,arg1		/* arg1 = arg1 | (arg1 >> 8) */
	ldi	0xaa,temp		/* setup 0xaa in temp */
	extru,=	arg1,27,4,r0		/* test denominator with 0xf0 */
	extru	retreg,27,28,retreg	/* retreg = retreg >> 4 */
	and,=	arg1,temp1,r0		/* test denominator with 0xcc */
	extru	retreg,29,30,retreg	/* retreg = retreg >> 2 */
	and,=	arg1,temp,r0		/* test denominator with 0xaa */
	extru	retreg,30,31,retreg	/* retreg = retreg >> 1 */
	comib,>=	15,arg1,LREF(special_divisor)
	subi		0,arg1,temp		/* clear carry, negate the divisor */
	ds		r0,temp,r0		/* set V-bit to 1 */

	add		arg0,arg0,retreg	/* shift msb bit into carry */
	ds		r0,arg1,temp		/* 1st divide step, if no carry */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 2nd divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 3rd divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 4th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 5th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 6th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 7th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 8th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 9th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 10th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 11th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 12th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 13th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 14th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 15th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 16th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 17th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 18th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 19th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 20th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 21st divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 22nd divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 23rd divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 24th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 25th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 26th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 27th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 28th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 29th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 30th divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 31st divide step */
	addc		retreg,retreg,retreg	/* shift retreg with/into carry */
	ds		temp,arg1,temp		/* 32nd divide step, */
	addc		retreg,retreg,retreg	/* shift last retreg bit into retreg */
/* Handle the cases where divisor is a small constant or has high bit on.  */
LSYM(special_divisor)

/*	comib,>,n  0,arg1,LREF(big_divisor) ; nullify previous instruction */

/* Pratap 8/13/90.  The 815 Stirling chip set has a bug that prevents us
   from generating such a blr, comib sequence: a problem in nullification.
   So I rewrote this code.  */

/* Clear the upper 32 bits of the arg1 register.  We are working with
   small divisors (and 32-bit unsigned integers).  We must not be misled
   by "1" bits left in the upper 32 bits.  */

	comib,>	0,arg1,LREF(big_divisor)
LSYM(zero_divisor)	/* this label is here to provide external visibility */
	addit,=	0,arg1,0		/* trap for zero dvr */

	MILLIRET			/* divisor == 1 */

	MILLIRET			/* divisor == 2 */
	extru	arg0,30,31,retreg
	MILLI_BEN($$divU_3)		/* divisor == 3 */

	MILLIRET			/* divisor == 4 */
	extru	arg0,29,30,retreg
	MILLI_BEN($$divU_5)		/* divisor == 5 */

	MILLI_BEN($$divU_6)		/* divisor == 6 */

	MILLI_BEN($$divU_7)		/* divisor == 7 */

	MILLIRET			/* divisor == 8 */
	extru	arg0,28,29,retreg
	MILLI_BEN($$divU_9)		/* divisor == 9 */

	MILLI_BEN($$divU_10)		/* divisor == 10 */

	b	LREF(normal)		/* divisor == 11 */
	ds	r0,temp,r0		/* set V-bit to 1 */
	MILLI_BEN($$divU_12)		/* divisor == 12 */

	b	LREF(normal)		/* divisor == 13 */
	ds	r0,temp,r0		/* set V-bit to 1 */
	MILLI_BEN($$divU_14)		/* divisor == 14 */

	MILLI_BEN($$divU_15)		/* divisor == 15 */
/* Handle the case where the high bit is on in the divisor.
   Compute:	if( dividend>=divisor) quotient=1; else quotient=0;
   Note: dividend>=divisor iff dividend-divisor does not borrow,
   and no borrow iff carry.  */
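/* In C terms (a hedged sketch, function name invented): when the divisor
   has its high bit set, 2*divisor no longer fits in 32 bits, so the
   quotient can only be 0 or 1:

     unsigned int udiv_big(unsigned int x, unsigned int d)  // d >= 2**31
     {
       return x >= d;   // carry out of x - d means quotient 1
     }
*/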
/*
   .	$$remI returns the remainder of the division of two signed 32-bit
   .	integers.  The sign of the remainder is the same as the sign of
   .	the dividend.

   .	sr0 == return space when called externally

   OTHER REGISTERS AFFECTED:

   .	Causes a trap under the following conditions:  DIVIDE BY ZERO
   .	Changes memory at the following places:  NONE

   .	Does not create a stack frame
   .	Is usable for internal or external millicode
   .	Calls other millicode routines via mrp:  NONE
   .	Calls other millicode routines:  NONE  */
	.export	$$remI,MILLICODE
	.export	$$remoI,MILLICODE
	ldo		-1(arg1),tmp		/* is there at most one bit set? */
	and,<>		arg1,tmp,r0		/* if not, don't use power of 2 */
	addi,>		0,arg1,r0		/* if denominator > 0, use power */

	comb,>,n	0,arg0,LREF(neg_num)	/* is numerator < 0? */
	and		arg0,tmp,retreg		/* get the result */

	subi		0,arg0,arg0		/* negate numerator */
	and		arg0,tmp,retreg		/* get the result */
	subi		0,retreg,retreg		/* negate result */

	addi,<		0,arg1,r0		/* if arg1 >= 0, it's not power */

	b,n		LREF(regular_seq)
	sub		r0,arg1,tmp		/* make denominator positive */
	comb,=,n	arg1,tmp,LREF(regular_seq)	/* test against 0x80000000 and 0 */
	ldo		-1(tmp),retreg		/* is there at most one bit set? */
	and,=		tmp,retreg,r0		/* if not, go to regular_seq */
	b,n		LREF(regular_seq)
	comb,>,n	0,arg0,LREF(neg_num_2)	/* if arg0 < 0, negate it */
	and		arg0,retreg,retreg

	subi		0,arg0,tmp		/* test against 0x80000000 */
	and		tmp,retreg,retreg
	addit,=	0,arg1,0		/* trap if div by zero */
	add,>=	0,arg0,retreg		/* move dividend, if retreg < 0, */
	sub	0,retreg,retreg		/* make it positive */
	sub	0,arg1,tmp		/* clear carry, */
					/* negate the divisor */
	ds	0,tmp,0			/* set V-bit to the comple- */
					/* ment of the divisor sign */
	or	0,0,tmp			/* clear tmp */
	add	retreg,retreg,retreg	/* shift msb bit into carry */
	ds	tmp,arg1,tmp		/* 1st divide step, if no carry */
					/* out, msb of quotient = 0 */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 2nd divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 3rd divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 4th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 5th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 6th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 7th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 8th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 9th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 10th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 11th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 12th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 13th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 14th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 15th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 16th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 17th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 18th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 19th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 20th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 21st divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 22nd divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 23rd divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 24th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 25th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 26th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 27th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 28th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 29th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 30th divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 31st divide step */
	addc	retreg,retreg,retreg	/* shift retreg with/into carry */
	ds	tmp,arg1,tmp		/* 32nd divide step, */
	addc	retreg,retreg,retreg	/* shift last bit into retreg */
	movb,>=,n	tmp,retreg,LREF(finish)	/* branch if pos. tmp */
	add,<	arg1,0,0		/* if arg1 > 0, add arg1 */
	add,tr	tmp,arg1,retreg		/* for correcting remainder tmp */
	sub	tmp,arg1,retreg		/* else add absolute value arg1 */

	add,>=	arg0,0,0		/* set sign of remainder */
	sub	0,retreg,retreg		/* to sign of dividend */
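/* This matches the convention ISO C later standardized (C99 "%"): the
   remainder takes the sign of the dividend.  A small illustrative check,
   not part of the millicode:

     #include <assert.h>
     void remI_sign_examples(void)
     {
       assert(-7 %  3 == -1);   // sign follows the dividend
       assert( 7 % -3 ==  1);
       assert(-7 % -3 == -1);
     }
*/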
/*
   .	Single precision divide for remainder with unsigned binary integers.

   .	The remainder must be dividend-(dividend/divisor)*divisor.
   .	Divide by zero is trapped.

   .	sr0 == return space when called externally

   OTHER REGISTERS AFFECTED:

   .	Causes a trap under the following conditions:  DIVIDE BY ZERO
   .	Changes memory at the following places:  NONE

   .	Does not create a stack frame.
   .	Suitable for internal or external millicode.
   .	Assumes the special millicode register conventions.

   .	Calls other millicode routines using mrp: NONE
   .	Calls other millicode routines: NONE  */
	RDEFINE(rmndr,ret1)	/* r29 */

	.export	$$remU,millicode

	ldo	-1(arg1),temp		/* is there at most one bit set? */
	and,=	arg1,temp,r0		/* if not, don't use power of 2 */

	addit,=	0,arg1,r0		/* trap on div by zero */
	and	arg0,temp,rmndr		/* get the result for power of 2 */

	comib,>=,n	0,arg1,LREF(special_case)
	subi	0,arg1,rmndr		/* clear carry, negate the divisor */
	ds	r0,rmndr,r0		/* set V-bit to 1 */
	add	arg0,arg0,temp		/* shift msb bit into carry */
	ds	r0,arg1,rmndr		/* 1st divide step, if no carry */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 2nd divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 3rd divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 4th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 5th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 6th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 7th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 8th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 9th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 10th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 11th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 12th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 13th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 14th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 15th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 16th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 17th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 18th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 19th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 20th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 21st divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 22nd divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 23rd divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 24th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 25th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 26th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 27th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 28th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 29th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 30th divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 31st divide step */
	addc	temp,temp,temp		/* shift temp with/into carry */
	ds	rmndr,arg1,rmndr	/* 32nd divide step, */
	comiclr,<=	0,rmndr,r0
	add	rmndr,arg1,rmndr	/* correction */
/* Putting >= on the last DS and deleting COMICLR does not work!  */

	sub,>>=	arg0,arg1,rmndr
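/* Equivalent hedged C for this special case (function name invented):
   with the divisor's high bit set the quotient is 0 or 1, so the
   remainder is a single conditional subtract:

     unsigned int urem_big(unsigned int x, unsigned int d)  // d >= 2**31
     {
       return x >= d ? x - d : x;
     }
*/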
/*
   .	$$divI_10	$$divU_10
   .	$$divI_12	$$divU_12
   .	$$divI_14	$$divU_14
   .	$$divI_15	$$divU_15
   .	$$divI_17	$$divU_17

   .	Divide by selected constants for single precision binary integers.

   .	sr0 == return space when called externally

   OTHER REGISTERS AFFECTED:

   .	Causes a trap under the following conditions: NONE
   .	Changes memory at the following places:  NONE

   PERMISSIBLE CONTEXT:

   .	Does not create a stack frame.
   .	Suitable for internal or external millicode.
   .	Assumes the special millicode register conventions.

   .	Calls other millicode routines using mrp: NONE
   .	Calls other millicode routines: NONE  */
/* TRUNCATED DIVISION BY SMALL INTEGERS

   We are interested in q(x) = floor(x/y), where x >= 0 and y > 0.

   Let a = floor(z/y), for some choice of z.  Note that z will be
   chosen so that division by z is cheap.

   Let r be the remainder of z/y.  In other words, r = z - ay.

   Now, our method is to choose a value for b such that

   q'(x) = floor((ax+b)/z)

   is equal to q(x) over as large a range of x as possible.  If the
   two are equal over a sufficiently large range, and if it is easy to
   form the product (ax), and it is easy to divide by z, then we can
   perform the division much faster than the general division algorithm.

   So, we want the following to be true:

   .	For x in the following range:

   .		ky <= x <= (k+1)y - 1

   .	k <= (ax+b)/z < (k+1)

   We want to determine b such that this is true for all k in the
   range {0..K} for some maximum K.

   Since (ax+b) is an increasing function of x, we can take each
   bound separately to determine the "best" value for b.

   (ax+b)/z < (k+1)          implies

   a((k+1)y-1) + b < (k+1)z  implies

   b < a + (k+1)(z-ay)       implies  (since r = z - ay)

   b < a + (k+1)r

   This needs to be true for all k in the range {0..K}.  In
   particular, it is true for k = 0 and this leads to a maximum
   acceptable value for b.

   b < a+r or b <= a+r-1

   Taking the other bound, we have

   k <= (ax+b)/z             implies

   k <= (aky+b)/z            implies

   k(z-ay) <= b              implies

   kr <= b

   Clearly, the largest range for k will be achieved by maximizing b,
   when r is not zero.  When r is zero, then the simplest choice for b
   is 0.  When r is not 0, set

   b = a + r - 1.

   Now, by construction, q'(x) = floor((ax+b)/z) = q(x) = floor(x/y)
   for all x in the range (0,(K+1)y-1).

   We need to determine what K is.  Of our two bounds,

   .	b < a+(k+1)r is satisfied for all k >= 0, by construction.

   .	kr <= b is the bound that can fail; it requires
   .	k <= b/r = (a+r-1)/r.

   This is always true if r = 0.  If r is not 0 (the usual case), then
   K = floor((a+r-1)/r) is the maximum value for k.

   Therefore, the formula q'(x) = floor((ax+b)/z) yields the correct
   answer for q(x) = floor(x/y) when x is in the range

   (0,(K+1)y-1)        K = floor((a+r-1)/r)
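   Worked example for the first row of the table below (y = 3, z = 2**32):
   a = floor(2**32/3) = 0x55555555 and r = 1, so K = floor((a+r-1)/r)
   = 0x55555555 and (K+1)y - 1 = 0x55555556 * 3 - 1 = 0x100000001,
   which is the "max x" entry shown for y = 3.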
   To be most useful, we want (K+1)y-1 = (max x) >= 2**32-1 so that
   the formula for q'(x) yields the correct value of q(x) for all x
   representable by a single word in HPPA.

   We are also constrained in that computing the product (ax), adding
   b, and dividing by z must all be done quickly, otherwise we will be
   better off going through the general algorithm using the DS
   instruction, which uses approximately 70 cycles.

   For each y, there is a choice of z which satisfies the constraints
   for (K+1)y >= 2**32.  We may not, however, be able to satisfy the
   timing constraints for arbitrary y.  It seems that z being equal to
   a power of 2 or a power of 2 minus 1 is as good as we can do, since
   it minimizes the time to do division by z.  We want the choice of z
   to also result in a value for (a) that minimizes the computation of
   the product (ax).  This is best achieved if (a) has a regular bit
   pattern (so the multiplication can be done with shifts and adds).
   The value of (a) also needs to be less than 2**32 so the product is
   always guaranteed to fit in 2 words.
   In actual practice, the following should be done:

   1) For negative x, you should take the absolute value and remember
   .  the fact so that the result can be negated.  This obviously does
   .  not apply in the unsigned case.
   2) For even y, you should factor out the power of 2 that divides y
   .  and divide x by it.  You can then proceed by dividing by the
   .  odd factor of y.

   Here is a table of some odd values of y, and corresponding choices
   for z which are "good".
   .	y	z	r	a (hex)		max x (hex)

   .	3	2**32	1	55555555	100000001
   .	5	2**32	1	33333333	100000003
   .	7	2**24-1	0	249249		(infinite)
   .	9	2**24-1	0	1c71c7		(infinite)
   .	11	2**20-1	0	1745d		(infinite)
   .	13	2**24-1	0	13b13b		(infinite)
   .	15	2**32	1	11111111	10000000d
   .	17	2**32	1	f0f0f0f		10000000f
   If r is 1, then b = a+r-1 = a.  This simplifies the computation
   of (ax+b), since you can compute (x+1)(a) instead.  If r is 0,
   then b = 0 is ok to use, which simplifies (ax+b).
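   As a hedged C illustration of the r = 1 case (names invented; this
   relies on a 64-bit intermediate product, which the millicode builds
   from carry chains instead):

     #include <stdint.h>
     uint32_t div3(uint32_t x)
     {
       const uint64_t a = 0x55555555u;   // floor(2**32 / 3); r = 1 so b = a
       // q'(x) = floor((ax + b) / 2**32) = floor(a*(x+1) / 2**32)
       return (uint32_t)((a * ((uint64_t)x + 1)) >> 32);
     }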
   The bit patterns for 55555555, 33333333, and 11111111 are obviously
   very regular.  The bit patterns for the other values of a above are:

   .	7	249249	001001001001001001001001	<< regular >>
   .	9	1c71c7	000111000111000111000111	<< regular >>
   .	11	1745d	000000010111010001011101	<< irregular >>
   .	13	13b13b	000100111011000100111011	<< irregular >>

   The bit patterns for (a) corresponding to (y) of 11 and 13 may be
   too irregular to warrant using this method.

   When z is a power of 2 minus 1, then the division by z is slightly
   more complicated, involving an iterative solution.
   The code presented here solves division by 1 through 17, except for
   11 and 13.  There are algorithms for both signed and unsigned
   division.

   .	divisor	 positive  negative  unsigned

   Now, the algorithm for 7, 9, and 14 is an iterative one.  That is,
   a loop body is executed until the tentative quotient is 0.  The
   number of times the loop body is executed varies depending on the
   dividend, but is never more than two times.  If the dividend is
   less than the divisor, then the loop body is not executed at all.
   Each iteration adds 4 cycles to the timings.

   .	divisor	 positive  negative  unsigned

   .	7	 19+4n	   20+4n     20+4n	n = number of iterations
   .	9	 21+4n	   22+4n     21+4n
   .	14	 21+4n	   22+4n     20+4n
   To give an idea of how the number of iterations varies, here is a
   table of dividend versus number of iterations when dividing by 7.

   .	smallest	largest		required
   .	dividend	dividend	iterations

   .	0x1000006	0xffffffff	2

   There is some overlap in the range of numbers requiring 1 and 2
   iterations.  */
	RDEFINE(x2,arg0)	/* r26 */
	RDEFINE(t1,arg1)	/* r25 */
	RDEFINE(x1,ret1)	/* r29 */

/* NONE of these routines require a stack frame
   ALL of these routines are unwindable from millicode  */

GSYM($$divide_by_constant)
	.export	$$divide_by_constant,millicode
/* Provides a "nice" label for the code covered by the unwind descriptor
   for things like gprof.  */
/* DIVISION BY 2 (shift by 1) */
	.export		$$divI_2,millicode
	extrs		arg0,30,31,ret1

/* DIVISION BY 4 (shift by 2) */
	.export		$$divI_4,millicode
	extrs		arg0,29,30,ret1

/* DIVISION BY 8 (shift by 3) */
	.export		$$divI_8,millicode
	extrs		arg0,28,29,ret1

/* DIVISION BY 16 (shift by 4) */
	.export		$$divI_16,millicode
	extrs		arg0,27,28,ret1

/****************************************************************************
*
*	DIVISION BY DIVISORS OF FFFFFFFF, and powers of 2 times these
*
*	includes 3,5,15,17 and also 6,10,12
*
****************************************************************************/
/* DIVISION BY 3 (use z = 2**32; a = 55555555) */
	.export		$$divI_3,millicode
	comb,<,N	x2,0,LREF(neg3)

	addi		1,x2,x2		/* this cannot overflow */
	extru		x2,1,2,x1	/* multiply by 5 to get started */

	subi		1,x2,x2		/* this cannot overflow */
	extru		x2,1,2,x1	/* multiply by 5 to get started */

	.export		$$divU_3,millicode
	addi		1,x2,x2		/* this CAN overflow */
	shd		x1,x2,30,t1	/* multiply by 5 to get started */

/* DIVISION BY 5 (use z = 2**32; a = 33333333) */
	.export		$$divI_5,millicode
	comb,<,N	x2,0,LREF(neg5)

	addi		3,x2,t1		/* this cannot overflow */
	sh1add		x2,t1,x2	/* multiply by 3 to get started */

	sub		0,x2,x2		/* negate x2 */
	addi		1,x2,x2		/* this cannot overflow */
	shd		0,x2,31,x1	/* get top bit (can be 1) */
	sh1add		x2,x2,x2	/* multiply by 3 to get started */

	.export		$$divU_5,millicode
	addi		1,x2,x2		/* this CAN overflow */
	shd		x1,x2,31,t1	/* multiply by 3 to get started */

/* DIVISION BY 6 (shift to divide by 2 then divide by 3) */
	.export		$$divI_6,millicode
	comb,<,N	x2,0,LREF(neg6)
	extru		x2,30,31,x2	/* divide by 2 */
	addi		5,x2,t1		/* compute 5*(x2+1) = 5*x2+5 */
	sh2add		x2,t1,x2	/* multiply by 5 to get started */

	subi		2,x2,x2		/* negate, divide by 2, and add 1 */
					/* negation and adding 1 are done */
					/* at the same time by the SUBI */
	sh2add		x2,x2,x2	/* multiply by 5 to get started */

	.export		$$divU_6,millicode
	extru		x2,30,31,x2	/* divide by 2 */
	addi		1,x2,x2		/* cannot carry */
	shd		0,x2,30,x1	/* multiply by 5 to get started */

/* DIVISION BY 10 (shift to divide by 2 then divide by 5) */
	.export		$$divU_10,millicode
	extru		x2,30,31,x2	/* divide by 2 */
	addi		3,x2,t1		/* compute 3*(x2+1) = (3*x2)+3 */
	sh1add		x2,t1,x2	/* multiply by 3 to get started */
	shd		x1,x2,28,t1	/* multiply by 0x11 */
	shd		x1,x2,24,t1	/* multiply by 0x101 */
	shd		x1,x2,16,t1	/* multiply by 0x10001 */

	.export		$$divI_10,millicode
	comb,<		x2,0,LREF(neg10)
	extru		x2,30,31,x2	/* divide by 2 */
	addib,TR	1,x2,LREF(pos)	/* add 1 (cannot overflow) */
	sh1add		x2,x2,x2	/* multiply by 3 to get started */

	subi		2,x2,x2		/* negate, divide by 2, and add 1 */
					/* negation and adding 1 are done */
					/* at the same time by the SUBI */
	sh1add		x2,x2,x2	/* multiply by 3 to get started */
	shd		x1,x2,28,t1	/* multiply by 0x11 */
	shd		x1,x2,24,t1	/* multiply by 0x101 */
	shd		x1,x2,16,t1	/* multiply by 0x10001 */

/* DIVISION BY 12 (shift to divide by 4 then divide by 3) */
	.export		$$divI_12,millicode
	comb,<		x2,0,LREF(neg12)
	extru		x2,29,30,x2	/* divide by 4 */
	addib,tr	1,x2,LREF(pos)	/* compute 5*(x2+1) = 5*x2+5 */
	sh2add		x2,x2,x2	/* multiply by 5 to get started */

	subi		4,x2,x2		/* negate, divide by 4, and add 1 */
					/* negation and adding 1 are done */
					/* at the same time by the SUBI */
	sh2add		x2,x2,x2	/* multiply by 5 to get started */

	.export		$$divU_12,millicode
	extru		x2,29,30,x2	/* divide by 4 */
	addi		5,x2,t1		/* cannot carry */
	sh2add		x2,t1,x2	/* multiply by 5 to get started */

/* DIVISION BY 15 (use z = 2**32; a = 11111111) */
	.export		$$divI_15,millicode
	comb,<		x2,0,LREF(neg15)

	addib,tr	1,x2,LREF(pos)+4

	.export		$$divU_15,millicode
	addi		1,x2,x2		/* this CAN overflow */

/* DIVISION BY 17 (use z = 2**32; a = f0f0f0f) */
	.export		$$divI_17,millicode
	comb,<,n	x2,0,LREF(neg17)
	addi		1,x2,x2		/* this cannot overflow */
	shd		0,x2,28,t1	/* multiply by 0xf to get started */

	subi		1,x2,x2		/* this cannot overflow */
	shd		0,x2,28,t1	/* multiply by 0xf to get started */

	.export		$$divU_17,millicode
	addi		1,x2,x2		/* this CAN overflow */
	shd		x1,x2,28,t1	/* multiply by 0xf to get started */
/* DIVISION BY DIVISORS OF FFFFFF, and powers of 2 times these
   includes 7,9 and also 14

   Also, in order to divide by z = 2**24-1, we approximate by dividing
   by (z+1) = 2**24 (which is easy), and then correcting.

   So, to compute (ax)/z, compute q' = (ax)/(z+1) and r = (ax) mod (z+1).
   Then the true remainder of (ax)/z is (q'+r).  Repeat the process
   with this new remainder, adding the tentative quotients together,
   until a tentative quotient is 0 (and then we are done).  There is
   one last correction to be done.  It is possible that (q'+r) = z.
   If so, then (q'+r)/(z+1) = 0 and it looks like we are done.  But,
   in fact, we need to add 1 more to the quotient.  Now, it turns
   out that this happens if and only if the original value x is
   an exact multiple of y.  So, to avoid a three instruction test at
   the end, instead use 1 instruction to add 1 to x at the beginning.  */
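/* The correction loop above, as a hedged C sketch (assumes the 64-bit
   product ax as input; names invented):

     #include <stdint.h>
     uint32_t div_by_z(uint64_t ax)          // floor(ax / (2**24 - 1))
     {
       uint32_t q = 0;
       while (ax >> 24) {                    // tentative quotient q' != 0
         uint32_t qp = (uint32_t)(ax >> 24); // q' = ax / (z+1)
         uint32_t r  = (uint32_t)(ax & 0xffffff);  // r = ax mod (z+1)
         q += qp;                            // accumulate quotients
         ax = (uint64_t)qp + r;              // continue with q' + r
       }
       return q;   // the add-1-to-x trick above covers the (q'+r) == z case
     }
*/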
/* DIVISION BY 7 (use z = 2**24-1; a = 249249) */
	.export		$$divI_7,millicode
	comb,<,n	x2,0,LREF(neg7)
	addi		1,x2,x2		/* cannot overflow */

/* computed <t1,x2>.  Now divide it by (2**24 - 1) */
	shd,=		t1,x2,24,t1	/* tentative quotient */
	addb,tr		t1,x1,LREF(2)	/* add to previous quotient */
	extru		x2,31,24,x2	/* new remainder (unadjusted) */

	addb,tr		t1,x2,LREF(1)	/* adjust remainder */
	extru,=		x2,7,8,t1	/* new quotient */

	subi		1,x2,x2		/* negate x2 and add 1 */

/* computed <t1,x2>.  Now divide it by (2**24 - 1) */
	shd,=		t1,x2,24,t1	/* tentative quotient */
	addb,tr		t1,x1,LREF(4)	/* add to previous quotient */
	extru		x2,31,24,x2	/* new remainder (unadjusted) */

	sub		0,x1,x1		/* negate result */

	addb,tr		t1,x2,LREF(3)	/* adjust remainder */
	extru,=		x2,7,8,t1	/* new quotient */

	.export		$$divU_7,millicode
	addi		1,x2,x2		/* can carry */

/* DIVISION BY 9 (use z = 2**24-1; a = 1c71c7) */
	.export		$$divI_9,millicode
	comb,<,n	x2,0,LREF(neg9)
	addi		1,x2,x2		/* cannot overflow */

	subi		1,x2,x2		/* negate and add 1 */

	.export		$$divU_9,millicode
	addi		1,x2,x2		/* can carry */

/* DIVISION BY 14 (shift to divide by 2 then divide by 7) */
	.export		$$divI_14,millicode
	comb,<,n	x2,0,LREF(neg14)
	.export		$$divU_14,millicode
	b		LREF(7)		/* go to 7 case */
	extru		x2,30,31,x2	/* divide by 2 */

	subi		2,x2,x2		/* negate (and add 2) */
	extru		x2,30,31,x2	/* divide by 2 */
/* VERSION "@(#)$$mulI $ Revision: 12.4 $ $ Date: 94/03/17 17:18:51 $" */
/******************************************************************************
This routine is used on PA2.0 processors when gcc -mno-fpregs is used

$$mulI multiplies two single word integers, giving a single
word result.

	sr0 == return space when called externally

OTHER REGISTERS AFFECTED:

	Causes a trap under the following conditions:  NONE
	Changes memory at the following places:  NONE

PERMISSIBLE CONTEXT:

	Does not create a stack frame
	Is usable for internal or external millicode

	Calls other millicode routines via mrp:  NONE
	Calls other millicode routines:  NONE

***************************************************************************/
#define	a0__128a0	zdep	a0,24,25,a0
#define	a0__256a0	zdep	a0,23,24,a0
#define	a1_ne_0_b_l0	comb,<>	a1,0,LREF(l0)
#define	a1_ne_0_b_l1	comb,<>	a1,0,LREF(l1)
#define	a1_ne_0_b_l2	comb,<>	a1,0,LREF(l2)
#define	b_n_ret_t0	b,n	LREF(ret_t0)
#define	b_e_shift	b	LREF(e_shift)
#define	b_e_t0ma0	b	LREF(e_t0ma0)
#define	b_e_t0		b	LREF(e_t0)
#define	b_e_t0a0	b	LREF(e_t0a0)
#define	b_e_t02a0	b	LREF(e_t02a0)
#define	b_e_t04a0	b	LREF(e_t04a0)
#define	b_e_2t0		b	LREF(e_2t0)
#define	b_e_2t0a0	b	LREF(e_2t0a0)
#define	b_e_2t04a0	b	LREF(e2t04a0)
#define	b_e_3t0		b	LREF(e_3t0)
#define	b_e_4t0		b	LREF(e_4t0)
#define	b_e_4t0a0	b	LREF(e_4t0a0)
#define	b_e_4t08a0	b	LREF(e4t08a0)
#define	b_e_5t0		b	LREF(e_5t0)
#define	b_e_8t0		b	LREF(e_8t0)
#define	b_e_8t0a0	b	LREF(e_8t0a0)
#define	r__r_a0		add	r,a0,r
#define	r__r_2a0	sh1add	a0,r,r
#define	r__r_4a0	sh2add	a0,r,r
#define	r__r_8a0	sh3add	a0,r,r
#define	r__r_t0		add	r,t0,r
#define	r__r_2t0	sh1add	t0,r,r
#define	r__r_4t0	sh2add	t0,r,r
#define	r__r_8t0	sh3add	t0,r,r
#define	t0__3a0		sh1add	a0,a0,t0
#define	t0__4a0		sh2add	a0,0,t0
#define	t0__5a0		sh2add	a0,a0,t0
#define	t0__8a0		sh3add	a0,0,t0
#define	t0__9a0		sh3add	a0,a0,t0
#define	t0__16a0	zdep	a0,27,28,t0
#define	t0__32a0	zdep	a0,26,27,t0
#define	t0__64a0	zdep	a0,25,26,t0
#define	t0__128a0	zdep	a0,24,25,t0
#define	t0__t0ma0	sub	t0,a0,t0
#define	t0__t0_a0	add	t0,a0,t0
#define	t0__t0_2a0	sh1add	a0,t0,t0
#define	t0__t0_4a0	sh2add	a0,t0,t0
#define	t0__t0_8a0	sh3add	a0,t0,t0
#define	t0__2t0_a0	sh1add	t0,a0,t0
#define	t0__3t0		sh1add	t0,t0,t0
#define	t0__4t0		sh2add	t0,0,t0
#define	t0__4t0_a0	sh2add	t0,a0,t0
#define	t0__5t0		sh2add	t0,t0,t0
#define	t0__8t0		sh3add	t0,0,t0
#define	t0__8t0_a0	sh3add	t0,a0,t0
#define	t0__9t0		sh3add	t0,t0,t0
#define	t0__16t0	zdep	t0,27,28,t0
#define	t0__32t0	zdep	t0,26,27,t0
#define	t0__256a0	zdep	a0,23,24,t0
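/* The shift-and-add building blocks above, in hedged C form: PA-RISC
   shNadd computes (x << N) + y in one instruction, so small constant
   multiples chain together (helper names invented):

     static unsigned sh1add(unsigned x, unsigned y) { return (x << 1) + y; }
     static unsigned sh2add(unsigned x, unsigned y) { return (x << 2) + y; }
     static unsigned sh3add(unsigned x, unsigned y) { return (x << 3) + y; }

   For example, t0__9a0 is sh3add(a0, a0) == 9*a0, and the x45 recipe
   below (t0__9a0 then t0__5t0) computes 5*(9*a0) == 45*a0.  */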
	.export	$$mulI,millicode
	combt,<<=	a1,a0,LREF(l4)	/* swap args if unsigned a1>a0 */
	copy		0,r		/* zero out the result */
	xor		a0,a1,a0	/* swap a0 & a1 using the */
	xor		a0,a1,a1	/* old xor trick */

	combt,<=	0,a0,LREF(l3)	/* if a0>=0 then proceed like unsigned */
	zdep		a1,30,8,t0	/* t0 = (a1&0xff)<<1 ********* */
	sub,>		0,a1,t0		/* otherwise negate both and */
	combt,<=,n	a0,t0,LREF(l2)	/* swap back if |a0|<|a1| */

	movb,tr,n	t0,a0,LREF(l2)	/* 10th inst.  */

LSYM(l0)	r__r_t0			/* add in this partial product */
LSYM(l1)	a0__256a0		/* a0 <<= 8 ****************** */
LSYM(l2)	zdep	a1,30,8,t0	/* t0 = (a1&0xff)<<1 ********* */
LSYM(l3)	blr	t0,0		/* case on these 8 bits ****** */
	extru		a1,23,24,a1	/* a1 >>= 8 ****************** */

/*16 insts before this.  */
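/* A hedged C model of the control flow above (not the real code path):
   consume the smaller operand a1 one byte at a time, dispatch each byte
   through the 256-entry blr table below (modeled here as a plain
   multiply), and shift a0 left 8 bits between rounds:

     unsigned int mulI(unsigned int a0, unsigned int a1)
     {
       unsigned int r = 0;
       while (a1) {
         r += a0 * (a1 & 0xff);   // one table entry's shift-add recipe
         a1 >>= 8;                // extru a1,23,24,a1
         a0 <<= 8;                // a0__256a0
       }
       return r;
     }
*/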
/* a0 <<= 8 ************************** */
LSYM(x0)	a1_ne_0_b_l2	! a0__256a0	! MILLIRETN	! nop
LSYM(x1)	a1_ne_0_b_l1	! r__r_a0	! MILLIRETN	! nop
LSYM(x2)	a1_ne_0_b_l1	! r__r_2a0	! MILLIRETN	! nop
LSYM(x3)	a1_ne_0_b_l0	! t0__3a0	! MILLIRET	! r__r_t0
LSYM(x4)	a1_ne_0_b_l1	! r__r_4a0	! MILLIRETN	! nop
LSYM(x5)	a1_ne_0_b_l0	! t0__5a0	! MILLIRET	! r__r_t0
LSYM(x6)	t0__3a0		! a1_ne_0_b_l1	! r__r_2t0	! MILLIRETN
LSYM(x7)	t0__3a0		! a1_ne_0_b_l0	! r__r_4a0	! b_n_ret_t0
LSYM(x8)	a1_ne_0_b_l1	! r__r_8a0	! MILLIRETN	! nop
LSYM(x9)	a1_ne_0_b_l0	! t0__9a0	! MILLIRET	! r__r_t0
LSYM(x10)	t0__5a0		! a1_ne_0_b_l1	! r__r_2t0	! MILLIRETN
LSYM(x11)	t0__3a0		! a1_ne_0_b_l0	! r__r_8a0	! b_n_ret_t0
LSYM(x12)	t0__3a0		! a1_ne_0_b_l1	! r__r_4t0	! MILLIRETN
LSYM(x13)	t0__5a0		! a1_ne_0_b_l0	! r__r_8a0	! b_n_ret_t0
LSYM(x14)	t0__3a0		! t0__2t0_a0	! b_e_shift	! r__r_2t0
LSYM(x15)	t0__5a0		! a1_ne_0_b_l0	! t0__3t0	! b_n_ret_t0
LSYM(x16)	t0__16a0	! a1_ne_0_b_l1	! r__r_t0	! MILLIRETN
LSYM(x17)	t0__9a0		! a1_ne_0_b_l0	! t0__t0_8a0	! b_n_ret_t0
LSYM(x18)	t0__9a0		! a1_ne_0_b_l1	! r__r_2t0	! MILLIRETN
LSYM(x19)	t0__9a0		! a1_ne_0_b_l0	! t0__2t0_a0	! b_n_ret_t0
LSYM(x20)	t0__5a0		! a1_ne_0_b_l1	! r__r_4t0	! MILLIRETN
LSYM(x21)	t0__5a0		! a1_ne_0_b_l0	! t0__4t0_a0	! b_n_ret_t0
LSYM(x22)	t0__5a0		! t0__2t0_a0	! b_e_shift	! r__r_2t0
LSYM(x23)	t0__5a0		! t0__2t0_a0	! b_e_t0	! t0__2t0_a0
LSYM(x24)	t0__3a0		! a1_ne_0_b_l1	! r__r_8t0	! MILLIRETN
LSYM(x25)	t0__5a0		! a1_ne_0_b_l0	! t0__5t0	! b_n_ret_t0
LSYM(x26)	t0__3a0		! t0__4t0_a0	! b_e_shift	! r__r_2t0
LSYM(x27)	t0__3a0		! a1_ne_0_b_l0	! t0__9t0	! b_n_ret_t0
LSYM(x28)	t0__3a0		! t0__2t0_a0	! b_e_shift	! r__r_4t0
LSYM(x29)	t0__3a0		! t0__2t0_a0	! b_e_t0	! t0__4t0_a0
LSYM(x30)	t0__5a0		! t0__3t0	! b_e_shift	! r__r_2t0
LSYM(x31)	t0__32a0	! a1_ne_0_b_l0	! t0__t0ma0	! b_n_ret_t0
LSYM(x32)	t0__32a0	! a1_ne_0_b_l1	! r__r_t0	! MILLIRETN
LSYM(x33)	t0__8a0		! a1_ne_0_b_l0	! t0__4t0_a0	! b_n_ret_t0
LSYM(x34)	t0__16a0	! t0__t0_a0	! b_e_shift	! r__r_2t0
LSYM(x35)	t0__9a0		! t0__3t0	! b_e_t0	! t0__t0_8a0
LSYM(x36)	t0__9a0		! a1_ne_0_b_l1	! r__r_4t0	! MILLIRETN
LSYM(x37)	t0__9a0		! a1_ne_0_b_l0	! t0__4t0_a0	! b_n_ret_t0
LSYM(x38)	t0__9a0		! t0__2t0_a0	! b_e_shift	! r__r_2t0
LSYM(x39)	t0__9a0		! t0__2t0_a0	! b_e_t0	! t0__2t0_a0
LSYM(x40)	t0__5a0		! a1_ne_0_b_l1	! r__r_8t0	! MILLIRETN
LSYM(x41)	t0__5a0		! a1_ne_0_b_l0	! t0__8t0_a0	! b_n_ret_t0
LSYM(x42)	t0__5a0		! t0__4t0_a0	! b_e_shift	! r__r_2t0
LSYM(x43)	t0__5a0		! t0__4t0_a0	! b_e_t0	! t0__2t0_a0
LSYM(x44)	t0__5a0		! t0__2t0_a0	! b_e_shift	! r__r_4t0
LSYM(x45)	t0__9a0		! a1_ne_0_b_l0	! t0__5t0	! b_n_ret_t0
LSYM(x46)	t0__9a0		! t0__5t0	! b_e_t0	! t0__t0_a0
LSYM(x47)	t0__9a0		! t0__5t0	! b_e_t0	! t0__t0_2a0
LSYM(x48)	t0__3a0		! a1_ne_0_b_l0	! t0__16t0	! b_n_ret_t0
LSYM(x49)	t0__9a0		! t0__5t0	! b_e_t0	! t0__t0_4a0
LSYM(x50)	t0__5a0		! t0__5t0	! b_e_shift	! r__r_2t0
LSYM(x51)	t0__9a0		! t0__t0_8a0	! b_e_t0	! t0__3t0
LSYM(x52)	t0__3a0		! t0__4t0_a0	! b_e_shift	! r__r_4t0
LSYM(x53)	t0__3a0		! t0__4t0_a0	! b_e_t0	! t0__4t0_a0
LSYM(x54)	t0__9a0		! t0__3t0	! b_e_shift	! r__r_2t0
LSYM(x55)	t0__9a0		! t0__3t0	! b_e_t0	! t0__2t0_a0
LSYM(x56)	t0__3a0		! t0__2t0_a0	! b_e_shift	! r__r_8t0
LSYM(x57)	t0__9a0		! t0__2t0_a0	! b_e_t0	! t0__3t0
LSYM(x58)	t0__3a0		! t0__2t0_a0	! b_e_2t0	! t0__4t0_a0
LSYM(x59)	t0__9a0		! t0__2t0_a0	! b_e_t02a0	! t0__3t0
LSYM(x60)	t0__5a0		! t0__3t0	! b_e_shift	! r__r_4t0
LSYM(x61)	t0__5a0		! t0__3t0	! b_e_t0	! t0__4t0_a0
LSYM(x62)	t0__32a0	! t0__t0ma0	! b_e_shift	! r__r_2t0
LSYM(x63)	t0__64a0	! a1_ne_0_b_l0	! t0__t0ma0	! b_n_ret_t0
LSYM(x64)	t0__64a0	! a1_ne_0_b_l1	! r__r_t0	! MILLIRETN
LSYM(x65)	t0__8a0		! a1_ne_0_b_l0	! t0__8t0_a0	! b_n_ret_t0
LSYM(x66)	t0__32a0	! t0__t0_a0	! b_e_shift	! r__r_2t0
LSYM(x67)	t0__8a0		! t0__4t0_a0	! b_e_t0	! t0__2t0_a0
LSYM(x68)	t0__8a0		! t0__2t0_a0	! b_e_shift	! r__r_4t0
LSYM(x69)	t0__8a0		! t0__2t0_a0	! b_e_t0	! t0__4t0_a0
LSYM(x70)	t0__64a0	! t0__t0_4a0	! b_e_t0	! t0__t0_2a0
LSYM(x71)	t0__9a0		! t0__8t0	! b_e_t0	! t0__t0ma0
LSYM(x72)	t0__9a0		! a1_ne_0_b_l1	! r__r_8t0	! MILLIRETN
LSYM(x73)	t0__9a0		! t0__8t0_a0	! b_e_shift	! r__r_t0
LSYM(x74)	t0__9a0		! t0__4t0_a0	! b_e_shift	! r__r_2t0
LSYM(x75)	t0__9a0		! t0__4t0_a0	! b_e_t0	! t0__2t0_a0
LSYM(x76)	t0__9a0		! t0__2t0_a0	! b_e_shift	! r__r_4t0
LSYM(x77)	t0__9a0		! t0__2t0_a0	! b_e_t0	! t0__4t0_a0
LSYM(x78)	t0__9a0		! t0__2t0_a0	! b_e_2t0	! t0__2t0_a0
LSYM(x79)	t0__16a0	! t0__5t0	! b_e_t0	! t0__t0ma0
LSYM(x80)	t0__16a0	! t0__5t0	! b_e_shift	! r__r_t0
LSYM(x81)	t0__9a0		! t0__9t0	! b_e_shift	! r__r_t0
LSYM(x82)	t0__5a0		! t0__8t0_a0	! b_e_shift	! r__r_2t0
LSYM(x83)	t0__5a0		! t0__8t0_a0	! b_e_t0	! t0__2t0_a0
LSYM(x84)	t0__5a0		! t0__4t0_a0	! b_e_shift	! r__r_4t0
LSYM(x85)	t0__8a0		! t0__2t0_a0	! b_e_t0	! t0__5t0
LSYM(x86)	t0__5a0		! t0__4t0_a0	! b_e_2t0	! t0__2t0_a0
LSYM(x87)	t0__9a0		! t0__9t0	! b_e_t02a0	! t0__t0_4a0
LSYM(x88)	t0__5a0		! t0__2t0_a0	! b_e_shift	! r__r_8t0
LSYM(x89)	t0__5a0		! t0__2t0_a0	! b_e_t0	! t0__8t0_a0
LSYM(x90)	t0__9a0		! t0__5t0	! b_e_shift	! r__r_2t0
LSYM(x91)	t0__9a0		! t0__5t0	! b_e_t0	! t0__2t0_a0
LSYM(x92)	t0__5a0		! t0__2t0_a0	! b_e_4t0	! t0__2t0_a0
LSYM(x93)	t0__32a0	! t0__t0ma0	! b_e_t0	! t0__3t0
LSYM(x94)	t0__9a0		! t0__5t0	! b_e_2t0	! t0__t0_2a0
LSYM(x95)	t0__9a0		! t0__2t0_a0	! b_e_t0	! t0__5t0
LSYM(x96)	t0__8a0		! t0__3t0	! b_e_shift	! r__r_4t0
LSYM(x97)	t0__8a0		! t0__3t0	! b_e_t0	! t0__4t0_a0
LSYM(x98)	t0__32a0	! t0__3t0	! b_e_t0	! t0__t0_2a0
LSYM(x99)	t0__8a0		! t0__4t0_a0	! b_e_t0	! t0__3t0
LSYM(x100)	t0__5a0		! t0__5t0	! b_e_shift	! r__r_4t0
LSYM(x101)	t0__5a0		! t0__5t0	! b_e_t0	! t0__4t0_a0
LSYM(x102)	t0__32a0	! t0__t0_2a0	! b_e_t0	! t0__3t0
LSYM(x103)	t0__5a0		! t0__5t0	! b_e_t02a0	! t0__4t0_a0
LSYM(x104)	t0__3a0		! t0__4t0_a0	! b_e_shift	! r__r_8t0
LSYM(x105)	t0__5a0		! t0__4t0_a0	! b_e_t0	! t0__5t0
LSYM(x106)	t0__3a0		! t0__4t0_a0	! b_e_2t0	! t0__4t0_a0
LSYM(x107)	t0__9a0		! t0__t0_4a0	! b_e_t02a0	! t0__8t0_a0
LSYM(x108)	t0__9a0		! t0__3t0	! b_e_shift	! r__r_4t0
LSYM(x109)	t0__9a0		! t0__3t0	! b_e_t0	! t0__4t0_a0
LSYM(x110)	t0__9a0		! t0__3t0	! b_e_2t0	! t0__2t0_a0
LSYM(x111)	t0__9a0		! t0__4t0_a0	! b_e_t0	! t0__3t0
LSYM(x112)	t0__3a0		! t0__2t0_a0	! b_e_t0	! t0__16t0
LSYM(x113)	t0__9a0		! t0__4t0_a0	! b_e_t02a0	! t0__3t0
LSYM(x114)	t0__9a0		! t0__2t0_a0	! b_e_2t0	! t0__3t0
LSYM(x115)	t0__9a0		! t0__2t0_a0	! b_e_2t0a0	! t0__3t0
LSYM(x116)	t0__3a0		! t0__2t0_a0	! b_e_4t0	! t0__4t0_a0
LSYM(x117)	t0__3a0		! t0__4t0_a0	! b_e_t0	! t0__9t0
LSYM(x118)	t0__3a0		! t0__4t0_a0	! b_e_t0a0	! t0__9t0
LSYM(x119)	t0__3a0		! t0__4t0_a0	! b_e_t02a0	! t0__9t0
LSYM(x120)	t0__5a0		! t0__3t0	! b_e_shift	! r__r_8t0
LSYM(x121)	t0__5a0		! t0__3t0	! b_e_t0	! t0__8t0_a0
LSYM(x122)	t0__5a0		! t0__3t0	! b_e_2t0	! t0__4t0_a0
LSYM(x123)	t0__5a0		! t0__8t0_a0	! b_e_t0	! t0__3t0
LSYM(x124)	t0__32a0	! t0__t0ma0	! b_e_shift	! r__r_4t0
LSYM(x125)	t0__5a0		! t0__5t0	! b_e_t0	! t0__5t0
LSYM(x126)	t0__64a0	! t0__t0ma0	! b_e_shift	! r__r_2t0
LSYM(x127)	t0__128a0	! a1_ne_0_b_l0	! t0__t0ma0	! b_n_ret_t0
LSYM(x128)	t0__128a0	! a1_ne_0_b_l1	! r__r_t0	! MILLIRETN
LSYM(x129)	t0__128a0	! a1_ne_0_b_l0	! t0__t0_a0	! b_n_ret_t0
LSYM(x130)	t0__64a0	! t0__t0_a0	! b_e_shift	! r__r_2t0
LSYM(x131)	t0__8a0		! t0__8t0_a0	! b_e_t0	! t0__2t0_a0
LSYM(x132)	t0__8a0		! t0__4t0_a0	! b_e_shift	! r__r_4t0
LSYM(x133)	t0__8a0		! t0__4t0_a0	! b_e_t0	! t0__4t0_a0
LSYM(x134)	t0__8a0		! t0__4t0_a0	! b_e_2t0	! t0__2t0_a0
LSYM(x135)	t0__9a0		! t0__5t0	! b_e_t0	! t0__3t0
LSYM(x136)	t0__8a0		! t0__2t0_a0	! b_e_shift	! r__r_8t0
LSYM(x137)	t0__8a0		! t0__2t0_a0	! b_e_t0	! t0__8t0_a0
LSYM(x138)	t0__8a0		! t0__2t0_a0	! b_e_2t0	! t0__4t0_a0
LSYM(x139)	t0__8a0		! t0__2t0_a0	! b_e_2t0a0	! t0__4t0_a0
LSYM(x140)	t0__3a0		! t0__2t0_a0	! b_e_4t0	! t0__5t0
LSYM(x141)	t0__8a0		! t0__2t0_a0	! b_e_4t0a0	! t0__2t0_a0
LSYM(x142)	t0__9a0		! t0__8t0	! b_e_2t0	! t0__t0ma0
LSYM(x143)	t0__16a0	! t0__9t0	! b_e_t0	! t0__t0ma0
LSYM(x144)	t0__9a0		! t0__8t0	! b_e_shift	! r__r_2t0
LSYM(x145)	t0__9a0		! t0__8t0	! b_e_t0	! t0__2t0_a0
LSYM(x146)	t0__9a0		! t0__8t0_a0	! b_e_shift	! r__r_2t0
LSYM(x147)	t0__9a0		! t0__8t0_a0	! b_e_t0	! t0__2t0_a0
LSYM(x148)	t0__9a0		! t0__4t0_a0	! b_e_shift	! r__r_4t0
LSYM(x149)	t0__9a0		! t0__4t0_a0	! b_e_t0	! t0__4t0_a0
LSYM(x150)	t0__9a0		! t0__4t0_a0	! b_e_2t0	! t0__2t0_a0
LSYM(x151)	t0__9a0		! t0__4t0_a0	! b_e_2t0a0	! t0__2t0_a0
LSYM(x152)	t0__9a0		! t0__2t0_a0	! b_e_shift	! r__r_8t0
LSYM(x153)	t0__9a0		! t0__2t0_a0	! b_e_t0	! t0__8t0_a0
LSYM(x154)	t0__9a0		! t0__2t0_a0	! b_e_2t0	! t0__4t0_a0
LSYM(x155)	t0__32a0	! t0__t0ma0	! b_e_t0	! t0__5t0
LSYM(x156)	t0__9a0		! t0__2t0_a0	! b_e_4t0	! t0__2t0_a0
LSYM(x157)	t0__32a0	! t0__t0ma0	! b_e_t02a0	! t0__5t0
LSYM(x158)	t0__16a0	! t0__5t0	! b_e_2t0	! t0__t0ma0
LSYM(x159)	t0__32a0	! t0__5t0	! b_e_t0	! t0__t0ma0
LSYM(x160)	t0__5a0		! t0__4t0	! b_e_shift	! r__r_8t0
LSYM(x161)	t0__8a0		! t0__5t0	! b_e_t0	! t0__4t0_a0
LSYM(x162)	t0__9a0		! t0__9t0	! b_e_shift	! r__r_2t0
LSYM(x163)	t0__9a0		! t0__9t0	! b_e_t0	! t0__2t0_a0
LSYM(x164)	t0__5a0		! t0__8t0_a0	! b_e_shift	! r__r_4t0
LSYM(x165)	t0__8a0		! t0__4t0_a0	! b_e_t0	! t0__5t0
LSYM(x166)	t0__5a0		! t0__8t0_a0	! b_e_2t0	! t0__2t0_a0
LSYM(x167)	t0__5a0		! t0__8t0_a0	! b_e_2t0a0	! t0__2t0_a0
LSYM(x168)	t0__5a0		! t0__4t0_a0	! b_e_shift	! r__r_8t0
LSYM(x169)	t0__5a0		! t0__4t0_a0	! b_e_t0	! t0__8t0_a0
LSYM(x170)	t0__32a0	! t0__t0_2a0	! b_e_t0	! t0__5t0
LSYM(x171)	t0__9a0		! t0__2t0_a0	! b_e_t0	! t0__9t0
LSYM(x172)	t0__5a0		! t0__4t0_a0	! b_e_4t0	! t0__2t0_a0
LSYM(x173)	t0__9a0		! t0__2t0_a0	! b_e_t02a0	! t0__9t0
LSYM(x174)	t0__32a0	! t0__t0_2a0	! b_e_t04a0	! t0__5t0
LSYM(x175)	t0__8a0		! t0__2t0_a0	! b_e_5t0	! t0__2t0_a0
LSYM(x176)	t0__5a0		! t0__4t0_a0	! b_e_8t0	! t0__t0_a0
LSYM(x177)	t0__5a0		! t0__4t0_a0	! b_e_8t0a0	! t0__t0_a0
LSYM(x178)	t0__5a0		! t0__2t0_a0	! b_e_2t0	! t0__8t0_a0
LSYM(x179)	t0__5a0		! t0__2t0_a0	! b_e_2t0a0	! t0__8t0_a0
LSYM(x180)	t0__9a0		! t0__5t0	! b_e_shift	! r__r_4t0
LSYM(x181)	t0__9a0		! t0__5t0	! b_e_t0	! t0__4t0_a0
LSYM(x182)	t0__9a0		! t0__5t0	! b_e_2t0	! t0__2t0_a0
LSYM(x183)	t0__9a0		! t0__5t0	! b_e_2t0a0	! t0__2t0_a0
LSYM(x184)	t0__5a0		! t0__9t0	! b_e_4t0	! t0__t0_a0
LSYM(x185)	t0__9a0		! t0__4t0_a0	! b_e_t0	! t0__5t0
LSYM(x186)	t0__32a0	! t0__t0ma0	! b_e_2t0	! t0__3t0
LSYM(x187)	t0__9a0		! t0__4t0_a0	! b_e_t02a0	! t0__5t0
LSYM(x188)	t0__9a0		! t0__5t0	! b_e_4t0	! t0__t0_2a0
LSYM(x189)	t0__5a0		! t0__4t0_a0	! b_e_t0	! t0__9t0
LSYM(x190)	t0__9a0		! t0__2t0_a0	! b_e_2t0	! t0__5t0
LSYM(x191)	t0__64a0	! t0__3t0	! b_e_t0	! t0__t0ma0
LSYM(x192)	t0__8a0		! t0__3t0	! b_e_shift	! r__r_8t0
LSYM(x193)	t0__8a0		! t0__3t0	! b_e_t0	! t0__8t0_a0
LSYM(x194)	t0__8a0		! t0__3t0	! b_e_2t0	! t0__4t0_a0
LSYM(x195)	t0__8a0		! t0__8t0_a0	! b_e_t0	! t0__3t0
LSYM(x196)	t0__8a0		! t0__3t0	! b_e_4t0	! t0__2t0_a0
LSYM(x197)	t0__8a0		! t0__3t0	! b_e_4t0a0	! t0__2t0_a0
LSYM(x198)	t0__64a0	! t0__t0_2a0	! b_e_t0	! t0__3t0
LSYM(x199)	t0__8a0		! t0__4t0_a0	! b_e_2t0a0	! t0__3t0
LSYM(x200)	t0__5a0		! t0__5t0	! b_e_shift	! r__r_8t0
LSYM(x201)	t0__5a0		! t0__5t0	! b_e_t0	! t0__8t0_a0
2004 LSYM(x202) t0__5a0 ! t0__5t0 ! b_e_2t0 ! t0__4t0_a0
2005 LSYM(x203) t0__5a0 ! t0__5t0 ! b_e_2t0a0 ! t0__4t0_a0
2006 LSYM(x204) t0__8a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__3t0
2007 LSYM(x205) t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__5t0
2008 LSYM(x206) t0__64a0 ! t0__t0_4a0 ! b_e_t02a0 ! t0__3t0
2009 LSYM(x207) t0__8a0 ! t0__2t0_a0 ! b_e_3t0 ! t0__4t0_a0
2010 LSYM(x208) t0__5a0 ! t0__5t0 ! b_e_8t0 ! t0__t0_a0
2011 LSYM(x209) t0__5a0 ! t0__5t0 ! b_e_8t0a0 ! t0__t0_a0
2012 LSYM(x210) t0__5a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__5t0
2013 LSYM(x211) t0__5a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__5t0
2014 LSYM(x212) t0__3a0 ! t0__4t0_a0 ! b_e_4t0 ! t0__4t0_a0
2015 LSYM(x213) t0__3a0 ! t0__4t0_a0 ! b_e_4t0a0 ! t0__4t0_a0
2016 LSYM(x214) t0__9a0 ! t0__t0_4a0 ! b_e_2t04a0 ! t0__8t0_a0
2017 LSYM(x215) t0__5a0 ! t0__4t0_a0 ! b_e_5t0 ! t0__2t0_a0
2018 LSYM(x216) t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
2019 LSYM(x217) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
2020 LSYM(x218) t0__9a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
2021 LSYM(x219) t0__9a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
2022 LSYM(x220) t0__3a0 ! t0__9t0 ! b_e_4t0 ! t0__2t0_a0
2023 LSYM(x221) t0__3a0 ! t0__9t0 ! b_e_4t0a0 ! t0__2t0_a0
2024 LSYM(x222) t0__9a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__3t0
2025 LSYM(x223) t0__9a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__3t0
2026 LSYM(x224) t0__9a0 ! t0__3t0 ! b_e_8t0 ! t0__t0_a0
2027 LSYM(x225) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__5t0
2028 LSYM(x226) t0__3a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__32t0
2029 LSYM(x227) t0__9a0 ! t0__5t0 ! b_e_t02a0 ! t0__5t0
2030 LSYM(x228) t0__9a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__3t0
2031 LSYM(x229) t0__9a0 ! t0__2t0_a0 ! b_e_4t0a0 ! t0__3t0
2032 LSYM(x230) t0__9a0 ! t0__5t0 ! b_e_5t0 ! t0__t0_a0
2033 LSYM(x231) t0__9a0 ! t0__2t0_a0 ! b_e_3t0 ! t0__4t0_a0
2034 LSYM(x232) t0__3a0 ! t0__2t0_a0 ! b_e_8t0 ! t0__4t0_a0
2035 LSYM(x233) t0__3a0 ! t0__2t0_a0 ! b_e_8t0a0 ! t0__4t0_a0
2036 LSYM(x234) t0__3a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__9t0
2037 LSYM(x235) t0__3a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__9t0
2038 LSYM(x236) t0__9a0 ! t0__2t0_a0 ! b_e_4t08a0 ! t0__3t0
2039 LSYM(x237) t0__16a0 ! t0__5t0 ! b_e_3t0 ! t0__t0ma0
2040 LSYM(x238) t0__3a0 ! t0__4t0_a0 ! b_e_2t04a0 ! t0__9t0
2041 LSYM(x239) t0__16a0 ! t0__5t0 ! b_e_t0ma0 ! t0__3t0
2042 LSYM(x240) t0__9a0 ! t0__t0_a0 ! b_e_8t0 ! t0__3t0
2043 LSYM(x241) t0__9a0 ! t0__t0_a0 ! b_e_8t0a0 ! t0__3t0
2044 LSYM(x242) t0__5a0 ! t0__3t0 ! b_e_2t0 ! t0__8t0_a0
2045 LSYM(x243) t0__9a0 ! t0__9t0 ! b_e_t0 ! t0__3t0
2046 LSYM(x244) t0__5a0 ! t0__3t0 ! b_e_4t0 ! t0__4t0_a0
2047 LSYM(x245) t0__8a0 ! t0__3t0 ! b_e_5t0 ! t0__2t0_a0
2048 LSYM(x246) t0__5a0 ! t0__8t0_a0 ! b_e_2t0 ! t0__3t0
2049 LSYM(x247) t0__5a0 ! t0__8t0_a0 ! b_e_2t0a0 ! t0__3t0
2050 LSYM(x248) t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_8t0
2051 LSYM(x249) t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__8t0_a0
2052 LSYM(x250) t0__5a0 ! t0__5t0 ! b_e_2t0 ! t0__5t0
2053 LSYM(x251) t0__5a0 ! t0__5t0 ! b_e_2t0a0 ! t0__5t0
2054 LSYM(x252) t0__64a0 ! t0__t0ma0 ! b_e_shift ! r__r_4t0
2055 LSYM(x253) t0__64a0 ! t0__t0ma0 ! b_e_t0 ! t0__4t0_a0
2056 LSYM(x254) t0__128a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
2057 LSYM(x255) t0__256a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
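/* Most entries above handle one multiplier value x (x78..x255 in
   this part of the table) in four statements (the "!" is the
   assembler's statement separator): up to two shift-and-add steps
   that build t0, a b_e_* branch into one of the shared tails below,
   and a final step that rides in the branch's delay slot.  The
   macro names spell out their effect: t0__9a0 is "t0 = 9*a0",
   r__r_2t0 is "r = r + 2*t0", t0__t0ma0 is "t0 = t0 - a0".  As a
   worked example, x90 factors 90 as 9*5*2:

	t0__9a0		t0 = 9*a0
	t0__5t0		t0 = 5*t0 = 45*a0
	b_e_shift	branch to the e_shift tail below ...
	r__r_2t0	... accumulating r = r + 2*t0 = r + 90*a0
 */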
/* 1040 insts before this. */
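/* What follows are the shared tails targeted by the b_e_* branches
   above.  Every tail tests a1 -- the still-unprocessed upper part of
   the multiplier -- with an a1_ne_0_b_l* macro and branches back into
   the main multiply loop (labels l0/l1/l2, defined earlier in this
   routine) while bits remain; e_shift uses that branch's delay slot
   to shift a0 left by 8 so the next multiplier byte lines up.  Once
   a1 is zero, the r__r_Nt0-style tails accumulate into r and return
   with MILLIRETN, while the t0-adjusting tails apply one last
   correction to t0 and return through MILLIRET with the r__r_t0
   accumulate in the delay slot.  */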
LSYM(ret_t0) MILLIRET
LSYM(e_t0) r__r_t0
LSYM(e_shift) a1_ne_0_b_l2
	a0__256a0 /* a0 <<= 8 *********** */
	MILLIRETN
LSYM(e_t0ma0) a1_ne_0_b_l0
	t0__t0ma0
	MILLIRET
	r__r_t0
LSYM(e_t0a0) a1_ne_0_b_l0
	t0__t0_a0
	MILLIRET
	r__r_t0
LSYM(e_t02a0) a1_ne_0_b_l0
	t0__t0_2a0
	MILLIRET
	r__r_t0
LSYM(e_t04a0) a1_ne_0_b_l0
	t0__t0_4a0
	MILLIRET
	r__r_t0
LSYM(e_2t0) a1_ne_0_b_l1
	r__r_2t0
	MILLIRETN
LSYM(e_2t0a0) a1_ne_0_b_l0
	t0__2t0_a0
	MILLIRET
	r__r_t0
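/* The compound tails e2t04a0 and e4t08a0 below first fold in
   t0__t0_2a0 and then accumulate 2*t0 or 4*t0, yielding the
   2*t0+4*a0 and 4*t0+8*a0 endings used by entries such as x214
   and x236 above.  */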
LSYM(e2t04a0) t0__t0_2a0
	a1_ne_0_b_l1
	r__r_2t0
	MILLIRETN
LSYM(e_3t0) a1_ne_0_b_l0
	t0__3t0
	MILLIRET
	r__r_t0
LSYM(e_4t0) a1_ne_0_b_l1
	r__r_4t0
	MILLIRETN
LSYM(e_4t0a0) a1_ne_0_b_l0
	t0__4t0_a0
	MILLIRET
	r__r_t0
LSYM(e4t08a0) t0__t0_2a0
	a1_ne_0_b_l1
	r__r_4t0
	MILLIRETN
LSYM(e_5t0) a1_ne_0_b_l0
	t0__5t0
	MILLIRET
	r__r_t0
LSYM(e_8t0) a1_ne_0_b_l1
	r__r_8t0
	MILLIRETN
LSYM(e_8t0a0) a1_ne_0_b_l0