3 ; $OpenBSD: milli.S,v 1.5 2001/03/29 04:08:20 mickey Exp $
5 ; (c) Copyright 1986 HEWLETT-PACKARD COMPANY
7 ; To anyone who acknowledges that this file is provided "AS IS"
8 ; without any express or implied warranty:
9 ; permission to use, copy, modify, and distribute this file
10 ; for any purpose is hereby granted without fee, provided that
11 ; the above copyright notice and this notice appears in all
12 ; copies, and that the name of Hewlett-Packard Company not be
13 ; used in advertising or publicity pertaining to distribution
14 ; of the software without specific, written prior permission.
15 ; Hewlett-Packard Company makes no representations about the
16 ; suitability of this software for any purpose.
19 ; Standard Hardware Register Definitions for Use with Assembler
21 ; - fr16-31 added at Utah
22 ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 ; Hardware General Registers
88 ; Hardware Space Registers
105 ; Hardware Floating Point Registers
170 ; Hardware Control Registers
173 rctr: .equ 0 ; Recovery Counter Register
175 cr8: .equ 8 ; Protection ID 1
179 cr9: .equ 9 ; Protection ID 2
185 ccr: .equ 10 ; Coprocessor Configuration Register
189 sar: .equ 11 ; Shift Amount Register
193 pidr3: .equ 12 ; Protection ID 3
197 pidr4: .equ 13 ; Protection ID 4
201 iva: .equ 14 ; Interrupt Vector Address
205 eiem: .equ 15 ; External Interrupt Enable Mask
209 itmr: .equ 16 ; Interval Timer
213 pcsq: .equ 17 ; Program Counter Space queue
217 pcoq: .equ 18 ; Program Counter Offset queue
221 iir: .equ 19 ; Interruption Instruction Register
225 isr: .equ 20 ; Interruption Space Register
229 ior: .equ 21 ; Interruption Offset Register
233 ipsw: .equ 22 ; Interruption Processor Status Word
237 eirr: .equ 23 ; External Interrupt Request
241 ppda: .equ 24 ; Physical Page Directory Address
243 tr0: .equ 24 ; Temporary register 0
247 hta: .equ 25 ; Hash Table Address
249 tr1: .equ 25 ; Temporary register 1
253 tr2: .equ 26 ; Temporary register 2
257 tr3: .equ 27 ; Temporary register 3
261 tr4: .equ 28 ; Temporary register 4
265 tr5: .equ 29 ; Temporary register 5
269 tr6: .equ 30 ; Temporary register 6
273 tr7: .equ 31 ; Temporary register 7
275 ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
276 ; Procedure Call Convention ~
277 ; Register Definitions for Use with Assembler ~
279 ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
280 ; Software Architecture General Registers
281 rp: .equ r2 ; return pointer
283 mrp: .equ r31 ; millicode return pointer
285 ret0: .equ r28 ; return value
287 ret1: .equ r29 ; return value (high part of double)
289 sl: .equ r29 ; static link
291 sp: .equ r30 ; stack pointer
293 dp: .equ r27 ; data pointer
295 arg0: .equ r26 ; argument
297 arg1: .equ r25 ; argument or high part of double argument
299 arg2: .equ r24 ; argument
301 arg3: .equ r23 ; argument or high part of double argument
303 ;_____________________________________________________________________________
304 ; Software Architecture Space Registers
305 ; sr0 ; return link from BLE
306 sret: .equ sr1 ; return value
308 sarg: .equ sr1 ; argument
310 ; sr4 ; PC SPACE tracker
311 ; sr5 ; process private data
312 ;_____________________________________________________________________________
313 ; Software Architecture Pseudo Registers
314 previous_sp: .equ 64 ; old stack pointer (locates previous frame)
316 ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
317 ; Standard space and subspace definitions. version A.08.06
318 ; These are generally suitable for programs on HP_UX and HPE.
319 ; Statements commented out are used when building such things as operating
322 ; Additional code subspaces should have ALIGN=8 for an interspace BV
323 ; and should have SORT=24.
325 ; For an incomplete executable (program bound to shared libraries),
326 ; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
327 ; and $PLT$ subspaces respectively.
331 .EXPORT $$remI,millicode
408 movb,>=,n r1,ret1,remI300
412 remI300: add,>= arg0,r0,r0
433 .export $$dyncall,MILLICODE
435 bb,>=,n 22,bit30,noshlibs
451 .export $$sh_func_adrs, millicode
462 retreg: .EQU ret1 ; r29
464 .export $$divU,millicode
465 .import $$divU_3,millicode
466 .import $$divU_5,millicode
467 .import $$divU_6,millicode
468 .import $$divU_7,millicode
469 .import $$divU_9,millicode
470 .import $$divU_10,millicode
471 .import $$divU_12,millicode
472 .import $$divU_14,millicode
473 .import $$divU_15,millicode
477 ; The subtract is not nullified since it does no harm and can be used
478 ; by the two cases that branch back to "normal".
479 comib,>= 15,arg1,special_divisor
480 sub r0,arg1,temp ; clear carry, negate the divisor
481 ds r0,temp,r0 ; set V-bit to 1
483 add arg0,arg0,retreg ; shift msb bit into carry
484 ds r0,arg1,temp ; 1st divide step, if no carry
485 addc retreg,retreg,retreg ; shift retreg with/into carry
486 ds temp,arg1,temp ; 2nd divide step
487 addc retreg,retreg,retreg ; shift retreg with/into carry
488 ds temp,arg1,temp ; 3rd divide step
489 addc retreg,retreg,retreg ; shift retreg with/into carry
490 ds temp,arg1,temp ; 4th divide step
491 addc retreg,retreg,retreg ; shift retreg with/into carry
492 ds temp,arg1,temp ; 5th divide step
493 addc retreg,retreg,retreg ; shift retreg with/into carry
494 ds temp,arg1,temp ; 6th divide step
495 addc retreg,retreg,retreg ; shift retreg with/into carry
496 ds temp,arg1,temp ; 7th divide step
497 addc retreg,retreg,retreg ; shift retreg with/into carry
498 ds temp,arg1,temp ; 8th divide step
499 addc retreg,retreg,retreg ; shift retreg with/into carry
500 ds temp,arg1,temp ; 9th divide step
501 addc retreg,retreg,retreg ; shift retreg with/into carry
502 ds temp,arg1,temp ; 10th divide step
503 addc retreg,retreg,retreg ; shift retreg with/into carry
504 ds temp,arg1,temp ; 11th divide step
505 addc retreg,retreg,retreg ; shift retreg with/into carry
506 ds temp,arg1,temp ; 12th divide step
507 addc retreg,retreg,retreg ; shift retreg with/into carry
508 ds temp,arg1,temp ; 13th divide step
509 addc retreg,retreg,retreg ; shift retreg with/into carry
510 ds temp,arg1,temp ; 14th divide step
511 addc retreg,retreg,retreg ; shift retreg with/into carry
512 ds temp,arg1,temp ; 15th divide step
513 addc retreg,retreg,retreg ; shift retreg with/into carry
514 ds temp,arg1,temp ; 16th divide step
515 addc retreg,retreg,retreg ; shift retreg with/into carry
516 ds temp,arg1,temp ; 17th divide step
517 addc retreg,retreg,retreg ; shift retreg with/into carry
518 ds temp,arg1,temp ; 18th divide step
519 addc retreg,retreg,retreg ; shift retreg with/into carry
520 ds temp,arg1,temp ; 19th divide step
521 addc retreg,retreg,retreg ; shift retreg with/into carry
522 ds temp,arg1,temp ; 20th divide step
523 addc retreg,retreg,retreg ; shift retreg with/into carry
524 ds temp,arg1,temp ; 21st divide step
525 addc retreg,retreg,retreg ; shift retreg with/into carry
526 ds temp,arg1,temp ; 22nd divide step
527 addc retreg,retreg,retreg ; shift retreg with/into carry
528 ds temp,arg1,temp ; 23rd divide step
529 addc retreg,retreg,retreg ; shift retreg with/into carry
530 ds temp,arg1,temp ; 24th divide step
531 addc retreg,retreg,retreg ; shift retreg with/into carry
532 ds temp,arg1,temp ; 25th divide step
533 addc retreg,retreg,retreg ; shift retreg with/into carry
534 ds temp,arg1,temp ; 26th divide step
535 addc retreg,retreg,retreg ; shift retreg with/into carry
536 ds temp,arg1,temp ; 27th divide step
537 addc retreg,retreg,retreg ; shift retreg with/into carry
538 ds temp,arg1,temp ; 28th divide step
539 addc retreg,retreg,retreg ; shift retreg with/into carry
540 ds temp,arg1,temp ; 29th divide step
541 addc retreg,retreg,retreg ; shift retreg with/into carry
542 ds temp,arg1,temp ; 30th divide step
543 addc retreg,retreg,retreg ; shift retreg with/into carry
544 ds temp,arg1,temp ; 31st divide step
545 addc retreg,retreg,retreg ; shift retreg with/into carry
546 ds temp,arg1,temp ; 32nd divide step,
548 addc retreg,retreg,retreg ; shift last retreg bit into retreg
549 ;_____________________________________________________________________________
550 ; handle the cases where divisor is a small constant or has high bit on
553 comib,>,n 0,arg1,big_divisor ; nullify previous instruction
554 zero_divisor: ; this label is here to provide external visibility
556 addit,= 0,arg1,0 ; trap for zero dvr
558 bv 0(r31) ; divisor == 1
560 bv 0(r31) ; divisor == 2
561 extru arg0,30,31,retreg
562 b,n $$divU_3 ; divisor == 3
564 bv 0(r31) ; divisor == 4
565 extru arg0,29,30,retreg
566 b,n $$divU_5 ; divisor == 5
568 b,n $$divU_6 ; divisor == 6
570 b,n $$divU_7 ; divisor == 7
572 bv 0(r31) ; divisor == 8
573 extru arg0,28,29,retreg
574 b,n $$divU_9 ; divisor == 9
576 b,n $$divU_10 ; divisor == 10
578 b normal ; divisor == 11
579 ds r0,temp,r0 ; set V-bit to 1
580 b,n $$divU_12 ; divisor == 12
582 b normal ; divisor == 13
583 ds r0,temp,r0 ; set V-bit to 1
584 b,n $$divU_14 ; divisor == 14
586 b,n $$divU_15 ; divisor == 15
588 ;_____________________________________________________________________________
589 ; Handle the case where the high bit is on in the divisor.
590 ; Compute: if( dividend>=divisor) quotient=1; else quotient=0;
591 ; Note: dividend>=divisor iff dividend-divisor does not borrow
592 ; and not borrow iff carry
606 ;_____________________________________________________________________________
608 $$divide_by_constant:
611 .export $$divide_by_constant,millicode
612 ; Provides a "nice" label for the code covered by the unwind descriptor
613 ; for things like gprof.
616 .EXPORT $$divI_2,MILLICODE
620 EXTRS arg0,30,31,ret1
623 .EXPORT $$divI_4,MILLICODE
627 EXTRS arg0,29,30,ret1
630 .EXPORT $$divI_8,MILLICODE
634 EXTRS arg0,28,29,ret1
637 .EXPORT $$divI_16,MILLICODE
641 EXTRS arg0,27,28,ret1
644 .EXPORT $$divI_3,MILLICODE
645 COMB,<,N arg0,0,$neg3
649 SH2ADD arg0,arg0,arg0
656 SH2ADD arg0,arg0,arg0
661 .EXPORT $$divU_3,MILLICODE
665 SH2ADD arg0,arg0,arg0
670 .EXPORT $$divI_5,MILLICODE
671 COMB,<,N arg0,0,$neg5
681 SH1ADD arg0,arg0,arg0
686 .EXPORT $$divU_5,MILLICODE
690 SH1ADD arg0,arg0,arg0
695 .EXPORT $$divI_6,MILLICODE
696 COMB,<,N arg0,0,$neg6
697 EXTRU arg0,30,31,arg0
705 EXTRU arg0,30,31,arg0
707 SH2ADD arg0,arg0,arg0
712 .EXPORT $$divU_6,MILLICODE
713 EXTRU arg0,30,31,arg0
716 SH2ADD arg0,arg0,arg0
721 .EXPORT $$divU_10,MILLICODE
722 EXTRU arg0,30,31,arg0
744 .EXPORT $$divI_10,MILLICODE
747 EXTRU arg0,30,31,arg0
749 SH1ADD arg0,arg0,arg0
753 EXTRU arg0,30,31,arg0
754 SH1ADD arg0,arg0,arg0
774 .EXPORT $$divI_12,MILLICODE
777 EXTRU arg0,29,30,arg0
779 SH2ADD arg0,arg0,arg0
783 EXTRU arg0,29,30,arg0
785 SH2ADD arg0,arg0,arg0
788 .EXPORT $$divU_12,MILLICODE
789 EXTRU arg0,29,30,arg0
796 .EXPORT $$divI_15,MILLICODE
799 ADDIB,TR 1,arg0,$pos+4
807 .EXPORT $$divU_15,MILLICODE
813 .EXPORT $$divI_17,MILLICODE
814 COMB,<,N arg0,0,$neg17
831 .EXPORT $$divU_17,MILLICODE
842 .EXPORT $$divI_7,MILLICODE
843 COMB,<,N arg0,0,$neg7
847 SH3ADD arg0,arg0,arg0
864 EXTRU arg0,31,24,arg0
876 SH3ADD arg0,arg0,arg0
894 EXTRU arg0,31,24,arg0
904 .EXPORT $$divU_7,MILLICODE
908 SH3ADD arg0,arg0,arg0
913 .EXPORT $$divI_9,MILLICODE
914 COMB,<,N arg0,0,$neg9
931 .EXPORT $$divU_9,MILLICODE
941 .EXPORT $$divI_14,MILLICODE
942 COMB,<,N arg0,0,$neg14
944 .EXPORT $$divU_14,MILLICODE
946 EXTRU arg0,30,31,arg0
951 EXTRU arg0,30,31,arg0
956 rmndr: .EQU ret1 ; r29
958 .export $$remU,millicode
964 comib,>=,n 0,arg1,special_case
965 sub r0,arg1,rmndr ; clear carry, negate the divisor
966 ds r0,rmndr,r0 ; set V-bit to 1
967 add arg0,arg0,temp ; shift msb bit into carry
968 ds r0,arg1,rmndr ; 1st divide step, if no carry
969 addc temp,temp,temp ; shift temp with/into carry
970 ds rmndr,arg1,rmndr ; 2nd divide step
971 addc temp,temp,temp ; shift temp with/into carry
972 ds rmndr,arg1,rmndr ; 3rd divide step
973 addc temp,temp,temp ; shift temp with/into carry
974 ds rmndr,arg1,rmndr ; 4th divide step
975 addc temp,temp,temp ; shift temp with/into carry
976 ds rmndr,arg1,rmndr ; 5th divide step
977 addc temp,temp,temp ; shift temp with/into carry
978 ds rmndr,arg1,rmndr ; 6th divide step
979 addc temp,temp,temp ; shift temp with/into carry
980 ds rmndr,arg1,rmndr ; 7th divide step
981 addc temp,temp,temp ; shift temp with/into carry
982 ds rmndr,arg1,rmndr ; 8th divide step
983 addc temp,temp,temp ; shift temp with/into carry
984 ds rmndr,arg1,rmndr ; 9th divide step
985 addc temp,temp,temp ; shift temp with/into carry
986 ds rmndr,arg1,rmndr ; 10th divide step
987 addc temp,temp,temp ; shift temp with/into carry
988 ds rmndr,arg1,rmndr ; 11th divide step
989 addc temp,temp,temp ; shift temp with/into carry
990 ds rmndr,arg1,rmndr ; 12th divide step
991 addc temp,temp,temp ; shift temp with/into carry
992 ds rmndr,arg1,rmndr ; 13th divide step
993 addc temp,temp,temp ; shift temp with/into carry
994 ds rmndr,arg1,rmndr ; 14th divide step
995 addc temp,temp,temp ; shift temp with/into carry
996 ds rmndr,arg1,rmndr ; 15th divide step
997 addc temp,temp,temp ; shift temp with/into carry
998 ds rmndr,arg1,rmndr ; 16th divide step
999 addc temp,temp,temp ; shift temp with/into carry
1000 ds rmndr,arg1,rmndr ; 17th divide step
1001 addc temp,temp,temp ; shift temp with/into carry
1002 ds rmndr,arg1,rmndr ; 18th divide step
1003 addc temp,temp,temp ; shift temp with/into carry
1004 ds rmndr,arg1,rmndr ; 19th divide step
1005 addc temp,temp,temp ; shift temp with/into carry
1006 ds rmndr,arg1,rmndr ; 20th divide step
1007 addc temp,temp,temp ; shift temp with/into carry
1008 ds rmndr,arg1,rmndr ; 21st divide step
1009 addc temp,temp,temp ; shift temp with/into carry
1010 ds rmndr,arg1,rmndr ; 22nd divide step
1011 addc temp,temp,temp ; shift temp with/into carry
1012 ds rmndr,arg1,rmndr ; 23rd divide step
1013 addc temp,temp,temp ; shift temp with/into carry
1014 ds rmndr,arg1,rmndr ; 24th divide step
1015 addc temp,temp,temp ; shift temp with/into carry
1016 ds rmndr,arg1,rmndr ; 25th divide step
1017 addc temp,temp,temp ; shift temp with/into carry
1018 ds rmndr,arg1,rmndr ; 26th divide step
1019 addc temp,temp,temp ; shift temp with/into carry
1020 ds rmndr,arg1,rmndr ; 27th divide step
1021 addc temp,temp,temp ; shift temp with/into carry
1022 ds rmndr,arg1,rmndr ; 28th divide step
1023 addc temp,temp,temp ; shift temp with/into carry
1024 ds rmndr,arg1,rmndr ; 29th divide step
1025 addc temp,temp,temp ; shift temp with/into carry
1026 ds rmndr,arg1,rmndr ; 30th divide step
1027 addc temp,temp,temp ; shift temp with/into carry
1028 ds rmndr,arg1,rmndr ; 31st divide step
1029 addc temp,temp,temp ; shift temp with/into carry
1030 ds rmndr,arg1,rmndr ; 32nd divide step,
1031 comiclr,<= 0,rmndr,r0
1032 add rmndr,arg1,rmndr ; correction
1036 ; Putting >= on the last DS and deleting COMICLR does not work!
1037 ;_____________________________________________________________________________
1039 addit,= 0,arg1,r0 ; trap on div by zero
1040 sub,>>= arg0,arg1,rmndr
1048 ; Use bv 0(r31) and bv,n 0(r31) instead.
1049 ; #define return bv 0(%mrp)
1050 ; #define return_n bv,n 0(%mrp)
1057 .export $$mulI, millicode
1058 combt,<<= %r25,%r26,l4 ; swap args if unsigned %r25>%r26
1059 copy 0,%r29 ; zero out the result
1060 xor %r26,%r25,%r26 ; swap %r26 & %r25 using the
1061 xor %r26,%r25,%r25 ; old xor trick
1063 l4: combt,<= 0,%r26,l3 ; if %r26>=0 then proceed like unsigned
1065 zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********
1066 sub,> 0,%r25,%r1 ; otherwise negate both and
1067 combt,<=,n %r26,%r1,l2 ; swap back if |%r26|<|%r25|
1069 movb,tr,n %r1,%r26,l2 ; 10th inst.
1071 l0: add %r29,%r1,%r29 ; add in this partial product
1073 l1: zdep %r26,23,24,%r26 ; %r26 <<= 8 ******************
1075 l2: zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********
1077 l3: blr %r1,0 ; case on these 8 bits ******
1079 extru %r25,23,24,%r25 ; %r25 >>= 8 ******************
1081 ;16 insts before this.
1082 ; %r26 <<= 8 **************************
1083 x0: comb,<> %r25,0,l2 ! zdep %r26,23,24,%r26 ! bv,n 0(r31) ! nop
1085 x1: comb,<> %r25,0,l1 ! add %r29,%r26,%r29 ! bv,n 0(r31) ! nop
1087 x2: comb,<> %r25,0,l1 ! sh1add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
1089 x3: comb,<> %r25,0,l0 ! sh1add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
1091 x4: comb,<> %r25,0,l1 ! sh2add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
1093 x5: comb,<> %r25,0,l0 ! sh2add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
1095 x6: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
1097 x7: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r26,%r29,%r29 ! b,n ret_t0
1099 x8: comb,<> %r25,0,l1 ! sh3add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
1101 x9: comb,<> %r25,0,l0 ! sh3add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
1103 x10: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
1105 x11: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
1107 x12: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
1109 x13: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
1111 x14: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1113 x15: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r1,%r1 ! b,n ret_t0
1115 x16: zdep %r26,27,28,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
1117 x17: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r1,%r1 ! b,n ret_t0
1119 x18: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
1121 x19: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r26,%r1 ! b,n ret_t0
1123 x20: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
1125 x21: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
1127 x22: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1129 x23: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1131 x24: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
1133 x25: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
1135 x26: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1137 x27: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r1,%r1 ! b,n ret_t0
1139 x28: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1141 x29: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1143 x30: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1145 x31: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
1147 x32: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
1149 x33: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
1151 x34: zdep %r26,27,28,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1153 x35: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r26,%r1,%r1
1155 x36: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
1157 x37: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
1159 x38: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1161 x39: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1163 x40: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
1165 x41: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
1167 x42: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1169 x43: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1171 x44: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1173 x45: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
1175 x46: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! add %r1,%r26,%r1
1177 x47: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
1179 x48: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! zdep %r1,27,28,%r1 ! b,n ret_t0
1181 x49: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r26,%r1,%r1
1183 x50: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1185 x51: sh3add %r26,%r26,%r1 ! sh3add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1187 x52: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1189 x53: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1191 x54: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1193 x55: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1195 x56: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1197 x57: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1199 x58: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1201 x59: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
1203 x60: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1205 x61: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1207 x62: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1209 x63: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
1211 x64: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
1213 x65: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
1215 x66: zdep %r26,26,27,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1217 x67: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1219 x68: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1221 x69: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1223 x70: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
1225 x71: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1227 x72: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
1229 x73: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! add %r29,%r1,%r29
1231 x74: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1233 x75: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1235 x76: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1237 x77: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1239 x78: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1241 x79: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1243 x80: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
1245 x81: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
1247 x82: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1249 x83: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1251 x84: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1253 x85: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1255 x86: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1257 x87: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r26,%r1,%r1
1259 x88: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1261 x89: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1263 x90: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1265 x91: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1267 x92: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1269 x93: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1271 x94: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r26,%r1,%r1
1273 x95: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1275 x96: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1277 x97: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1279 x98: zdep %r26,26,27,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
1281 x99: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1283 x100: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1285 x101: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1287 x102: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1289 x103: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r26,%r1
1291 x104: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1293 x105: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1295 x106: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1297 x107: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh3add %r1,%r26,%r1
1299 x108: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1301 x109: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1303 x110: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1305 x111: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1307 x112: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! zdep %r1,27,28,%r1
1309 x113: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
1311 x114: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
1313 x115: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
1315 x116: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
1317 x117: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
1319 x118: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0a0 ! sh3add %r1,%r1,%r1
1321 x119: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
1323 x120: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1325 x121: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1327 x122: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1329 x123: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1331 x124: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1333 x125: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1335 x126: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1337 x127: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
1339 x128: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
1341 x129: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! add %r1,%r26,%r1 ! b,n ret_t0
1343 x130: zdep %r26,25,26,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1345 x131: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1347 x132: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1349 x133: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1351 x134: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1353 x135: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1355 x136: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1357 x137: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1359 x138: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1361 x139: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
1363 x140: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r1,%r1
1365 x141: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
1367 x142: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
1369 x143: zdep %r26,27,28,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1371 x144: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1373 x145: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1375 x146: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1377 x147: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1379 x148: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1381 x149: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1383 x150: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1385 x151: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
1387 x152: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1389 x153: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1391 x154: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1393 x155: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1395 x156: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1397 x157: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
1399 x158: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
1401 x159: zdep %r26,26,27,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1403 x160: sh2add %r26,%r26,%r1 ! sh2add %r1,0,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1405 x161: sh3add %r26,0,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1407 x162: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1409 x163: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
1411 x164: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1413 x165: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
; ---------------------------------------------------------------------
; Multiply-by-constant table entries x166 .. x255 (millicode multiply).
; Each entry computes multiplicand (%r26) * N into %r1 using PA-RISC
; shift-and-add instructions (shKadd r1,r2,rt: rt = (r1 << K) + r2) and
; zdep left-shifts, then branches to a shared exit stub (e_*) that folds
; the partial product into the accumulator %r29.  The instruction after
; each 'b' executes in the branch delay slot; '!' separates multiple
; instructions written on one listing line.
; NOTE(review): the leading decimal on each line is a listing line
; number carried over from the original source dump, not code.
; ---------------------------------------------------------------------
1415 x166: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1417 x167: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
1419 x168: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1421 x169: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1423 x170: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1425 x171: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
1427 x172: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1429 x173: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
1431 x174: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t04a0 ! sh2add %r1,%r1,%r1
1433 x175: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
1435 x176: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0 ! add %r1,%r26,%r1
1437 x177: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
1439 x178: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
1441 x179: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r26,%r1
1443 x180: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1445 x181: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1447 x182: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
1449 x183: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
1451 x184: sh2add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! add %r1,%r26,%r1
1453 x185: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1455 x186: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
1457 x187: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
1459 x188: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r26,%r1,%r1
1461 x189: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
1463 x190: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
1465 x191: zdep %r26,25,26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
1467 x192: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1469 x193: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1471 x194: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1473 x195: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1475 x196: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1477 x197: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
1479 x198: zdep %r26,25,26,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1481 x199: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
1483 x200: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1485 x201: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1487 x202: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1489 x203: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
1491 x204: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
1493 x205: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1495 x206: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
1497 x207: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
1499 x208: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
1501 x209: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
1503 x210: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
1505 x211: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
1507 x212: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
1509 x213: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0a0 ! sh2add %r1,%r26,%r1
1511 x214: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e2t04a0 ! sh3add %r1,%r26,%r1
1513 x215: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
1515 x216: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1517 x217: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1519 x218: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
1521 x219: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1523 x220: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
1525 x221: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
1527 x222: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
1529 x223: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
1531 x224: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
1533 x225: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
1535 x226: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! zdep %r1,26,27,%r1
1537 x227: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
1539 x228: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
1541 x229: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r1,%r1
1543 x230: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_5t0 ! add %r1,%r26,%r1
1545 x231: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
1547 x232: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0 ! sh2add %r1,%r26,%r1
1549 x233: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0a0 ! sh2add %r1,%r26,%r1
1551 x234: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r1,%r1
1553 x235: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r1,%r1
1555 x236: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e4t08a0 ! sh1add %r1,%r1,%r1
1557 x237: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_3t0 ! sub %r1,%r26,%r1
1559 x238: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e2t04a0 ! sh3add %r1,%r1,%r1
1561 x239: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0ma0 ! sh1add %r1,%r1,%r1
1563 x240: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0 ! sh1add %r1,%r1,%r1
1565 x241: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0a0 ! sh1add %r1,%r1,%r1
1567 x242: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
1569 x243: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
1571 x244: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
1573 x245: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
1575 x246: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
1577 x247: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
1579 x248: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
1581 x249: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
1583 x250: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
1585 x251: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
1587 x252: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
1589 x253: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
1591 x254: zdep %r26,24,25,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
1593 x255: zdep %r26,23,24,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
1595 ;1040 insts before this.
; ---------------------------------------------------------------------
; Shared exit stubs for the multiply table above.  Each stub merges the
; partial product in %r1 into the running result %r29, scaled according
; to its name (e_t0 adds t0, e_2t0 adds 2*t0 via sh1add, e_4t0 via
; sh2add, e_8t0 via sh3add, the *a0 variants also add the multiplicand),
; then tests %r25 with comb,<> and branches back to l0/l1/l2 to process
; the rest of the multiplier when it is non-zero; e_shift shifts the
; multiplicand %r26 left 8 bits for the next round.
; NOTE(review): this listing fragment appears elided -- several stubs
; (e.g. e_t0a0 through e_t04a0) show only their first instruction.
; Confirm against the complete milli.S before relying on the details.
; ---------------------------------------------------------------------
1598 e_t0: add %r29,%r1,%r29
1600 e_shift: comb,<> %r25,0,l2
1602 zdep %r26,23,24,%r26 ; %r26 <<= 8 ***********
1604 e_t0ma0: comb,<> %r25,0,l0
1609 e_t0a0: comb,<> %r25,0,l0
1614 e_t02a0: comb,<> %r25,0,l0
1619 e_t04a0: comb,<> %r25,0,l0
1624 e_2t0: comb,<> %r25,0,l1
1626 sh1add %r1,%r29,%r29
1628 e_2t0a0: comb,<> %r25,0,l0
1633 e2t04a0: sh1add %r26,%r1,%r1
1636 sh1add %r1,%r29,%r29
1638 e_3t0: comb,<> %r25,0,l0
1643 e_4t0: comb,<> %r25,0,l1
1645 sh2add %r1,%r29,%r29
1647 e_4t0a0: comb,<> %r25,0,l0
1652 e4t08a0: sh1add %r26,%r1,%r1
1655 sh2add %r1,%r29,%r29
1657 e_5t0: comb,<> %r25,0,l0
1662 e_8t0: comb,<> %r25,0,l1
1664 sh3add %r1,%r29,%r29
1666 e_8t0a0: comb,<> %r25,0,l0
1675 .import $$divI_2,millicode
1676 .import $$divI_3,millicode
1677 .import $$divI_4,millicode
1678 .import $$divI_5,millicode
1679 .import $$divI_6,millicode
1680 .import $$divI_7,millicode
1681 .import $$divI_8,millicode
1682 .import $$divI_9,millicode
1683 .import $$divI_10,millicode
1684 .import $$divI_12,millicode
1685 .import $$divI_14,millicode
1686 .import $$divI_15,millicode
1687 .export $$divI,millicode
1688 .export $$divoI,millicode
; ---------------------------------------------------------------------
; $$divI general path: signed 32-bit integer divide.
; NOTE(review): the $$divI/$$divoI entry labels are not visible in this
; listing fragment (lines elided after the .export directives above);
; these instructions begin the routine body.
; Strategy: special-case divisor == -1 (negative1) and divisors 0..15
; (small_divisor dispatch table below), otherwise take the absolute
; value of the dividend, prime the V (divide-step) bit from the negated
; divisor with 'ds 0,temp,0', and run 32 unrolled non-restoring divide
; steps.  Each 'ds' performs one conditional add/subtract and each
; 'addc' shifts the next dividend bit in and a quotient bit out through
; the carry.  The final xor,>=/sub pair negates the quotient when the
; operand signs differ.
; ---------------------------------------------------------------------
1692 comib,=,n -1,arg1,negative1 ; when divisor == -1
1694 comib,>>=,n 15,arg1,small_divisor
1695 add,>= 0,arg0,retreg ; move dividend, if retreg < 0,
1697 sub 0,retreg,retreg ; make it positive
1698 sub 0,arg1,temp ; clear carry,
1699 ; negate the divisor
1700 ds 0,temp,0 ; set V-bit to the comple-
1701 ; ment of the divisor sign
1702 add retreg,retreg,retreg ; shift msb bit into carry
1703 ds r0,arg1,temp ; 1st divide step, if no carry
1704 addc retreg,retreg,retreg ; shift retreg with/into carry
1705 ds temp,arg1,temp ; 2nd divide step
1706 addc retreg,retreg,retreg ; shift retreg with/into carry
1707 ds temp,arg1,temp ; 3rd divide step
1708 addc retreg,retreg,retreg ; shift retreg with/into carry
1709 ds temp,arg1,temp ; 4th divide step
1710 addc retreg,retreg,retreg ; shift retreg with/into carry
1711 ds temp,arg1,temp ; 5th divide step
1712 addc retreg,retreg,retreg ; shift retreg with/into carry
1713 ds temp,arg1,temp ; 6th divide step
1714 addc retreg,retreg,retreg ; shift retreg with/into carry
1715 ds temp,arg1,temp ; 7th divide step
1716 addc retreg,retreg,retreg ; shift retreg with/into carry
1717 ds temp,arg1,temp ; 8th divide step
1718 addc retreg,retreg,retreg ; shift retreg with/into carry
1719 ds temp,arg1,temp ; 9th divide step
1720 addc retreg,retreg,retreg ; shift retreg with/into carry
1721 ds temp,arg1,temp ; 10th divide step
1722 addc retreg,retreg,retreg ; shift retreg with/into carry
1723 ds temp,arg1,temp ; 11th divide step
1724 addc retreg,retreg,retreg ; shift retreg with/into carry
1725 ds temp,arg1,temp ; 12th divide step
1726 addc retreg,retreg,retreg ; shift retreg with/into carry
1727 ds temp,arg1,temp ; 13th divide step
1728 addc retreg,retreg,retreg ; shift retreg with/into carry
1729 ds temp,arg1,temp ; 14th divide step
1730 addc retreg,retreg,retreg ; shift retreg with/into carry
1731 ds temp,arg1,temp ; 15th divide step
1732 addc retreg,retreg,retreg ; shift retreg with/into carry
1733 ds temp,arg1,temp ; 16th divide step
1734 addc retreg,retreg,retreg ; shift retreg with/into carry
1735 ds temp,arg1,temp ; 17th divide step
1736 addc retreg,retreg,retreg ; shift retreg with/into carry
1737 ds temp,arg1,temp ; 18th divide step
1738 addc retreg,retreg,retreg ; shift retreg with/into carry
1739 ds temp,arg1,temp ; 19th divide step
1740 addc retreg,retreg,retreg ; shift retreg with/into carry
1741 ds temp,arg1,temp ; 20th divide step
1742 addc retreg,retreg,retreg ; shift retreg with/into carry
1743 ds temp,arg1,temp ; 21st divide step
1744 addc retreg,retreg,retreg ; shift retreg with/into carry
1745 ds temp,arg1,temp ; 22nd divide step
1746 addc retreg,retreg,retreg ; shift retreg with/into carry
1747 ds temp,arg1,temp ; 23rd divide step
1748 addc retreg,retreg,retreg ; shift retreg with/into carry
1749 ds temp,arg1,temp ; 24th divide step
1750 addc retreg,retreg,retreg ; shift retreg with/into carry
1751 ds temp,arg1,temp ; 25th divide step
1752 addc retreg,retreg,retreg ; shift retreg with/into carry
1753 ds temp,arg1,temp ; 26th divide step
1754 addc retreg,retreg,retreg ; shift retreg with/into carry
1755 ds temp,arg1,temp ; 27th divide step
1756 addc retreg,retreg,retreg ; shift retreg with/into carry
1757 ds temp,arg1,temp ; 28th divide step
1758 addc retreg,retreg,retreg ; shift retreg with/into carry
1759 ds temp,arg1,temp ; 29th divide step
1760 addc retreg,retreg,retreg ; shift retreg with/into carry
1761 ds temp,arg1,temp ; 30th divide step
1762 addc retreg,retreg,retreg ; shift retreg with/into carry
1763 ds temp,arg1,temp ; 31st divide step
1764 addc retreg,retreg,retreg ; shift retreg with/into carry
1765 ds temp,arg1,temp ; 32nd divide step,
1766 addc retreg,retreg,retreg ; shift last retreg bit into retreg
1767 xor,>= arg0,arg1,0 ; get correct sign of quotient
1768 sub 0,retreg,retreg ; based on operand signs
1771 ;______________________________________________________________________
1775 ; table for divisor == 0,1, ... ,15
; ---------------------------------------------------------------------
; Small-divisor dispatch (divisor in 0..15).  Divisor 0 traps via the
; conditional-trap 'addit,='; divisor 1 returns the dividend through
; 'bv 0(r31)'; constant divisors with dedicated millicode routines
; branch directly to $$divI_N.  Divisors 11 and 13 have no dedicated
; routine and fall back to the general path (normal1), first copying
; the dividend to retreg with 'add,>=' (nullification handles the
; negative-dividend case).
; NOTE(review): the listing line numbers skip here, so the table's
; delay-slot/padding instructions appear elided in this fragment;
; confirm slot spacing against the complete milli.S.
; ---------------------------------------------------------------------
1776 addit,= 0,arg1,r0 ; trap if divisor == 0
1778 bv 0(r31) ; divisor == 1
1780 b,n $$divI_2 ; divisor == 2
1782 b,n $$divI_3 ; divisor == 3
1784 b,n $$divI_4 ; divisor == 4
1786 b,n $$divI_5 ; divisor == 5
1788 b,n $$divI_6 ; divisor == 6
1790 b,n $$divI_7 ; divisor == 7
1792 b,n $$divI_8 ; divisor == 8
1794 b,n $$divI_9 ; divisor == 9
1796 b,n $$divI_10 ; divisor == 10
1798 b normal1 ; divisor == 11
1799 add,>= 0,arg0,retreg
1800 b,n $$divI_12 ; divisor == 12
1802 b normal1 ; divisor == 13
1803 add,>= 0,arg0,retreg
1804 b,n $$divI_14 ; divisor == 14
1806 b,n $$divI_15 ; divisor == 15
1808 ;______________________________________________________________________
1810 sub 0,arg0,retreg ; result is negation of dividend
1812 addo arg0,arg1,r0 ; trap iff dividend==0x80000000 && divisor==-1