/* arch/x86/math-emu/round_Xsig.S (from linux/fpc-iii.git) */
1 /*---------------------------------------------------------------------------+
2  |  round_Xsig.S                                                             |
3  |                                                                           |
4  | Copyright (C) 1992,1993,1994,1995                                         |
5  |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
6  |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
7  |                                                                           |
8  | Normalize and round a 12 byte quantity.                                   |
9  | Call from C as:                                                           |
10  |   int round_Xsig(Xsig *n)                                                 |
11  |                                                                           |
12  | Normalize a 12 byte quantity.                                             |
13  | Call from C as:                                                           |
14  |   int norm_Xsig(Xsig *n)                                                  |
15  |                                                                           |
16  | Each function returns the size of the shift (nr of bits).                 |
17  |                                                                           |
18  +---------------------------------------------------------------------------*/
19         .file   "round_Xsig.S"
21 #include "fpu_emu.h"
24 .text
/*---------------------------------------------------------------------------+
 | int round_Xsig(Xsig *n)                                                   |
 |                                                                           |
 | Normalize the 96-bit quantity at *n (three 32-bit words, least            |
 | significant first), then round it to 64 bits using the bit below the      |
 | 64-bit boundary (round-half-up on bit 31 of the low word).                |
 |                                                                           |
 | ABI:  i386 cdecl; PARAM1 is the single stack argument (see fpu_emu.h).    |
 | In:   PARAM1 = pointer to the Xsig.                                       |
 | Out:  *n updated in place; %eax = accumulated shift count                 |
 |       (negative of the left shift applied, +1 if rounding carried out     |
 |       of the top bit).                                                    |
 | Regs: %ebx, %esi, %ebp preserved (callee-saved); %ecx, %edx clobbered.    |
 +---------------------------------------------------------------------------*/
ENTRY(round_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space */
	pushl	%ebx		/* -4(%ebp) is the shift-count accumulator */
	pushl	%esi

	movl	PARAM1,%esi	/* %esi = n */

	/* Load the 96-bit value: %edx:%ebx:%eax = ms:mid:ls words */
	movl	8(%esi),%edx
	movl	4(%esi),%ebx
	movl	(%esi),%eax

	movl	$0,-4(%ebp)	/* shift count = 0 */

	orl	%edx,%edx	/* ms bits */
	js	L_round		/* Already normalized */
	jnz	L_shift_1	/* Shift left 1 - 31 bits */

	/* Top word is zero: shift the whole value left one word (32 bits) */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)	/* record the 32-bit left shift */

/* We need to shift left by 1 - 31 bits */
/* NOTE(review): falls through with %edx possibly still zero if the top
   64 bits were all clear; bsrl on zero leaves %ecx undefined — callers
   presumably guarantee a nonzero upper 64 bits here. TODO confirm. */
L_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx		/* %ecx = 31 - bsr = left shift needed */
	subl	%ecx,-4(%ebp)	/* count -= shift (count is negated shift) */
	shld	%cl,%ebx,%edx	/* 96-bit left shift across the three words */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_round:
	/* Round-half-up: bit 31 of the low (discarded) word decides */
	testl	$0x80000000,%eax
	jz	L_exit

	addl	$1,%ebx		/* propagate the round into the upper 64 bits */
	adcl	$0,%edx
	jnz	L_exit		/* no carry out of the top word: done */

	/* Carry out of bit 95: value became 1.0 * 2^96; renormalize */
	movl	$0x80000000,%edx
	incl	-4(%ebp)	/* effective one-bit right shift */

L_exit:
	/* Store the result back and return the shift count */
	movl	%edx,8(%esi)
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax

	popl	%esi
	popl	%ebx
	leave
	ret
/*---------------------------------------------------------------------------+
 | int norm_Xsig(Xsig *n)                                                    |
 |                                                                           |
 | Normalize the 96-bit quantity at *n (three 32-bit words, least            |
 | significant first) so the most significant bit is set, shifting left by   |
 | at most 64 bits.  No rounding is performed (contrast round_Xsig).         |
 |                                                                           |
 | ABI:  i386 cdecl; PARAM1 is the single stack argument (see fpu_emu.h).    |
 | In:   PARAM1 = pointer to the Xsig.                                       |
 | Out:  *n updated in place; %eax = accumulated shift count                 |
 |       (negative of the left shift applied; -64 max, after which the       |
 |       value may still be unnormalized — see comment below).               |
 | Regs: %ebx, %esi, %ebp preserved (callee-saved); %ecx, %edx clobbered.    |
 +---------------------------------------------------------------------------*/
ENTRY(norm_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space */
	pushl	%ebx		/* -4(%ebp) is the shift-count accumulator */
	pushl	%esi

	movl	PARAM1,%esi	/* %esi = n */

	/* Load the 96-bit value: %edx:%ebx:%eax = ms:mid:ls words */
	movl	8(%esi),%edx
	movl	4(%esi),%ebx
	movl	(%esi),%eax

	movl	$0,-4(%ebp)	/* shift count = 0 */

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit		/* Already normalized */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	/* Top word zero: shift left one word (32 bits) and retest */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)	/* record the 32-bit left shift */

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Normalized now */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	/* Top two words were zero: shift left a second word (64 total) */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	addl	$-32,-4(%ebp)	/* count = -64 */
	jmp	L_n_exit	/* Might not be normalized,
				   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx		/* %ecx = 31 - bsr = left shift needed */
	subl	%ecx,-4(%ebp)	/* count -= shift (count is negated shift) */
	shld	%cl,%ebx,%edx	/* 96-bit left shift across the three words */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_n_exit:
	/* Store the result back and return the shift count */
	movl	%edx,8(%esi)
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax

	popl	%esi
	popl	%ebx
	leave
	ret