/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/
#ifdef CPU_ARM

#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH
/* 32x32->64 signed multiply, returning the high 32 bits of the product. */
static inline int32_t MULT32(int32_t x, int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
/* Q31 multiply: high word of the 64-bit product, rescaled back to Q31. */
static inline int32_t MULT31(int32_t x, int32_t y) {
  return MULT32(x,y)<<1;
}
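/* For reference, a portable C sketch of what the two primitives above
   compute, based on my reading of the asm (the *_ref names are
   illustrative and not part of the original file): */
static inline int32_t MULT32_ref(int32_t x, int32_t y) {
  /* smull: take the high 32 bits of the signed 64-bit product */
  return (int32_t)(((int64_t)x * y) >> 32);
}

static inline int32_t MULT31_ref(int32_t x, int32_t y) {
  /* Q31 * Q31 -> Q31: rescale the high word by one bit */
  return MULT32_ref(x, y) << 1;
}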
/* Multiply and shift the 64-bit product right by 15, rounding at bit 14
   (the carry picked up by the adc). */
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  int lo,hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs  %0, %0, lsr #15\n\t"
               "adc   %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
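/* A portable sketch of the same operation (assumed semantics: the
   movs/adc pair is a rounded right shift of the full product by 15,
   truncated to 32 bits): */
static inline int32_t MULT31_SHIFT15_ref(int32_t x, int32_t y) {
  return (int32_t)(((int64_t)x * y + (1 << 14)) >> 15);
}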
/* Compiler-only memory barrier: stops the compiler from reordering
   loads/stores across it. */
#define MB() asm volatile ("" : : : "memory")
/* Cross product: x = hi(a*t + b*v), y = hi(b*t - a*v); a is clobbered. */
#define XPROD32(a, b, t, v, x, y) \
{ \
  long l; \
  asm( "smull %0, %1, %4, %6\n\t" \
       "smlal %0, %1, %5, %7\n\t" \
       "rsb   %3, %4, #0\n\t" \
       "smull %0, %2, %5, %6\n\t" \
       "smlal %0, %2, %3, %7" \
       : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) \
       : "cc" ); \
}
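/* A portable sketch of the same contract, without the clobber of a
   (XPROD32_ref is an illustrative name, not part of the original): */
#define XPROD32_ref(a, b, t, v, x, y) \
{ \
  (x) = (int32_t)(((int64_t)(a)*(t) + (int64_t)(b)*(v)) >> 32); \
  (y) = (int32_t)(((int64_t)(b)*(t) - (int64_t)(a)*(v)) >> 32); \
}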
/* Same cross product as XPROD32, rescaled to Q31: both results are
   shifted up by one on the way out. MB() keeps the two stores ordered,
   since the destinations may alias. */
static inline void XPROD31(int32_t a, int32_t b,
                           int32_t t, int32_t v,
                           int32_t *x, int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb   %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
/* Sign-flipped variant: *x = (a*t - b*v) in Q31, *y = (b*t + a*v) in Q31. */
static inline void XNPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
{
  int x1, y1, l;
  asm( "rsb   %2, %4, #0\n\t"
       "smull %0, %1, %3, %5\n\t"
       "smlal %0, %1, %2, %6\n\t"
       "smull %0, %2, %4, %5\n\t"
       "smlal %0, %2, %3, %6"
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
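/* Portable sketches of the two Q31 cross products above, based on my
   reading of the asm (the *_ref names are illustrative): */
static inline void XPROD31_ref(int32_t a, int32_t b, int32_t t, int32_t v,
                               int32_t *x, int32_t *y) {
  *x = (int32_t)(((int64_t)a*t + (int64_t)b*v) >> 32) << 1;
  *y = (int32_t)(((int64_t)b*t - (int64_t)a*v) >> 32) << 1;
}

static inline void XNPROD31_ref(int32_t a, int32_t b, int32_t t, int32_t v,
                                int32_t *x, int32_t *y) {
  *x = (int32_t)(((int64_t)a*t - (int64_t)b*v) >> 32) << 1;
  *y = (int32_t)(((int64_t)b*t + (int64_t)a*v) >> 32) << 1;
}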
#ifndef _V_VECT_OPS
#define _V_VECT_OPS

/* asm versions of vector operations for block.c, window.c */
static inline
void vect_add(int32_t *x, int32_t *y, int n)
{
  /* four elements at a time via ldm/stm */
  while (n>=4) {
    asm volatile ("ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4;"
                  "add r1, r1, r5;"
                  "add r2, r2, r6;"
                  "add r3, r3, r7;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7",
                      "memory");
    n -= 4;
  }
  /* add final elements */
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}
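/* The asm loop streams four words per iteration; the generic equivalent
   is a plain elementwise add. A sketch for comparison (vect_add_ref is
   an illustrative name): */
static inline void vect_add_ref(int32_t *x, const int32_t *y, int n) {
  int i;
  for (i = 0; i < n; i++)
    x[i] += y[i];
}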
static inline
void vect_copy(int32_t *x, int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[y]!, {r0, r1, r2, r3};"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "memory");
    n -= 4;
  }
  /* copy final elements */
  while (n>0) {
    *x++ = *y++;
    n--;
  }
}
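/* vect_copy has no arithmetic: the generic form is a forward word copy
   (equivalently memcpy for non-overlapping buffers). A sketch: */
static inline void vect_copy_ref(int32_t *x, const int32_t *y, int n) {
  int i;
  for (i = 0; i < n; i++)
    x[i] = y[i];
}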
/* Elementwise MULT31 of data against the window, both walking forward. */
static inline
void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r4;"
                  "mov   r0, r9, lsl #1;"
                  "smull r8, r9, r1, r5;"
                  "mov   r1, r9, lsl #1;"
                  "smull r8, r9, r2, r6;"
                  "mov   r2, r9, lsl #1;"
                  "smull r8, r9, r3, r7;"
                  "mov   r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory", "cc");
    n -= 4;
  }
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}
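/* Each smull/mov pair above is MULT31 unrolled by hand. The generic
   equivalent, as a sketch: */
static inline void vect_mult_fw_ref(int32_t *data, const int32_t *window, int n) {
  int i;
  for (i = 0; i < n; i++)
    data[i] = MULT31(data[i], window[i]);
}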
/* Same as vect_mult_fw, but the window pointer walks backwards: ldmda
   loads four descending words, pairing data[i] with window[-i]. */
static inline
void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r7;"
                  "mov   r0, r9, lsl #1;"
                  "smull r8, r9, r1, r6;"
                  "mov   r1, r9, lsl #1;"
                  "smull r8, r9, r2, r5;"
                  "mov   r2, r9, lsl #1;"
                  "smull r8, r9, r3, r4;"
                  "mov   r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory", "cc");
    n -= 4;
  }
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}
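/* Generic sketch of the backwards variant, matching the scalar tail loop
   above (vect_mult_bw_ref is an illustrative name): */
static inline void vect_mult_bw_ref(int32_t *data, const int32_t *window, int n) {
  int i;
  for (i = 0; i < n; i++)
    data[i] = MULT31(data[i], window[-i]);  /* window walks backwards */
}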
#endif

#endif
#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH
/* Saturate x to the 16-bit signed range; the result is meaningful in the
   low 16 bits (the underflow case stores the bit pattern 0x8000). */
static inline int32_t CLIP_TO_15(int32_t x) {
  int tmp;
  asm volatile("subs  %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds  %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}
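/* A portable sketch of the intent (note: for inputs below -32768 the asm
   stores the raw bit pattern 0x8000 rather than a sign-extended -32768,
   which is equivalent once the value is narrowed to 16 bits): */
static inline int32_t CLIP_TO_15_ref(int32_t x) {
  if (x >  32767) return  32767;
  if (x < -32768) return -32768;
  return x;
}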
#endif
#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM

#endif

#endif