/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/
#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH
static inline int32_t MULT32(int32_t x, int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
static inline int32_t MULT31(int32_t x, int32_t y) {
  return MULT32(x,y)<<1;
}
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  int lo,hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs %0, %0, lsr #15\n\t"
               "adc %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
#define MB() asm volatile ("" : : : "memory")
#define XPROD32(a, b, t, v, x, y) \
{ \
  long l; \
  asm( "smull %0, %1, %4, %6\n\t" \
       "smlal %0, %1, %5, %7\n\t" \
       "rsb %3, %4, #0\n\t" \
       "smull %0, %2, %5, %6\n\t" \
       "smlal %0, %2, %3, %7" \
       : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) \
       : "cc" ); \
}
static inline void XPROD31(int32_t  a, int32_t  b,
                           int32_t  t, int32_t  v,
                           int32_t *x, int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
static inline void XNPROD31(int32_t  a, int32_t  b,
                            int32_t  t, int32_t  v,
                            int32_t *x, int32_t *y)
{
  int x1, y1, l;
  asm( "rsb %2, %4, #0\n\t"
       "smull %0, %1, %3, %5\n\t"
       "smlal %0, %1, %2, %6\n\t"
       "smull %0, %2, %4, %5\n\t"
       "smlal %0, %2, %3, %6"
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
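/* Illustration only: this mirrors the generic (non-asm) Tremor definitions
   of the cross products, which is what the asm above computes; the *_ref
   names are hypothetical and the block is excluded from the build.  XPROD32
   follows the same pattern with MULT32 in place of MULT31. */
#if 0
static inline void XPROD31_ref(int32_t a, int32_t b, int32_t t, int32_t v,
                               int32_t *x, int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}
static inline void XNPROD31_ref(int32_t a, int32_t b, int32_t t, int32_t v,
                                int32_t *x, int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}
#endif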
/* asm versions of vector operations for block.c, window.c */
static inline
void vect_add(int32_t *x, int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4;"
                  "add r1, r1, r5;"
                  "add r2, r2, r6;"
                  "add r3, r3, r7;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7",
                  "memory");
    n -= 4;
  }
  /* add final elements */
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}
static inline
void vect_copy(int32_t *x, int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[y]!, {r0, r1, r2, r3};"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                  "memory");
    n -= 4;
  }
  /* copy final elements */
  while (n>0) {
    *x++ = *y++;
    n--;
  }
}
static inline
void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r4;"
                  "mov r0, r9, lsl #1;"
                  "smull r8, r9, r1, r5;"
                  "mov r1, r9, lsl #1;"
                  "smull r8, r9, r2, r6;"
                  "mov r2, r9, lsl #1;"
                  "smull r8, r9, r3, r7;"
                  "mov r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7", "r8", "r9",
                  "memory");
    n -= 4;
  }
  /* multiply final elements */
  while (n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}
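/* Illustration only: each smull/mov pair above is a MULT31 of one data
   element with one window element, i.e. roughly
     data[i] = (int32_t)(((int64_t)data[i] * window[i]) >> 31);
   with the window traversed forwards; the tail loop handles n not being a
   multiple of 4. */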
static inline
void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r7;"
                  "mov r0, r9, lsl #1;"
                  "smull r8, r9, r1, r6;"
                  "mov r1, r9, lsl #1;"
                  "smull r8, r9, r2, r5;"
                  "mov r2, r9, lsl #1;"
                  "smull r8, r9, r3, r4;"
                  "mov r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7", "r8", "r9",
                  "memory");
    n -= 4;
  }
  /* multiply final elements */
  while (n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}
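/* Illustration only: the backward variant is the same multiply, but ldmda
   loads the window block in descending order and pairs r0..r3 with r7..r4,
   so the window pointer effectively walks backwards:
     data[i] = (int32_t)(((int64_t)data[i] * window[-i]) >> 31);
   (window[-i] relative to the pointer passed in, as in the tail loop). */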
#endif

static inline int32_t CLIP_TO_15(int32_t x) {
  int tmp;
  asm volatile("subs %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp) : : "cc");
  return(x);
}
#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM