/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/
//#include "config-tremor.h"

#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_

/* int32_t/int64_t/uint32_t; the original build obtained these via the
   headers commented out below. */
#include <stdint.h>

//#include "ivorbiscodec.h"
//#include "os_types.h"

//#include "asm_arm.h"
//#include "asm_mcf5249.h"
/* Some prototypes that were not defined elsewhere */
//void *_vorbis_block_alloc(vorbis_block *vb,long bytes);
//void _vorbis_block_ripcord(vorbis_block *vb);
//extern int _ilog(unsigned int v);
#ifndef _LOW_ACCURACY_
/* 64 bit multiply */
/* #include <sys/types.h> */
#if ROCKBOX_LITTLE_ENDIAN == 1
union magic {
  struct {
    int32_t lo;
    int32_t hi;
  } halves;
  int64_t whole;
};
#elif ROCKBOX_BIG_ENDIAN == 1
union magic {
  struct {
    int32_t hi;
    int32_t lo;
  } halves;
  int64_t whole;
};
#endif
static inline int32_t MULT32(int32_t x, int32_t y) {
  union magic magic;
  magic.whole = (int64_t)x * y;
  return magic.halves.hi;   /* the top 32 bits of the 64 bit product */
}
static inline int32_t MULT31(int32_t x, int32_t y) {
  return MULT32(x, y) << 1;
}
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  union magic magic;
  magic.whole = (int64_t)x * y;
  return ((uint32_t)(magic.halves.lo) >> 15) | ((magic.halves.hi) << 17);
}
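
/* A quick sanity sketch (illustrative only, not part of the codec): MULT32
 * is a Q32 multiply, i.e. (int32_t)(((int64_t)x * y) >> 32), and MULT31
 * treats both operands as signed Q31 fractions in [-1.0, 1.0). For
 * example, 0.5 * 0.5 = 0.25 in Q31: */
#if 0
static void mult31_sketch(void)
{
    int32_t half    = 0x40000000;          /* 0.5 in Q31 */
    int32_t quarter = MULT31(half, half);  /* 0x20000000 == 0.25 in Q31 */
    (void)quarter;
}
#endif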
#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, therefore ordering
 * is important. Shift values were chosen for the best sound quality after
 * many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits. We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */
static inline int32_t MULT32(int32_t x, int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}
static inline int32_t MULT31(int32_t x, int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}

#endif
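
/* A sketch of the shift arithmetic (illustrative only): the low accuracy
 * MULT32 splits the accurate version's >>32 between the operands, 9 bits
 * off x plus the 23 bit preshift already baked into the table value y,
 * trading the bottom 9 bits of x for a pure 32 bit multiply: */
#if 0
static int32_t mult32_low_accuracy_sketch(int32_t x, int32_t y_q31)
{
    int32_t y_tab = y_q31 >> 23;  /* what the preshifted table stores */
    return (x >> 9) * y_tab;      /* ~ (int32_t)(((int64_t)x * y_q31) >> 32) */
}
#endif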
/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory. Might or might not be beneficial
 * depending on the architecture and compiler.
 */
#define MB()
/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is being written to.
 * However, this is only beneficial on CPUs with a sane number of general
 * purpose registers, which excludes the Intel x86. On Intel, it is better
 * to let the compiler reload registers directly from original memory by
 * using macros.
 */
/* replaced XPROD32 with a macro to avoid memory reference
   _x, _y are the results (must be l-values) */
#define XPROD32(_a, _b, _t, _v, _x, _y) \
  { (_x)=MULT32(_a,_t)+MULT32(_b,_v);   \
    (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
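
/* What the macro computes, on made-up operands (illustrative only):
 * x = (a*t + b*v)>>32 and y = (b*t - a*v)>>32, i.e. the rotation of the
 * vector (a, b) when t and v hold a cosine/sine pair: */
#if 0
static void xprod32_sketch(void)
{
    int32_t x, y;
    XPROD32(0x40000000, 0,          /* (a, b) ~ (0.5, 0) */
            0x7fffffff, 0,          /* (t, v) ~ (1.0, 0) */
            x, y);
    /* x == 0x3fffffff, y == 0 */
    (void)x; (void)y;
}
#endif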
#ifdef __i386__

#define XPROD31(_a, _b, _t, _v, _x, _y)  \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v);   \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v);   \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
#else

static inline void XPROD31(int32_t  a, int32_t  b,
                           int32_t  t, int32_t  v,
                           int32_t *x, int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}
static inline void XNPROD31(int32_t  a, int32_t  b,
                            int32_t  t, int32_t  v,
                            int32_t *x, int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}

#endif
static inline void vect_add(int32_t *x, int32_t *y, int n)
{
  /* accumulate y into x, element by element */
  while (n > 0) {
    *x++ += *y++;
    n--;
  }
}
static inline void vect_copy(int32_t *x, int32_t *y, int n)
{
  /* copy n samples from y to x */
  while (n > 0) {
    *x++ = *y++;
    n--;
  }
}
static inline void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
  /* scale the data by the window, walking the window forwards */
  while (n > 0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}
static inline void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
  /* as above, but walking the window backwards */
  while (n > 0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}
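
/* Usage sketch (hypothetical buffers, not the decoder's actual data flow):
 * the fw/bw pair matches the rising and falling halves of an overlap-add
 * window, the falling half reusing the same table read back to front: */
#if 0
static void window_sketch(int32_t *pcm, int32_t *win, int half)
{
    vect_mult_fw(pcm, win, half);                    /* rising half  */
    vect_mult_bw(pcm + half, win + half - 1, half);  /* falling half */
}
#endif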
static inline int32_t CLIP_TO_15(int32_t x) {
  int32_t ret = x;
  ret -= ((x <=  32767) - 1) & (x - 32767);
  ret -= ((x >= -32768) - 1) & (x + 32768);
  return ret;
}
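
/* Why the branchless form works (explanatory sketch, not extra API): a C
 * comparison yields 1 or 0, so ((x <= 32767) - 1) is either 0 (in range)
 * or an all-ones mask that lets the subtraction pull x back to the
 * saturation boundary: */
#if 0
#include <assert.h>
static void clip_sketch(void)
{
    assert(CLIP_TO_15( 40000) ==  32767);
    assert(CLIP_TO_15(-40000) == -32768);
    assert(CLIP_TO_15(   123) ==    123);
}
#endif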
static inline int32_t VFLOAT_MULT(int32_t a, int32_t ap,
                                  int32_t b, int32_t bp,
                                  int32_t *p){
  if(a && b){
#ifndef _LOW_ACCURACY_
    *p = ap + bp + 32;
    return MULT32(a, b);
#else
    *p = ap + bp + 31;
    return (a >> 15) * (b >> 16);
#endif
  }else
    return 0;
}
/*static inline int32_t VFLOAT_MULTI(int32_t a,int32_t ap,
                                     int32_t i,
                                     int32_t *p){

  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}
*/
static inline int32_t VFLOAT_ADD(int32_t a, int32_t ap,
                                 int32_t b, int32_t bp,
                                 int32_t *p){
  if(!a){
    *p = bp;
    return b;
  }else if(!b){
    *p = ap;
    return a;
  }

  /* yes, this can leak a bit. */
  if(ap > bp){
    int shift = ap - bp + 1;
    *p = ap + 1;
    a >>= 1;
    if(shift < 32)
      b = (b + (1 << (shift - 1))) >> shift;
    else
      b = 0;
  }else{
    int shift = bp - ap + 1;
    *p = bp + 1;
    b >>= 1;
    if(shift < 32)
      a = (a + (1 << (shift - 1))) >> shift;
    else
      a = 0;
  }

  a += b;
  if((a & 0xc0000000) == 0xc0000000 ||
     (a & 0xc0000000) == 0){
    a <<= 1;
    (*p)--;
  }
  return a;
}
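
/* Representation sketch (a reading of the code above, hedged): the VFLOAT
 * helpers emulate floating point as value = mantissa * 2^exponent, with
 * the exponent returned through *p. Adding two numbers first aligns their
 * exponents, then renormalizes the sum: */
#if 0
static void vfloat_sketch(void)
{
    int32_t p;
    int32_t m = VFLOAT_ADD(0x40000000, 0,   /* 0x40000000 * 2^0 */
                           0x40000000, 0,   /* 0x40000000 * 2^0 */
                           &p);
    /* m == 0x40000000, p == 1: the sum, up to rounding */
    (void)m;
}
#endif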
#ifdef __GNUC__
#if __GNUC__ >= 3   /* __builtin_expect needs a sufficiently recent gcc */
#define EXPECT(a, b) __builtin_expect((a), (b))
#else
#define EXPECT(a, b) (a)
#endif
#else
#define EXPECT(a, b) (a)
#endif

#endif /* _V_RANDOM_H_ */
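
/* Usage sketch (hypothetical condition): EXPECT is a branch-prediction
 * hint, the second argument being the expected value of the first, e.g.
 *
 *   if(EXPECT(bytes_left > 0, 1)){ ... hot path ... }
 *
 * On older gcc and non-gcc compilers it degrades to a plain expression. */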