/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/

#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
#include "ivorbiscodec.h"
#include "os.h"

#include "asm_arm.h"
#include <stdlib.h> /* for abs() */

#ifndef _V_WIDE_MATH
#define _V_WIDE_MATH

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */

#if !(defined WIN32 && defined WINCE)
#include <sys/types.h>
#endif

#if BYTE_ORDER==LITTLE_ENDIAN
union magic {
  struct {
    ogg_int32_t lo;
    ogg_int32_t hi;
  } halves;
  ogg_int64_t whole;
};
#endif

#if BYTE_ORDER==BIG_ENDIAN
union magic {
  struct {
    ogg_int32_t hi;
    ogg_int32_t lo;
  } halves;
  ogg_int64_t whole;
};
#endif
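
/*
 * The union lets MULT32 pull the high 32 bits of a 64-bit product
 * straight out of halves.hi instead of shifting the whole value right
 * by 32; the two layouts above keep hi/lo in the right place for
 * either byte order.
 */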

STIN ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return magic.halves.hi;
}

STIN ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}

STIN ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
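
/*
 * Worked example (illustrative): in Q31 fixed point 0x40000000
 * represents 0.5, so
 *   MULT31(0x40000000, 0x40000000) = ((2^30 * 2^30) >> 32) << 1
 *                                  = 0x20000000  (0.25 in Q31),
 * i.e. MULT31 is a Q31 x Q31 -> Q31 multiply, while MULT31_SHIFT15
 * shifts the 64-bit product right by only 15 bits.
 */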

#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, therefore
 * ordering is important. Shift values were chosen for the best sound
 * quality after many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits. We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */

STIN ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

STIN ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

STIN ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}
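
/*
 * The same identity holds approximately here: with y preshifted right
 * by 23 bits (y_full denoting the unshifted table value),
 * (x >> 8) * y ~= (x * y_full) >> 31, so MULT31 still behaves as a
 * Q31 multiply while using only a 32-bit product.
 */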

#endif

/*
 * This should be used as a memory barrier, forcing all cached values
 * in registers to be written back to memory. Might or might not be
 * beneficial depending on the architecture and compiler.
 */
#define MB()

/*
 * The XPROD functions are meant to optimize the cross products found
 * all over the place in mdct.c by forcing memory operation ordering
 * to avoid unnecessary register reloads as soon as memory is being
 * written to. However, this is only beneficial on CPUs with a sane
 * number of general purpose registers, which excludes the Intel x86.
 * On Intel it is better to let the compiler reload registers directly
 * from the original memory by using macros.
 */

#ifdef __i386__

#define XPROD32(_a, _b, _t, _v, _x, _y)     \
  { *(_x)=MULT32(_a,_t)+MULT32(_b,_v);      \
    *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#define XPROD31(_a, _b, _t, _v, _x, _y)     \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v);      \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y)    \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v);      \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }

#else

STIN void XPROD32(ogg_int32_t  a, ogg_int32_t  b,
                  ogg_int32_t  t, ogg_int32_t  v,
                  ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT32(a, t) + MULT32(b, v);
  *y = MULT32(b, t) - MULT32(a, v);
}

STIN void XPROD31(ogg_int32_t  a, ogg_int32_t  b,
                  ogg_int32_t  t, ogg_int32_t  v,
                  ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

STIN void XNPROD31(ogg_int32_t  a, ogg_int32_t  b,
                   ogg_int32_t  t, ogg_int32_t  v,
                   ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}

#endif
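
/*
 * In either form, XPROD32/XPROD31 compute the pair
 *   *x = a*t + b*v,  *y = b*t - a*v,
 * i.e. the real and imaginary parts of (a + ib)*(t - iv), the complex
 * rotation used by the mdct.c butterflies. XNPROD31 flips the signs,
 * multiplying by (t + iv) instead.
 */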

#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

STIN ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}
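
/*
 * The clamp is branchless: a comparison evaluates to 1 or 0, so
 * subtracting 1 yields either an all-zero or all-one mask. For
 * example, x = 40000 gives ((40000<=32767)-1) = 0xffffffff, which
 * masked with (40000-32767) = 7233 leaves ret = 40000-7233 = 32767.
 */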

#endif
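
/*
 * The VFLOAT_* helpers below implement a small software float: a
 * value is a 32-bit mantissa plus a base-2 exponent, value =
 * mantissa * 2^exponent. For the multiply, (a*2^ap)*(b*2^bp) =
 * MULT32(a,b) * 2^(ap+bp+32), since MULT32 drops the low 32 bits of
 * the product.
 */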

STIN ogg_int32_t VFLOAT_MULT(ogg_int32_t a,ogg_int32_t ap,
                             ogg_int32_t b,ogg_int32_t bp,
                             ogg_int32_t *p){
  if(a && b){
#ifndef _LOW_ACCURACY_
    *p=ap+bp+32;
    return MULT32(a,b);
#else
    *p=ap+bp+31;
    return (a>>15)*(b>>16);
#endif
  }else
    return 0;
}
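
/*
 * _ilog(n) returns the number of bits needed to hold n (e.g.
 * _ilog(7) = 3). VFLOAT_MULTI uses it to normalize a plain integer
 * into mantissa/exponent form: with ip = _ilog(abs(i))-31, the shift
 * i<<-ip pushes the magnitude up to fill 31 bits while preserving
 * i = (i<<-ip) * 2^ip.
 */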

int _ilog(unsigned int);

STIN ogg_int32_t VFLOAT_MULTI(ogg_int32_t a,ogg_int32_t ap,
                              ogg_int32_t i,
                              ogg_int32_t *p){

  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}

STIN ogg_int32_t VFLOAT_ADD(ogg_int32_t a,ogg_int32_t ap,
                            ogg_int32_t b,ogg_int32_t bp,
                            ogg_int32_t *p){

  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
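  /* Align the operand with the smaller exponent to the larger one,
     rounding to nearest, and give both mantissas one extra bit of
     headroom (hence the +1 on the exponent) so the addition below
     cannot overflow. */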
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;
    }else{
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }
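
  /* Sum the aligned mantissas, then renormalize: if the top two bits
     of the result agree there is a spare sign bit, so shift back up
     one and drop the exponent to recover the headroom given up
     above. */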
  a+=b;
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}

#endif