/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2007                *
 * by the Xiph.Org Foundation http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

  function:
    last mod: $Id: mmxstate.c 14385 2008-01-09 19:53:18Z giles $

 ********************************************************************/

/*MMX acceleration of complete fragment reconstruction algorithm.
  Originally written by Rudolf Marek.*/
#include "x86int.h"
#include "../../internal.h"

#if defined(USE_ASM)

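/*The zig-zag scan order, permuted to index the coefficient layout that the
   MMX iDCT routines expect (see its use in oc_state_frag_recon_mmx() below).*/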
static const __attribute__((aligned(8),used)) int OC_FZIG_ZAGMMX[64]={
   0, 8, 1, 2, 9,16,24,17,
  10, 3,32,11,18,25, 4,12,
   5,26,19,40,33,34,41,48,
  27, 6,13,20,28,21,14, 7,
  56,49,42,35,43,50,57,36,
  15,22,29,30,23,44,37,58,
  51,59,38,45,52,31,60,53,
  46,39,47,54,61,62,55,63
};

void oc_state_frag_recon_mmx(oc_theora_state *_state,oc_fragment *_frag,
 int _pli,ogg_int16_t _dct_coeffs[128],int _last_zzi,int _ncoefs,
 ogg_uint16_t _dc_iquant,const ogg_uint16_t _ac_iquant[64]){
  ogg_int16_t __attribute__((aligned(8))) res_buf[64];
  int dst_framei;
  int dst_ystride;
  int zzi;
  /*_last_zzi is subtly different from an actual count of the number of
     coefficients we decoded for this block.
    It contains the value of zzi BEFORE the final token in the block was
     decoded.
    In most cases this is an EOB token (the continuation of an EOB run from a
     previous block counts), and so this is the same as the coefficient count.
    However, in the case that the last token was NOT an EOB token, but filled
     the block up with exactly 64 coefficients, _last_zzi will be less than
     64.
    Provided the last token was not a pure zero run, the minimum value it can
     be is 46, and so that doesn't affect any of the cases in this routine.
    However, if the last token WAS a pure zero run of length 63, then
     _last_zzi will be 1 while the number of coefficients decoded is 64.
    Thus, we will trigger the following special case, where the real
     coefficient count would not.
    Note also that a zero run of length 64 will give _last_zzi a value of 0,
     but we still process the DC coefficient, which might have a non-zero
     value due to DC prediction.
    Although convoluted, this is arguably the correct behavior: it allows us
     to dequantize fewer coefficients and use a smaller transform when the
     block ends with a long zero run instead of a normal EOB token.
    It could be smarter... multiple separate zero runs at the end of a block
     will fool it, but an encoder that generates these really deserves what
     it gets.
    Needless to say we inherited this approach from VP3.*/
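  /*For example: a block whose final token is an EOB arriving after 10
     coefficients have been decoded leaves _last_zzi==10, matching the
     coefficient count, while a block ending in a pure zero run of length 63
     decodes 64 coefficients but leaves _last_zzi==1.*/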
  /*Special case only having a DC component.*/
  if(_last_zzi<2){
    ogg_uint16_t p;
    /*Why is the iquant product rounded in this case and no others?
       Who knows.*/
    p=(ogg_int16_t)((ogg_int32_t)_frag->dc*_dc_iquant+15>>5);
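    /*(Precedence note: the shift binds loosest, so this computes
       ((ogg_int32_t)_frag->dc*_dc_iquant+15)>>5.)*/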
    /*Fill res_buf with p.*/
    __asm__ __volatile__(
      /*mm0=0000 0000 0000 AAAA*/
      "movd %[p],%%mm0\n\t"
      /*mm1=0000 0000 0000 AAAA*/
      "movd %[p],%%mm1\n\t"
      /*mm0=0000 0000 AAAA 0000*/
      "pslld $16,%%mm0\n\t"
      /*mm0=0000 0000 AAAA AAAA*/
      "por %%mm1,%%mm0\n\t"
      /*mm0=AAAA AAAA AAAA AAAA*/
      "punpcklwd %%mm0,%%mm0\n\t"
      "movq %%mm0,(%[res_buf])\n\t"
      "movq %%mm0,8(%[res_buf])\n\t"
      "movq %%mm0,16(%[res_buf])\n\t"
      "movq %%mm0,24(%[res_buf])\n\t"
      "movq %%mm0,32(%[res_buf])\n\t"
      "movq %%mm0,40(%[res_buf])\n\t"
      "movq %%mm0,48(%[res_buf])\n\t"
      "movq %%mm0,56(%[res_buf])\n\t"
      "movq %%mm0,64(%[res_buf])\n\t"
      "movq %%mm0,72(%[res_buf])\n\t"
      "movq %%mm0,80(%[res_buf])\n\t"
      "movq %%mm0,88(%[res_buf])\n\t"
      "movq %%mm0,96(%[res_buf])\n\t"
      "movq %%mm0,104(%[res_buf])\n\t"
      "movq %%mm0,112(%[res_buf])\n\t"
      "movq %%mm0,120(%[res_buf])\n\t"
      :
      :[res_buf]"r"(res_buf),[p]"r"((unsigned)p)
      :"memory"
    );
  }
  else{
    /*Then, fill in the remainder of the coefficients with 0's, and perform
       the iDCT.*/
    /*First zero the buffer.*/
    /*On K7, etc., this could be replaced with movntq and sfence.*/
    __asm__ __volatile__(
      "pxor %%mm0,%%mm0\n\t"
      "movq %%mm0,(%[res_buf])\n\t"
      "movq %%mm0,8(%[res_buf])\n\t"
      "movq %%mm0,16(%[res_buf])\n\t"
      "movq %%mm0,24(%[res_buf])\n\t"
      "movq %%mm0,32(%[res_buf])\n\t"
      "movq %%mm0,40(%[res_buf])\n\t"
      "movq %%mm0,48(%[res_buf])\n\t"
      "movq %%mm0,56(%[res_buf])\n\t"
      "movq %%mm0,64(%[res_buf])\n\t"
      "movq %%mm0,72(%[res_buf])\n\t"
      "movq %%mm0,80(%[res_buf])\n\t"
      "movq %%mm0,88(%[res_buf])\n\t"
      "movq %%mm0,96(%[res_buf])\n\t"
      "movq %%mm0,104(%[res_buf])\n\t"
      "movq %%mm0,112(%[res_buf])\n\t"
      "movq %%mm0,120(%[res_buf])\n\t"
      :
      :[res_buf]"r"(res_buf)
      :"memory"
    );
    res_buf[0]=(ogg_int16_t)((ogg_int32_t)_frag->dc*_dc_iquant);
    /*This is planned to be rewritten in MMX.*/
    for(zzi=1;zzi<_ncoefs;zzi++){
      int ci;
      ci=OC_FZIG_ZAG[zzi];
      res_buf[OC_FZIG_ZAGMMX[zzi]]=(ogg_int16_t)((ogg_int32_t)_dct_coeffs[zzi]*
       _ac_iquant[ci]);
    }
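    /*When _last_zzi<10 only the first ten zig-zag coefficients can be
       nonzero (see the discussion of _last_zzi above), so a reduced
       transform can be used; the name of oc_idct8x8_10_mmx() suggests
       exactly that specialization.*/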
    if(_last_zzi<10)oc_idct8x8_10_mmx(res_buf);
    else oc_idct8x8_mmx(res_buf);
  }
  /*Fill in the target buffer.*/
  dst_framei=_state->ref_frame_idx[OC_FRAME_SELF];
  dst_ystride=_state->ref_frame_bufs[dst_framei][_pli].stride;
  /*For now ystride values in all ref frames assumed to be equal.*/
  if(_frag->mbmode==OC_MODE_INTRA){
    oc_frag_recon_intra_mmx(_frag->buffer[dst_framei],dst_ystride,res_buf);
  }
  else{
    int ref_framei;
    int ref_ystride;
    int mvoffsets[2];
    ref_framei=_state->ref_frame_idx[OC_FRAME_FOR_MODE[_frag->mbmode]];
    ref_ystride=_state->ref_frame_bufs[ref_framei][_pli].stride;
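    /*oc_state_get_mv_offsets() returns the number of whole-pel offsets
       needed: two when the motion vector has a half-pel component, in which
       case the two predictors are averaged.*/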
    if(oc_state_get_mv_offsets(_state,mvoffsets,_frag->mv[0],_frag->mv[1],
     ref_ystride,_pli)>1){
      oc_frag_recon_inter2_mmx(_frag->buffer[dst_framei],dst_ystride,
       _frag->buffer[ref_framei]+mvoffsets[0],ref_ystride,
       _frag->buffer[ref_framei]+mvoffsets[1],ref_ystride,res_buf);
    }
    else{
      oc_frag_recon_inter_mmx(_frag->buffer[dst_framei],dst_ystride,
       _frag->buffer[ref_framei]+mvoffsets[0],ref_ystride,res_buf);
    }
  }
  oc_restore_fpu(_state);
}

/*Copies the fragments specified by the lists of fragment indices from one
   frame to another.
  _fragis:    A pointer to a list of fragment indices.
  _nfragis:   The number of fragment indices to copy.
  _dst_frame: The reference frame to copy to.
  _src_frame: The reference frame to copy from.
  _pli:       The color plane the fragments lie in.*/
void oc_state_frag_copy_mmx(const oc_theora_state *_state,const int *_fragis,
 int _nfragis,int _dst_frame,int _src_frame,int _pli){
  const int *fragi;
  const int *fragi_end;
  int dst_framei;
  long dst_ystride;
  int src_framei;
  long src_ystride;
  dst_framei=_state->ref_frame_idx[_dst_frame];
  src_framei=_state->ref_frame_idx[_src_frame];
  dst_ystride=_state->ref_frame_bufs[dst_framei][_pli].stride;
  src_ystride=_state->ref_frame_bufs[src_framei][_pli].stride;
  fragi_end=_fragis+_nfragis;
  for(fragi=_fragis;fragi<fragi_end;fragi++){
    oc_fragment *frag;
    unsigned char *dst;
    unsigned char *src;
    long esi;
    frag=_state->frags+*fragi;
    dst=frag->buffer[dst_framei];
    src=frag->buffer[src_framei];
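    /*Copy the 8x8 fragment 8 bytes at a time, four rows per batch.*/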
    __asm__ __volatile__(
      /*src+0*src_ystride*/
      "movq (%[src]),%%mm0\n\t"
      /*esi=src_ystride*3*/
      "lea (%[src_ystride],%[src_ystride],2),%[s]\n\t"
      /*src+1*src_ystride*/
      "movq (%[src],%[src_ystride]),%%mm1\n\t"
      /*src+2*src_ystride*/
      "movq (%[src],%[src_ystride],2),%%mm2\n\t"
      /*src+3*src_ystride*/
      "movq (%[src],%[s]),%%mm3\n\t"
      /*dst+0*dst_ystride*/
      "movq %%mm0,(%[dst])\n\t"
      /*esi=dst_ystride*3*/
      "lea (%[dst_ystride],%[dst_ystride],2),%[s]\n\t"
      /*dst+1*dst_ystride*/
      "movq %%mm1,(%[dst],%[dst_ystride])\n\t"
      /*Pointer to next 4.*/
      "lea (%[src],%[src_ystride],4),%[src]\n\t"
      /*dst+2*dst_ystride*/
      "movq %%mm2,(%[dst],%[dst_ystride],2)\n\t"
      /*dst+3*dst_ystride*/
      "movq %%mm3,(%[dst],%[s])\n\t"
      /*Pointer to next 4.*/
      "lea (%[dst],%[dst_ystride],4),%[dst]\n\t"
      /*src+0*src_ystride*/
      "movq (%[src]),%%mm0\n\t"
      /*esi=src_ystride*3*/
      "lea (%[src_ystride],%[src_ystride],2),%[s]\n\t"
      /*src+1*src_ystride*/
      "movq (%[src],%[src_ystride]),%%mm1\n\t"
      /*src+2*src_ystride*/
      "movq (%[src],%[src_ystride],2),%%mm2\n\t"
      /*src+3*src_ystride*/
      "movq (%[src],%[s]),%%mm3\n\t"
      /*dst+0*dst_ystride*/
      "movq %%mm0,(%[dst])\n\t"
      /*esi=dst_ystride*3*/
      "lea (%[dst_ystride],%[dst_ystride],2),%[s]\n\t"
      /*dst+1*dst_ystride*/
      "movq %%mm1,(%[dst],%[dst_ystride])\n\t"
      /*dst+2*dst_ystride*/
      "movq %%mm2,(%[dst],%[dst_ystride],2)\n\t"
      /*dst+3*dst_ystride*/
      "movq %%mm3,(%[dst],%[s])\n\t"
      /*dst and src are advanced by the lea instructions above, so they must
         be marked read/write.*/
      :[s]"=&S"(esi),[dst]"+r"(dst),[src]"+r"(src)
      :[dst_ystride]"r"(dst_ystride),[src_ystride]"r"(src_ystride)
      :"memory"
    );
  }
  /*This needs to be removed when decode specific functions are implemented:*/
  __asm__ __volatile__("emms\n\t");
}

static void loop_filter_v(unsigned char *_pix,int _ystride,
 const ogg_int16_t *_ll){
  long esi;
  _pix-=_ystride*2;
  __asm__ __volatile__(
    /*mm0=0*/
    "pxor %%mm0,%%mm0\n\t"
    /*esi=_ystride*3*/
    "lea (%[ystride],%[ystride],2),%[s]\n\t"
    /*mm7=_pix[0...8]*/
    "movq (%[pix]),%%mm7\n\t"
    /*mm4=_pix[0...8+_ystride*3]*/
    "movq (%[pix],%[s]),%%mm4\n\t"
    /*mm6=_pix[0...8]*/
    "movq %%mm7,%%mm6\n\t"
    /*Expand unsigned _pix[0...3] to 16 bits.*/
    "punpcklbw %%mm0,%%mm6\n\t"
    "movq %%mm4,%%mm5\n\t"
    /*Expand unsigned _pix[4...8] to 16 bits.*/
    "punpckhbw %%mm0,%%mm7\n\t"
    /*Expand other arrays too.*/
    "punpcklbw %%mm0,%%mm4\n\t"
    "punpckhbw %%mm0,%%mm5\n\t"
    /*mm7:mm6=_pix[0...8]-_pix[0...8+_ystride*3]*/
    "psubw %%mm4,%%mm6\n\t"
    "psubw %%mm5,%%mm7\n\t"
    /*mm5=mm4=_pix[0...8+_ystride]*/
    "movq (%[pix],%[ystride]),%%mm4\n\t"
    /*mm1=mm3=mm2=_pix[0...8+_ystride*2]*/
285 "movq (%[pix],%[ystride],2),%%mm2\n\t"
286 "movq %%mm4,%%mm5\n\t"
287 "movq %%mm2,%%mm3\n\t"
288 "movq %%mm2,%%mm1\n\t"
289 /*Expand these arrays.*/
290 "punpckhbw %%mm0,%%mm5\n\t"
291 "punpcklbw %%mm0,%%mm4\n\t"
292 "punpckhbw %%mm0,%%mm3\n\t"
293 "punpcklbw %%mm0,%%mm2\n\t"
294 /*mm0=3 3 3 3
295 mm3:mm2=_pix[0...8+_ystride*2]-_pix[0...8+_ystride]*/
296 "pcmpeqw %%mm0,%%mm0\n\t"
297 "psubw %%mm5,%%mm3\n\t"
298 "psrlw $14,%%mm0\n\t"
299 "psubw %%mm4,%%mm2\n\t"
300 /*Scale by 3.*/
301 "pmullw %%mm0,%%mm3\n\t"
302 "pmullw %%mm0,%%mm2\n\t"
303 /*mm0=4 4 4 4
304 f=mm3:mm2==_pix[0...8]-_pix[0...8+_ystride*3]+
305 3*(_pix[0...8+_ystride*2]-_pix[0...8+_ystride])*/
306 "psrlw $1,%%mm0\n\t"
307 "paddw %%mm7,%%mm3\n\t"
308 "psllw $2,%%mm0\n\t"
309 "paddw %%mm6,%%mm2\n\t"
310 /*Add 4.*/
311 "paddw %%mm0,%%mm3\n\t"
312 "paddw %%mm0,%%mm2\n\t"
313 /*"Divide" by 8.*/
314 "psraw $3,%%mm3\n\t"
315 "psraw $3,%%mm2\n\t"
    /*Now compute lflim of mm3:mm2 cf. Section 7.10 of the spec.*/
    /*Free up mm5.*/
    "packuswb %%mm5,%%mm4\n\t"
    /*mm0=L L L L*/
    "movq (%[ll]),%%mm0\n\t"
    /*if(R_i<-2L||R_i>2L)R_i=0:*/
    "movq %%mm2,%%mm5\n\t"
    "pxor %%mm6,%%mm6\n\t"
    "movq %%mm0,%%mm7\n\t"
    "psubw %%mm0,%%mm6\n\t"
    "psllw $1,%%mm7\n\t"
    "psllw $1,%%mm6\n\t"
    /*mm2==R_3 R_2 R_1 R_0*/
    /*mm5==R_3 R_2 R_1 R_0*/
    /*mm6==-2L -2L -2L -2L*/
    /*mm7==2L 2L 2L 2L*/
    "pcmpgtw %%mm2,%%mm7\n\t"
    "pcmpgtw %%mm6,%%mm5\n\t"
    "pand %%mm7,%%mm2\n\t"
    "movq %%mm0,%%mm7\n\t"
    "pand %%mm5,%%mm2\n\t"
    "psllw $1,%%mm7\n\t"
    "movq %%mm3,%%mm5\n\t"
    /*mm3==R_7 R_6 R_5 R_4*/
    /*mm5==R_7 R_6 R_5 R_4*/
    /*mm6==-2L -2L -2L -2L*/
    /*mm7==2L 2L 2L 2L*/
    "pcmpgtw %%mm3,%%mm7\n\t"
    "pcmpgtw %%mm6,%%mm5\n\t"
    "pand %%mm7,%%mm3\n\t"
    "movq %%mm0,%%mm7\n\t"
    "pand %%mm5,%%mm3\n\t"
    /*if(R_i<-L)R_i'=R_i+2L;
      if(R_i>L)R_i'=R_i-2L;
      if(R_i<-L||R_i>L)R_i=-R_i':*/
    "psraw $1,%%mm6\n\t"
    "movq %%mm2,%%mm5\n\t"
    "psllw $1,%%mm7\n\t"
    /*mm2==R_3 R_2 R_1 R_0*/
    /*mm5==R_3 R_2 R_1 R_0*/
    /*mm6==-L -L -L -L*/
    /*mm0==L L L L*/
    /*mm5=R_i>L?FF:00*/
    "pcmpgtw %%mm0,%%mm5\n\t"
    /*mm6=-L>R_i?FF:00*/
    "pcmpgtw %%mm2,%%mm6\n\t"
    /*mm7=R_i>L?2L:0*/
    "pand %%mm5,%%mm7\n\t"
    /*mm2=R_i>L?R_i-2L:R_i*/
    "psubw %%mm7,%%mm2\n\t"
    "movq %%mm0,%%mm7\n\t"
    /*mm5=-L>R_i||R_i>L*/
    "por %%mm6,%%mm5\n\t"
    "psllw $1,%%mm7\n\t"
    /*mm7=-L>R_i?2L:0*/
    "pand %%mm6,%%mm7\n\t"
    "pxor %%mm6,%%mm6\n\t"
    /*mm2=-L>R_i?R_i+2L:R_i*/
    "paddw %%mm7,%%mm2\n\t"
    "psubw %%mm0,%%mm6\n\t"
    /*mm5=-L>R_i||R_i>L?R_i':0*/
    "pand %%mm2,%%mm5\n\t"
    "movq %%mm0,%%mm7\n\t"
    /*mm2=-L>R_i||R_i>L?0:R_i*/
    "psubw %%mm5,%%mm2\n\t"
    "psllw $1,%%mm7\n\t"
    /*mm2=-L>R_i||R_i>L?-R_i':R_i*/
    "psubw %%mm5,%%mm2\n\t"
    "movq %%mm3,%%mm5\n\t"
    /*mm3==R_7 R_6 R_5 R_4*/
    /*mm5==R_7 R_6 R_5 R_4*/
    /*mm6==-L -L -L -L*/
    /*mm0==L L L L*/
    /*mm6=-L>R_i?FF:00*/
    "pcmpgtw %%mm3,%%mm6\n\t"
    /*mm5=R_i>L?FF:00*/
    "pcmpgtw %%mm0,%%mm5\n\t"
    /*mm7=R_i>L?2L:0*/
    "pand %%mm5,%%mm7\n\t"
    /*mm3=R_i>L?R_i-2L:R_i*/
    "psubw %%mm7,%%mm3\n\t"
397 "psllw $1,%%mm0\n\t"
398 /*mm5=-L>R_i||R_i>L*/
399 "por %%mm6,%%mm5\n\t"
400 /*mm0=-L>R_i?2L:0*/
401 "pand %%mm6,%%mm0\n\t"
402 /*mm3=-L>R_i?R_i+2L:R_i*/
403 "paddw %%mm0,%%mm3\n\t"
404 /*mm5=-L>R_i||R_i>L?-R_i':0*/
405 "pand %%mm3,%%mm5\n\t"
    /*mm3=-L>R_i||R_i>L?0:R_i*/
    "psubw %%mm5,%%mm3\n\t"
    /*mm3=-L>R_i||R_i>L?-R_i':R_i*/
    "psubw %%mm5,%%mm3\n\t"
    /*Unfortunately, there's no unsigned byte+signed byte with unsigned
       saturation opcode, so we have to promote things back to 16 bits.*/
412 "pxor %%mm0,%%mm0\n\t"
413 "movq %%mm4,%%mm5\n\t"
414 "punpcklbw %%mm0,%%mm4\n\t"
415 "punpckhbw %%mm0,%%mm5\n\t"
416 "movq %%mm1,%%mm6\n\t"
417 "punpcklbw %%mm0,%%mm1\n\t"
418 "punpckhbw %%mm0,%%mm6\n\t"
419 /*_pix[0...8+_ystride]+=R_i*/
420 "paddw %%mm2,%%mm4\n\t"
421 "paddw %%mm3,%%mm5\n\t"
422 /*_pix[0...8+_ystride*2]-=R_i*/
423 "psubw %%mm2,%%mm1\n\t"
424 "psubw %%mm3,%%mm6\n\t"
425 "packuswb %%mm5,%%mm4\n\t"
426 "packuswb %%mm6,%%mm1\n\t"
427 /*Write it back out.*/
428 "movq %%mm4,(%[pix],%[ystride])\n\t"
429 "movq %%mm1,(%[pix],%[ystride],2)\n\t"
    :[s]"=&S"(esi)
    :[pix]"r"(_pix),[ystride]"r"((long)_ystride),[ll]"r"(_ll)
    :"memory"
  );
}

/*This code implements the bulk of loop_filter_h().
  Data are striped p0 p1 p2 p3 ... p0 p1 p2 p3 ..., so in order to load all
   four p0's to one register we must transpose the values in four mmx regs.
  When half is done we repeat this for the rest.*/
static void loop_filter_h4(unsigned char *_pix,long _ystride,
 const ogg_int16_t *_ll){
  long esi;
  long edi;
  __asm__ __volatile__(
    /*x x x x 3 2 1 0*/
    "movd (%[pix]),%%mm0\n\t"
    /*esi=_ystride*3*/
    "lea (%[ystride],%[ystride],2),%[s]\n\t"
    /*x x x x 7 6 5 4*/
    "movd (%[pix],%[ystride]),%%mm1\n\t"
    /*x x x x B A 9 8*/
    "movd (%[pix],%[ystride],2),%%mm2\n\t"
    /*x x x x F E D C*/
    "movd (%[pix],%[s]),%%mm3\n\t"
    /*mm0=7 3 6 2 5 1 4 0*/
    "punpcklbw %%mm1,%%mm0\n\t"
    /*mm2=F B E A D 9 C 8*/
    "punpcklbw %%mm3,%%mm2\n\t"
    /*mm1=7 3 6 2 5 1 4 0*/
    "movq %%mm0,%%mm1\n\t"
    /*mm0=F B 7 3 E A 6 2*/
    "punpckhwd %%mm2,%%mm0\n\t"
    /*mm1=D 9 5 1 C 8 4 0*/
    "punpcklwd %%mm2,%%mm1\n\t"
    "pxor %%mm7,%%mm7\n\t"
    /*mm5=D 9 5 1 C 8 4 0*/
    "movq %%mm1,%%mm5\n\t"
    /*mm1=x C x 8 x 4 x 0==pix[0]*/
    "punpcklbw %%mm7,%%mm1\n\t"
    /*mm5=x D x 9 x 5 x 1==pix[1]*/
    "punpckhbw %%mm7,%%mm5\n\t"
    /*mm3=F B 7 3 E A 6 2*/
    "movq %%mm0,%%mm3\n\t"
    /*mm0=x E x A x 6 x 2==pix[2]*/
    "punpcklbw %%mm7,%%mm0\n\t"
    /*mm3=x F x B x 7 x 3==pix[3]*/
    "punpckhbw %%mm7,%%mm3\n\t"
    /*mm1=mm1-mm3==pix[0]-pix[3]*/
    "psubw %%mm3,%%mm1\n\t"
    /*Save a copy of pix[2] for later.*/
    "movq %%mm0,%%mm4\n\t"
    /*mm2=3 3 3 3
      mm0=mm0-mm5==pix[2]-pix[1]*/
    "pcmpeqw %%mm2,%%mm2\n\t"
    "psubw %%mm5,%%mm0\n\t"
    "psrlw $14,%%mm2\n\t"
    /*Scale by 3.*/
    "pmullw %%mm2,%%mm0\n\t"
    /*mm2=4 4 4 4
      f=mm0==_pix[0]-_pix[3]+3*(_pix[2]-_pix[1])*/
491 "psrlw $1,%%mm2\n\t"
492 "paddw %%mm1,%%mm0\n\t"
493 "psllw $2,%%mm2\n\t"
494 /*Add 4.*/
495 "paddw %%mm2,%%mm0\n\t"
496 /*"Divide" by 8, producing the residuals R_i.*/
497 "psraw $3,%%mm0\n\t"
    /*Now compute lflim of mm0 cf. Section 7.10 of the spec.*/
    /*mm6=L L L L*/
    "movq (%[ll]),%%mm6\n\t"
    /*if(R_i<-2L||R_i>2L)R_i=0:*/
    "movq %%mm0,%%mm1\n\t"
    "pxor %%mm2,%%mm2\n\t"
    "movq %%mm6,%%mm3\n\t"
    "psubw %%mm6,%%mm2\n\t"
    "psllw $1,%%mm3\n\t"
    "psllw $1,%%mm2\n\t"
    /*mm0==R_3 R_2 R_1 R_0*/
    /*mm1==R_3 R_2 R_1 R_0*/
    /*mm2==-2L -2L -2L -2L*/
    /*mm3==2L 2L 2L 2L*/
    "pcmpgtw %%mm0,%%mm3\n\t"
    "pcmpgtw %%mm2,%%mm1\n\t"
    "pand %%mm3,%%mm0\n\t"
    "pand %%mm1,%%mm0\n\t"
    /*if(R_i<-L)R_i'=R_i+2L;
      if(R_i>L)R_i'=R_i-2L;
      if(R_i<-L||R_i>L)R_i=-R_i':*/
    "psraw $1,%%mm2\n\t"
    "movq %%mm0,%%mm1\n\t"
    "movq %%mm6,%%mm3\n\t"
    /*mm0==R_3 R_2 R_1 R_0*/
    /*mm1==R_3 R_2 R_1 R_0*/
    /*mm2==-L -L -L -L*/
    /*mm6==L L L L*/
    /*mm2=-L>R_i?FF:00*/
    "pcmpgtw %%mm0,%%mm2\n\t"
    /*mm1=R_i>L?FF:00*/
    "pcmpgtw %%mm6,%%mm1\n\t"
    /*mm3=2L 2L 2L 2L*/
    "psllw $1,%%mm3\n\t"
    /*mm6=2L 2L 2L 2L*/
    "psllw $1,%%mm6\n\t"
    /*mm3=R_i>L?2L:0*/
    "pand %%mm1,%%mm3\n\t"
    /*mm6=-L>R_i?2L:0*/
    "pand %%mm2,%%mm6\n\t"
    /*mm0=R_i>L?R_i-2L:R_i*/
    "psubw %%mm3,%%mm0\n\t"
    /*mm1=-L>R_i||R_i>L*/
    "por %%mm2,%%mm1\n\t"
    /*mm0=-L>R_i?R_i+2L:R_i*/
    "paddw %%mm6,%%mm0\n\t"
    /*mm1=-L>R_i||R_i>L?R_i':0*/
    "pand %%mm0,%%mm1\n\t"
    /*mm0=-L>R_i||R_i>L?0:R_i*/
    "psubw %%mm1,%%mm0\n\t"
    /*mm0=-L>R_i||R_i>L?-R_i':R_i*/
    "psubw %%mm1,%%mm0\n\t"
    /*_pix[1]+=R_i;*/
    "paddw %%mm0,%%mm5\n\t"
    /*_pix[2]-=R_i;*/
    "psubw %%mm0,%%mm4\n\t"
    /*mm5=x x x x D 9 5 1*/
    "packuswb %%mm7,%%mm5\n\t"
    /*mm4=x x x x E A 6 2*/
    "packuswb %%mm7,%%mm4\n\t"
    /*mm5=E D A 9 6 5 2 1*/
    "punpcklbw %%mm4,%%mm5\n\t"
    /*edi=6 5 2 1*/
    "movd %%mm5,%%edi\n\t"
    "movw %%di,1(%[pix])\n\t"
    /*Why is there such a big stall here?*/
    "psrlq $32,%%mm5\n\t"
    "shrl $16,%%edi\n\t"
    "movw %%di,1(%[pix],%[ystride])\n\t"
    /*edi=E D A 9*/
    "movd %%mm5,%%edi\n\t"
    "movw %%di,1(%[pix],%[ystride],2)\n\t"
    "shrl $16,%%edi\n\t"
    "movw %%di,1(%[pix],%[s])\n\t"
    :[s]"=&S"(esi),[d]"=&D"(edi),
     [pix]"+r"(_pix),[ystride]"+r"(_ystride),[ll]"+r"(_ll)
    :
    :"memory"
  );
}

static void loop_filter_h(unsigned char *_pix,int _ystride,
 const ogg_int16_t *_ll){
  _pix-=2;
  loop_filter_h4(_pix,_ystride,_ll);
  loop_filter_h4(_pix+(_ystride<<2),_ystride,_ll);
}

/*We copy the whole function because the MMX routines will be inlined 4 times,
   and we can do just a single emms call at the end this way.
  We also do not use the _bv lookup table, instead computing the values that
   would lie in it on the fly.*/

/*Apply the loop filter to a given set of fragment rows in the given plane.
  The filter may be run on the bottom edge, affecting pixels in the next row
   of fragments, so this row also needs to be available.
  _bv:        The bounding values array.
  _refi:      The index of the frame buffer to filter.
  _pli:       The color plane to filter.
  _fragy0:    The Y coordinate of the first fragment row to filter.
  _fragy_end: The Y coordinate of the fragment row to stop filtering at.*/
void oc_state_loop_filter_frag_rows_mmx(oc_theora_state *_state,int *_bv,
 int _refi,int _pli,int _fragy0,int _fragy_end){
  ogg_int16_t __attribute__((aligned(8))) ll[4];
  th_img_plane *iplane;
  oc_fragment_plane *fplane;
  oc_fragment *frag_top;
  oc_fragment *frag0;
  oc_fragment *frag;
  oc_fragment *frag_end;
  oc_fragment *frag0_end;
  oc_fragment *frag_bot;
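  /*Replicate the filter limit into all four 16-bit lanes so the filter
     kernels above can load it with a single movq.*/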
  ll[0]=ll[1]=ll[2]=ll[3]=
   (ogg_int16_t)_state->loop_filter_limits[_state->qis[0]];
  iplane=_state->ref_frame_bufs[_refi]+_pli;
  fplane=_state->fplanes+_pli;
  /*The following loops are constructed somewhat non-intuitively on purpose.
    The main idea is: if a block boundary has at least one coded fragment on
     it, the filter is applied to it.
    However, the order that the filters are applied in matters, and VP3 chose
     the somewhat strange ordering used below.*/
  frag_top=_state->frags+fplane->froffset;
  frag0=frag_top+_fragy0*fplane->nhfrags;
  frag0_end=frag0+(_fragy_end-_fragy0)*fplane->nhfrags;
  frag_bot=_state->frags+fplane->froffset+fplane->nfrags;
  while(frag0<frag0_end){
    frag=frag0;
    frag_end=frag+fplane->nhfrags;
    while(frag<frag_end){
      if(frag->coded){
        if(frag>frag0){
          loop_filter_h(frag->buffer[_refi],iplane->stride,ll);
        }
        if(frag0>frag_top){
          loop_filter_v(frag->buffer[_refi],iplane->stride,ll);
        }
        if(frag+1<frag_end&&!(frag+1)->coded){
          loop_filter_h(frag->buffer[_refi]+8,iplane->stride,ll);
        }
        if(frag+fplane->nhfrags<frag_bot&&!(frag+fplane->nhfrags)->coded){
          loop_filter_v((frag+fplane->nhfrags)->buffer[_refi],
           iplane->stride,ll);
        }
      }
      frag++;
    }
    frag0+=fplane->nhfrags;
  }
  /*This needs to be removed when decode specific functions are implemented:*/
  __asm__ __volatile__("emms\n\t");
}

#endif