/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2003                *
 * by the Xiph.Org Foundation http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

  function:
  last mod: $Id: mmxfrag.c 14345 2008-01-04 18:02:21Z tterribe $

 ********************************************************************/
/*MMX acceleration of fragment reconstruction for motion compensation.
  Originally written by Rudolf Marek.
  Additional optimization by Nils Pipenbrinck.
  Note: Loops are unrolled for best performance.
  The iteration each instruction belongs to is marked in the comments as #i.*/
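
/*For reference: the unrolled MMX in oc_frag_recon_intra_mmx() below computes,
   in effect, the plain C sketched here (illustrative only and excluded from
   the build; the function name and explicit clamp are ours, not part of the
   library).  Each residual is biased by 128 (the 0x0080 words built in mm0)
   and saturated to [0,255] by packuswb.*/
#if 0
static void frag_recon_intra_sketch(unsigned char *_dst,int _dst_ystride,
 const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      /*Bias the residual and clamp to the unsigned byte range.*/
      p=_residue[i*8+j]+128;
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_dst_ystride;
  }
}
#endif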

#include "x86int.h"

void oc_frag_recon_intra_mmx(unsigned char *_dst,int _dst_ystride,
 const ogg_int16_t *_residue){
  __asm__ __volatile__(
    /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
    "pcmpeqw %%mm0,%%mm0\n\t"
    /*#0 Load low residue.*/
    "movq 0*8(%[residue]),%%mm1\n\t"
    /*#0 Load high residue.*/
    "movq 1*8(%[residue]),%%mm2\n\t"
    /*Set mm0 to 0x8000800080008000.*/
    "psllw $15,%%mm0\n\t"
    /*#1 Load low residue.*/
    "movq 2*8(%[residue]),%%mm3\n\t"
    /*#1 Load high residue.*/
    "movq 3*8(%[residue]),%%mm4\n\t"
    /*Set mm0 to 0x0080008000800080.*/
    "psrlw $8,%%mm0\n\t"
    /*#2 Load low residue.*/
    "movq 4*8(%[residue]),%%mm5\n\t"
    /*#2 Load high residue.*/
    "movq 5*8(%[residue]),%%mm6\n\t"
    /*#0 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#0 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#0 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#1 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#1 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#1 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#2 Bias low residue.*/
    "paddsw %%mm0,%%mm5\n\t"
    /*#2 Bias high residue.*/
    "paddsw %%mm0,%%mm6\n\t"
    /*#2 Pack to byte.*/
    "packuswb %%mm6,%%mm5\n\t"
    /*#0 Write row.*/
    "movq %%mm1,(%[dst])\n\t"
    /*#1 Write row.*/
    "movq %%mm3,(%[dst],%[dst_ystride])\n\t"
    /*#2 Write row.*/
    "movq %%mm5,(%[dst],%[dst_ystride],2)\n\t"
    /*#3 Load low residue.*/
    "movq 6*8(%[residue]),%%mm1\n\t"
    /*#3 Load high residue.*/
    "movq 7*8(%[residue]),%%mm2\n\t"
    /*#4 Load low residue.*/
    "movq 8*8(%[residue]),%%mm3\n\t"
    /*#4 Load high residue.*/
    "movq 9*8(%[residue]),%%mm4\n\t"
    /*#5 Load low residue.*/
    "movq 10*8(%[residue]),%%mm5\n\t"
    /*#5 Load high residue.*/
    "movq 11*8(%[residue]),%%mm6\n\t"
    /*#3 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#3 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#3 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#4 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#4 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#4 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#5 Bias low residue.*/
    "paddsw %%mm0,%%mm5\n\t"
    /*#5 Bias high residue.*/
    "paddsw %%mm0,%%mm6\n\t"
    /*#5 Pack to byte.*/
    "packuswb %%mm6,%%mm5\n\t"
    /*#3 Write row.*/
    "movq %%mm1,(%[dst],%[dst_ystride3])\n\t"
    /*#4 Write row.*/
    "movq %%mm3,(%[dst4])\n\t"
    /*#5 Write row.*/
    "movq %%mm5,(%[dst4],%[dst_ystride])\n\t"
    /*#6 Load low residue.*/
    "movq 12*8(%[residue]),%%mm1\n\t"
    /*#6 Load high residue.*/
    "movq 13*8(%[residue]),%%mm2\n\t"
    /*#7 Load low residue.*/
    "movq 14*8(%[residue]),%%mm3\n\t"
    /*#7 Load high residue.*/
    "movq 15*8(%[residue]),%%mm4\n\t"
    /*#6 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#6 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#6 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#7 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#7 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#7 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#6 Write row.*/
    "movq %%mm1,(%[dst4],%[dst_ystride],2)\n\t"
    /*#7 Write row.*/
    "movq %%mm3,(%[dst4],%[dst_ystride3])\n\t"
    :
    :[residue]"r"(_residue),
     [dst]"r"(_dst),
     [dst4]"r"(_dst+(_dst_ystride<<2)),
     [dst_ystride]"r"((long)_dst_ystride),
     [dst_ystride3]"r"((long)_dst_ystride*3)
    :"memory"
  );
}

void oc_frag_recon_inter_mmx(unsigned char *_dst,int _dst_ystride,
 const unsigned char *_src,int _src_ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm0.*/
  __asm__ __volatile__("pxor %%mm0,%%mm0\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*#0 Load source.*/
      "movq (%[src]),%%mm3\n\t"
      /*#1 Load source.*/
      "movq (%[src],%[src_ystride]),%%mm7\n\t"
      /*#0 Get copy of src.*/
      "movq %%mm3,%%mm4\n\t"
      /*#0 Expand high source.*/
      "punpckhbw %%mm0,%%mm4\n\t"
      /*#0 Expand low source.*/
      "punpcklbw %%mm0,%%mm3\n\t"
      /*#0 Add residue high.*/
      "paddsw 8(%[residue]),%%mm4\n\t"
      /*#1 Get copy of src.*/
      "movq %%mm7,%%mm2\n\t"
      /*#0 Add residue low.*/
      "paddsw (%[residue]),%%mm3\n\t"
      /*#1 Expand high source.*/
      "punpckhbw %%mm0,%%mm2\n\t"
      /*#0 Pack final row pixels.*/
      "packuswb %%mm4,%%mm3\n\t"
      /*#1 Expand low source.*/
      "punpcklbw %%mm0,%%mm7\n\t"
      /*#1 Add residue low.*/
      "paddsw 16(%[residue]),%%mm7\n\t"
      /*#1 Add residue high.*/
      "paddsw 24(%[residue]),%%mm2\n\t"
      /*Advance residue ptr.*/
      "lea 32(%[residue]),%[residue]\n\t"
      /*#1 Pack final row pixels.*/
      "packuswb %%mm2,%%mm7\n\t"
      /*Advance src ptr.*/
      "lea (%[src],%[src_ystride],2),%[src]\n\t"
      /*#0 Write row.*/
      "movq %%mm3,(%[dst])\n\t"
      /*#1 Write row.*/
      "movq %%mm7,(%[dst],%[dst_ystride])\n\t"
      /*Advance dst ptr.*/
      "lea (%[dst],%[dst_ystride],2),%[dst]\n\t"
      :[residue]"+r"(_residue),[dst]"+r"(_dst),[src]"+r"(_src)
      :[dst_ystride]"r"((long)_dst_ystride),
       [src_ystride]"r"((long)_src_ystride)
      :"memory"
    );
  }
}
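
/*Illustrative sketch only, excluded from the build: a plain-C rendering of
   what oc_frag_recon_inter_mmx() above does.  Each predictor byte from _src
   gets the corresponding residual added and is saturated to [0,255]; the MMX
   version handles two rows per unrolled iteration.  The function name and
   explicit clamp are ours.*/
#if 0
static void frag_recon_inter_sketch(unsigned char *_dst,int _dst_ystride,
 const unsigned char *_src,int _src_ystride,const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      /*Motion-compensated prediction plus residual, clamped to a byte.*/
      p=_src[j]+_residue[i*8+j];
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_dst_ystride;
    _src+=_src_ystride;
  }
}
#endif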

void oc_frag_recon_inter2_mmx(unsigned char *_dst,int _dst_ystride,
 const unsigned char *_src1,int _src1_ystride,const unsigned char *_src2,
 int _src2_ystride,const ogg_int16_t *_residue){
  int i;
  /*NOTE: This assumes that
     _dst_ystride==_src1_ystride&&_dst_ystride==_src2_ystride.
    This is currently always the case, but a slower fallback version will need
     to be written if it ever is not (see the plain-C sketch after this
     function).*/
  /*Zero mm7.*/
  __asm__ __volatile__("pxor %%mm7,%%mm7\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*#0 Load src1.*/
      "movq (%[src1]),%%mm0\n\t"
      /*#0 Load src2.*/
      "movq (%[src2]),%%mm2\n\t"
      /*#0 Copy src1.*/
      "movq %%mm0,%%mm1\n\t"
      /*#0 Copy src2.*/
      "movq %%mm2,%%mm3\n\t"
      /*#1 Load src1.*/
      "movq (%[src1],%[ystride]),%%mm4\n\t"
      /*#0 Unpack lower src1.*/
      "punpcklbw %%mm7,%%mm0\n\t"
      /*#1 Load src2.*/
      "movq (%[src2],%[ystride]),%%mm5\n\t"
      /*#0 Unpack higher src1.*/
      "punpckhbw %%mm7,%%mm1\n\t"
      /*#0 Unpack lower src2.*/
      "punpcklbw %%mm7,%%mm2\n\t"
      /*#0 Unpack higher src2.*/
      "punpckhbw %%mm7,%%mm3\n\t"
      /*Advance src1 ptr.*/
      "lea (%[src1],%[ystride],2),%[src1]\n\t"
      /*Advance src2 ptr.*/
      "lea (%[src2],%[ystride],2),%[src2]\n\t"
      /*#0 Lower src1+src2.*/
      "paddsw %%mm2,%%mm0\n\t"
      /*#0 Higher src1+src2.*/
      "paddsw %%mm3,%%mm1\n\t"
      /*#1 Copy src1.*/
      "movq %%mm4,%%mm2\n\t"
      /*#0 Build lo average.*/
      "psraw $1,%%mm0\n\t"
      /*#1 Copy src2.*/
      "movq %%mm5,%%mm3\n\t"
      /*#1 Unpack lower src1.*/
      "punpcklbw %%mm7,%%mm4\n\t"
      /*#0 Build hi average.*/
      "psraw $1,%%mm1\n\t"
      /*#1 Unpack higher src1.*/
      "punpckhbw %%mm7,%%mm2\n\t"
      /*#0 low+=residue.*/
      "paddsw (%[residue]),%%mm0\n\t"
      /*#1 Unpack lower src2.*/
      "punpcklbw %%mm7,%%mm5\n\t"
      /*#0 high+=residue.*/
      "paddsw 8(%[residue]),%%mm1\n\t"
      /*#1 Unpack higher src2.*/
      "punpckhbw %%mm7,%%mm3\n\t"
      /*#1 Lower src1+src2.*/
      "paddsw %%mm4,%%mm5\n\t"
      /*#0 Pack and saturate.*/
      "packuswb %%mm1,%%mm0\n\t"
      /*#1 Higher src1+src2.*/
      "paddsw %%mm2,%%mm3\n\t"
      /*#0 Write row.*/
      "movq %%mm0,(%[dst])\n\t"
      /*#1 Build lo average.*/
      "psraw $1,%%mm5\n\t"
      /*#1 Build hi average.*/
      "psraw $1,%%mm3\n\t"
      /*#1 low+=residue.*/
      "paddsw 16(%[residue]),%%mm5\n\t"
      /*#1 high+=residue.*/
      "paddsw 24(%[residue]),%%mm3\n\t"
      /*#1 Pack and saturate.*/
      "packuswb %%mm3,%%mm5\n\t"
      /*#1 Write row.*/
      "movq %%mm5,(%[dst],%[ystride])\n\t"
      /*Advance residue ptr.*/
      "add $32,%[residue]\n\t"
      /*Advance dest ptr.*/
      "lea (%[dst],%[ystride],2),%[dst]\n\t"
      :[dst]"+r"(_dst),[residue]"+r"(_residue),
       [src1]"+r"(_src1),[src2]"+r"(_src2)
      :[ystride]"r"((long)_dst_ystride)
      :"memory"
    );
  }
}
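
/*Illustrative sketch only, excluded from the build: the kind of slower
   plain-C fallback the NOTE above refers to, accepting independent source
   strides.  The two predictors are averaged with truncation ((a+b)>>1, as the
   paddsw/psraw pair does), the residual is added, and the result is saturated
   to [0,255].  The function name and explicit clamp are ours.*/
#if 0
static void frag_recon_inter2_sketch(unsigned char *_dst,int _dst_ystride,
 const unsigned char *_src1,int _src1_ystride,const unsigned char *_src2,
 int _src2_ystride,const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      /*Truncating average of the two predictors plus residual, clamped.*/
      p=((_src1[j]+_src2[j])>>1)+_residue[i*8+j];
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_dst_ystride;
    _src1+=_src1_ystride;
    _src2+=_src2_ystride;
  }
}
#endif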

void oc_restore_fpu_mmx(void){
  /*Clear the MMX register state so subsequent FPU code works correctly.*/
  __asm__ __volatile__("emms\n\t");
}