/*
 * aligned/packed access motion
 *
 * Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#define LP(p) *(uint32_t*)(p)
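/* LP(p) is a 32-bit load/store at p.  These accesses are kept aligned:
   dest is expected to be 32-bit aligned, and a misaligned ref is handled by
   loading aligned words and recombining them with the MERGE macros below. */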
#define UNPACK(ph,pl,tt0,tt1) do { \
    uint32_t t0,t1; t0=tt0; t1=tt1; \
    ph = ((t0 & ~BYTE_VEC32(0x03))>>2) + ((t1 & ~BYTE_VEC32(0x03))>>2); \
    pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)
#define rnd_PACK(ph,pl,nph,npl)    ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))
#define no_rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x01))>>2) & BYTE_VEC32(0x03))
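/*
 * UNPACK splits each byte of two source words into a high part (top six
 * bits, pre-shifted down by 2) and a low part (bottom two bits), summed
 * per byte without cross-byte carries.  PACK recombines two such unpacked
 * row pairs so that, per byte lane,
 *
 *     rnd_PACK    -> (a + b + c + d + 2) >> 2
 *     no_rnd_PACK -> (a + b + c + d + 1) >> 2
 *
 * i.e. the rounded and no-rounding four-pixel averages used for xy
 * half-pel interpolation.  BYTE_VEC32(x) replicates x into all four bytes.
 */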
/* little endian */
#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) )
/* big endian
#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)<<(8*ofs))|((b)>>(32-8*ofs)) )
#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)<<(8+8*ofs))|((b)>>(32-8-8*ofs)) )
*/
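/* MERGE1(a,b,ofs) rebuilds the unaligned 32-bit word that starts ofs bytes
   into the aligned word a (continuing into b); MERGE2 gives the word one
   byte further on, as needed by the horizontal interpolation cases. */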
#define put(d,s) d = s
#define avg(d,s) d = rnd_avg32(s,d)
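/* OP is bound to put (plain store) for the copy functions and rebound to
   avg (merge with the bytes already in dest via rnd_avg32) for the
   averaging functions generated below. */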
#define OP_C4(ofs) \
    ref -= ofs; \
    do { \
        OP(LP(dest),MERGE1(LP(ref),LP(ref+4),ofs)); \
        ref += stride; dest += stride; \
    } while(--height)

#define OP_C40() \
    do { \
        OP(LP(dest),LP(ref)); \
        ref += stride; dest += stride; \
    } while(--height)

#define OP put

static void put_pixels4_c(uint8_t *dest, const uint8_t *ref, const int stride, int height)
{
    switch((int)ref&3){
    case 0: OP_C40(); return;
    case 1: OP_C4(1); return;
    case 2: OP_C4(2); return;
    case 3: OP_C4(3); return;
    }
}

#undef OP
#define OP avg

static void avg_pixels4_c(uint8_t *dest, const uint8_t *ref, const int stride, int height)
{
    switch((int)ref&3){
    case 0: OP_C40(); return;
    case 1: OP_C4(1); return;
    case 2: OP_C4(2); return;
    case 3: OP_C4(3); return;
    }
}

#undef OP
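/* Row primitives for the 8- and 16-pixel-wide cases: OP_C copies (or
   averages into dest) one row, OP_X adds horizontal half-pel interpolation,
   OP_Y vertical, and OP_XY both.  The *0 variants handle the case where ref
   is already 32-bit aligned (ofs == 0). */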
#define OP_C(ofs,sz,avg2) \
{ \
    ref -= ofs; \
    do { \
        uint32_t t0,t1; \
        t0 = LP(ref+0); t1 = LP(ref+4); \
        OP(LP(dest+0), MERGE1(t0,t1,ofs)); \
        t0 = LP(ref+8);  OP(LP(dest+4),  MERGE1(t1,t0,ofs)); \
        if (sz==16) { \
            t1 = LP(ref+12); OP(LP(dest+8),  MERGE1(t0,t1,ofs)); \
            t0 = LP(ref+16); OP(LP(dest+12), MERGE1(t1,t0,ofs)); \
        } \
        ref += stride; dest += stride; \
    } while(--height); \
}
/* aligned */
#define OP_C0(sz,avg2) \
{ \
    do { \
        OP(LP(dest+0),  LP(ref+0)); \
        OP(LP(dest+4),  LP(ref+4)); \
        if (sz==16) { \
            OP(LP(dest+8),  LP(ref+8)); \
            OP(LP(dest+12), LP(ref+12)); \
        } \
        ref += stride; dest += stride; \
    } while(--height); \
}
#define OP_X(ofs,sz,avg2) \
{ \
    ref -= ofs; \
    do { \
        uint32_t t0,t1; \
        t0 = LP(ref+0); t1 = LP(ref+4); \
        OP(LP(dest+0), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
        t0 = LP(ref+8);  OP(LP(dest+4),  avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
        if (sz==16) { \
            t1 = LP(ref+12); OP(LP(dest+8),  avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
            t0 = LP(ref+16); OP(LP(dest+12), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
        } \
        ref += stride; dest += stride; \
    } while(--height); \
}
/* aligned */
#define OP_Y0(sz,avg2) \
{ \
    uint32_t t0,t1,t2,t3,t; \
    t0 = LP(ref+0); t1 = LP(ref+4); \
    if (sz==16) { t2 = LP(ref+8); t3 = LP(ref+12); } \
    do { \
        ref += stride; \
        t = LP(ref+0); OP(LP(dest+0),  avg2(t0,t)); t0 = t; \
        t = LP(ref+4); OP(LP(dest+4),  avg2(t1,t)); t1 = t; \
        if (sz==16) { \
            t = LP(ref+8);  OP(LP(dest+8),  avg2(t2,t)); t2 = t; \
            t = LP(ref+12); OP(LP(dest+12), avg2(t3,t)); t3 = t; \
        } \
        dest += stride; \
    } while(--height); \
}
#define OP_Y(ofs,sz,avg2) \
{ \
    uint32_t t0,t1,t2,t3,t,w0,w1; \
    ref -= ofs; \
    w0 = LP(ref+0); w1 = LP(ref+4); \
    t0 = MERGE1(w0,w1,ofs); \
    w0 = LP(ref+8); \
    t1 = MERGE1(w1,w0,ofs); \
    if (sz==16) { \
        w1 = LP(ref+12); t2 = MERGE1(w0,w1,ofs); \
        w0 = LP(ref+16); t3 = MERGE1(w1,w0,ofs); \
    } \
    do { \
        ref += stride; \
        w0 = LP(ref+0); w1 = LP(ref+4); \
        t = MERGE1(w0,w1,ofs); \
        OP(LP(dest+0), avg2(t0,t)); t0 = t; \
        w0 = LP(ref+8); t = MERGE1(w1,w0,ofs); \
        OP(LP(dest+4), avg2(t1,t)); t1 = t; \
        if (sz==16) { \
            w1 = LP(ref+12); t = MERGE1(w0,w1,ofs); \
            OP(LP(dest+8), avg2(t2,t)); t2 = t; \
            w0 = LP(ref+16); t = MERGE1(w1,w0,ofs); \
            OP(LP(dest+12), avg2(t3,t)); t3 = t; \
        } \
        dest += stride; \
    } while(--height); \
}
#define OP_X0(sz,avg2)  OP_X(0,sz,avg2)
#define OP_XY0(sz,PACK) OP_XY(0,sz,PACK)
#define OP_XY(ofs,sz,PACK) \
{ \
    uint32_t t2,t3,w0,w1; \
    uint32_t a0,a1,a2,a3,a4,a5,a6,a7; \
    ref -= ofs; \
    w0 = LP(ref+0); w1 = LP(ref+4); \
    UNPACK(a0,a1,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
    w0 = LP(ref+8); \
    UNPACK(a2,a3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    if (sz==16) { \
        w1 = LP(ref+12); \
        UNPACK(a4,a5,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        w0 = LP(ref+16); \
        UNPACK(a6,a7,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    } \
    do { \
        ref += stride; \
        w0 = LP(ref+0); w1 = LP(ref+4); \
        UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        OP(LP(dest+0),PACK(a0,a1,t2,t3)); a0 = t2; a1 = t3; \
        w0 = LP(ref+8); \
        UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
        OP(LP(dest+4),PACK(a2,a3,t2,t3)); a2 = t2; a3 = t3; \
        if (sz==16) { \
            w1 = LP(ref+12); \
            UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
            OP(LP(dest+8),PACK(a4,a5,t2,t3)); a4 = t2; a5 = t3; \
            w0 = LP(ref+16); \
            UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
            OP(LP(dest+12),PACK(a6,a7,t2,t3)); a6 = t2; a7 = t3; \
        } \
        dest += stride; \
    } while(--height); \
}
#define DEFFUNC(op,rnd,xy,sz,OP_N,avgfunc) \
static void op##_##rnd##_pixels##sz##_##xy (uint8_t * dest, const uint8_t * ref, \
                                            const int stride, int height) \
{ \
    switch((int)ref&3) { \
    case 0:OP_N##0(sz,rnd##_##avgfunc); return; \
    case 1:OP_N(1,sz,rnd##_##avgfunc); return; \
    case 2:OP_N(2,sz,rnd##_##avgfunc); return; \
    case 3:OP_N(3,sz,rnd##_##avgfunc); return; \
    } \
}
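/*
 * Example: DEFFUNC(put,rnd,x,8,OP_X,avg32) expands to
 *
 *     static void put_rnd_pixels8_x(uint8_t *dest, const uint8_t *ref,
 *                                   const int stride, int height)
 *
 * which dispatches on ref&3 and runs OP_X with the matching byte offset
 * (OP_X0, i.e. offset 0, for an aligned ref), averaging with rnd_avg32.
 */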
#define OP put

DEFFUNC(put,   rnd,o,8,OP_C,avg32)
DEFFUNC(put,   rnd,x,8,OP_X,avg32)
DEFFUNC(put,no_rnd,x,8,OP_X,avg32)
DEFFUNC(put,   rnd,y,8,OP_Y,avg32)
DEFFUNC(put,no_rnd,y,8,OP_Y,avg32)
DEFFUNC(put,   rnd,xy,8,OP_XY,PACK)
DEFFUNC(put,no_rnd,xy,8,OP_XY,PACK)
DEFFUNC(put,   rnd,o,16,OP_C,avg32)
DEFFUNC(put,   rnd,x,16,OP_X,avg32)
DEFFUNC(put,no_rnd,x,16,OP_X,avg32)
DEFFUNC(put,   rnd,y,16,OP_Y,avg32)
DEFFUNC(put,no_rnd,y,16,OP_Y,avg32)
DEFFUNC(put,   rnd,xy,16,OP_XY,PACK)
DEFFUNC(put,no_rnd,xy,16,OP_XY,PACK)
#undef OP
#define OP avg

DEFFUNC(avg,   rnd,o,8,OP_C,avg32)
DEFFUNC(avg,   rnd,x,8,OP_X,avg32)
DEFFUNC(avg,no_rnd,x,8,OP_X,avg32)
DEFFUNC(avg,   rnd,y,8,OP_Y,avg32)
DEFFUNC(avg,no_rnd,y,8,OP_Y,avg32)
DEFFUNC(avg,   rnd,xy,8,OP_XY,PACK)
DEFFUNC(avg,no_rnd,xy,8,OP_XY,PACK)
DEFFUNC(avg,   rnd,o,16,OP_C,avg32)
DEFFUNC(avg,   rnd,x,16,OP_X,avg32)
DEFFUNC(avg,no_rnd,x,16,OP_X,avg32)
DEFFUNC(avg,   rnd,y,16,OP_Y,avg32)
DEFFUNC(avg,no_rnd,y,16,OP_Y,avg32)
DEFFUNC(avg,   rnd,xy,16,OP_XY,PACK)
DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK)

#undef OP
#define put_no_rnd_pixels8_o  put_rnd_pixels8_o
#define put_no_rnd_pixels16_o put_rnd_pixels16_o
#define avg_no_rnd_pixels8_o  avg_rnd_pixels8_o
#define avg_no_rnd_pixels16_o avg_rnd_pixels16_o
#define put_pixels8_c         put_rnd_pixels8_o
#define put_pixels16_c        put_rnd_pixels16_o
#define avg_pixels8_c         avg_rnd_pixels8_o
#define avg_pixels16_c        avg_rnd_pixels16_o
#define put_no_rnd_pixels8_c  put_rnd_pixels8_o
#define put_no_rnd_pixels16_c put_rnd_pixels16_o
#define avg_no_rnd_pixels8_c  avg_rnd_pixels8_o
#define avg_no_rnd_pixels16_c avg_rnd_pixels16_o

#define QPEL

#ifdef QPEL

#include "qpel.c"

#endif
void dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
{
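    /* pixels_tab[i][j]: i selects the block width (0 = 16 pixels, 1 = 8),
       j the half-pel case (0 = copy, 1 = x, 2 = y, 3 = xy). */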
    c->put_pixels_tab[0][0] = put_rnd_pixels16_o;
    c->put_pixels_tab[0][1] = put_rnd_pixels16_x;
    c->put_pixels_tab[0][2] = put_rnd_pixels16_y;
    c->put_pixels_tab[0][3] = put_rnd_pixels16_xy;
    c->put_pixels_tab[1][0] = put_rnd_pixels8_o;
    c->put_pixels_tab[1][1] = put_rnd_pixels8_x;
    c->put_pixels_tab[1][2] = put_rnd_pixels8_y;
    c->put_pixels_tab[1][3] = put_rnd_pixels8_xy;

    c->put_no_rnd_pixels_tab[0][0] = put_no_rnd_pixels16_o;
    c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x;
    c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy;
    c->put_no_rnd_pixels_tab[1][0] = put_no_rnd_pixels8_o;
    c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x;
    c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy;

    c->avg_pixels_tab[0][0] = avg_rnd_pixels16_o;
    c->avg_pixels_tab[0][1] = avg_rnd_pixels16_x;
    c->avg_pixels_tab[0][2] = avg_rnd_pixels16_y;
    c->avg_pixels_tab[0][3] = avg_rnd_pixels16_xy;
    c->avg_pixels_tab[1][0] = avg_rnd_pixels8_o;
    c->avg_pixels_tab[1][1] = avg_rnd_pixels8_x;
    c->avg_pixels_tab[1][2] = avg_rnd_pixels8_y;
    c->avg_pixels_tab[1][3] = avg_rnd_pixels8_xy;

    c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_o;
    c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x;
    c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y;
    c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy;
    c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_o;
    c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x;
    c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y;
    c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy;
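    /* Wire up the 16 quarter-pel positions (mc00..mc33) for one prefix/size. */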
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
    dspfunc(put_qpel, 0, 16);
    dspfunc(put_no_rnd_qpel, 0, 16);

    dspfunc(avg_qpel, 0, 16);
    /* dspfunc(avg_no_rnd_qpel, 0, 16); */

    dspfunc(put_qpel, 1, 8);
    dspfunc(put_no_rnd_qpel, 1, 8);

    dspfunc(avg_qpel, 1, 8);
    /* dspfunc(avg_no_rnd_qpel, 1, 8); */

    dspfunc(put_h264_qpel, 0, 16);
    dspfunc(put_h264_qpel, 1, 8);
    dspfunc(put_h264_qpel, 2, 4);
    dspfunc(avg_h264_qpel, 0, 16);
    dspfunc(avg_h264_qpel, 1, 8);
    dspfunc(avg_h264_qpel, 2, 4);
    c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_c;
    c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_c;
    c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_c;
    c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c;
    c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
    c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;
    c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
    c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
    c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
    c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
    c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
    c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
    c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
    c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;