/*
 * Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
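
/**
 * @file
 * miscellaneous integer operations
 */
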
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavcodec/dsputil.h"

#include "dsputil_altivec.h"
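
/**
 * Sum of squared differences between an int8 block and an int16 block:
 * returns the sum of (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]) over size elements.
 */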
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size)
{
    int i, size16;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u;

    u.vscore = vec_splat_s32(0);

// XXX lazy way, fix it later

#define vec_unaligned_load(b)                                   \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b));

    size16 = size >> 4;
    while (size16) {
        // score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
        // load pix1 and the first batch of pix2

        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // unpack the int8 pixels to int16 and subtract
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);
        // load another batch from pix2
        vpix2    = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff    = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1    += 16;
        pix2    += 8;
        size16  -= 1;
    }
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    // handle the remaining elements in scalar code
    size %= 16;
    for (i = 0; i < size; i++)
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);

    return u.score[3];
}
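
/**
 * Scalar product of two int16 vectors: returns the sum of v1[i] * v2[i]
 * over order elements; order is assumed to be a multiple of 8.
 */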
static int32_t scalarproduct_int16_altivec(const int16_t *v1, const int16_t *v2,
                                           int order)
{
    int i;
    LOAD_ZERO;
    const vec_s16 *pv;
    register vec_s16 vec1;
    register vec_s32 res = vec_splat_s32(0), t;
    int32_t ires;

    for (i = 0; i < order; i += 8) {
        // unaligned load of 8 elements from v1; v2 is assumed to be aligned
        pv   = (const vec_s16 *) v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        // four partial sums of products, folded into the running total
        t    = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        res  = vec_sums(t, res);
        v1  += 8;
        v2  += 8;
    }

    // the total sits in the last element of res
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);

    return ires;
}
static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
                                                    const int16_t *v2,
                                                    const int16_t *v3,
                                                    int order, int mul)
{
    LOAD_ZERO;
    vec_s16 *pv1 = (vec_s16 *) v1;
    register vec_s16 muls = { mul, mul, mul, mul, mul, mul, mul, mul };
    register vec_s16 t0, t1, i0, i1, i4;
    register vec_s16 i2 = vec_ld(0, v2), i3 = vec_ld(0, v3);
    register vec_s32 res = zero_s32v;
    register vec_u8 align = vec_lvsl(0, v2);
    int32_t ires;

    order >>= 4;
    do {
        // unaligned load of 16 elements from v2
        i1 = vec_ld(16, v2);
        t0 = vec_perm(i2, i1, align);
        i2 = vec_ld(32, v2);
        t1 = vec_perm(i1, i2, align);
        i0 = pv1[0];
        i1 = pv1[1];
        // accumulate v1 * v2
        res = vec_msum(t0, i0, res);
        res = vec_msum(t1, i1, res);
        // unaligned load of 16 elements from v3
        i4 = vec_ld(16, v3);
        t0 = vec_perm(i3, i4, align);
        i3 = vec_ld(32, v3);
        t1 = vec_perm(i4, i3, align);
        // v1 += v3 * mul
        pv1[0] = vec_mladd(t0, muls, i0);
        pv1[1] = vec_mladd(t1, muls, i1);
        pv1 += 2;
        v2  += 16;
        v3  += 16;
    } while (--order);
    res = vec_splat(vec_sums(res, zero_s32v), 3);
    vec_ste(res, 0, &ires);

    return ires;
}

av_cold void ff_int_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    c->ssd_int8_vs_int16            = ssd_int8_vs_int16_altivec;
    c->scalarproduct_int16          = scalarproduct_int16_altivec;
    c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
}