aarch64: Add assembly support for -fsanitize=hwaddress tagged globals.
[libav.git] / libavcodec / x86 / mpegvideodsp.c
blobb701ef8cc77e5f64d72705747c8c7f4d60139626
/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"

#include "libavcodec/mpegvideodsp.h"
25 #if HAVE_INLINE_ASM
/**
 * MMX implementation of 8-pixel-wide global motion compensation with
 * bilinear interpolation.
 *
 * (ox, oy) is the start offset and (dxx, dxy, dyx, dyy) the per-pixel
 * increments of the affine transform, in fixed point with (16 + shift)
 * fractional bits; r is the rounding constant and width/height bound the
 * valid source area. Falls back to the C version (ff_gmc_c) whenever the
 * fast path's assumptions below do not hold.
 */
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    const int w    = 8;                  // fixed block width of this kernel
    const int ix   = ox >> (16 + shift); // fullpel part of the start offset
    const int iy   = oy >> (16 + shift);
    const int oxs  = ox  >> 4;           // drop 4 bits so values fit in 16-bit words
    const int oys  = oy  >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };             // rounding constant, splatted
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys }; // per-row x increment, splatted
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys }; // per-row y increment, splatted
    const uint64_t shift2  = 2 * shift;  // result carries two factors of (1 << shift)
    int x, y;

    // Offset deltas across the block, used to detect a fullpel offset that
    // is not constant over the whole 8xh block.
    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
        // uses more than 16 bits of subpel mv (only at huge resolution)
        (dxx | dxy | dyx | dyy) & 15 ||
        // start offset outside the valid source area (unsigned compare
        // also catches negative ix/iy)
        (unsigned) ix >= width  - w ||
        (unsigned) iy >= height - h) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;

    // mm6 = (1 << shift) splatted to four words, mm7 = 0; both registers
    // are relied upon to stay live across the asm statements below.
    __asm__ volatile (
        "movd         %0, %%mm6 \n\t"
        "pxor      %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r" (1 << shift));

    // Process the 8-wide block as two independent 4-pixel columns.
    for (x = 0; x < w; x += 4) {
        // Subpel coordinates of the four pixels, pre-decremented by one row
        // step so that the first paddw in the loop restores the row-0 values.
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            // Advance the subpel coordinates by one row, store them back for
            // the next iteration, and keep the interpolation weights:
            // mm4 = dx, mm5 = dy.
            __asm__ volatile (
                "movq      %0, %%mm4 \n\t"
                "movq      %1, %%mm5 \n\t"
                "paddw     %2, %%mm4 \n\t"
                "paddw     %3, %%mm5 \n\t"
                "movq   %%mm4, %0    \n\t"
                "movq   %%mm5, %1    \n\t"
                "psrlw    $12, %%mm4 \n\t"
                "psrlw    $12, %%mm5 \n\t"
                : "+m" (*dx4), "+m" (*dy4)
                : "m" (*dxy4), "m" (*dyy4));

            // Bilinear blend of the 2x2 source neighbourhood, with s = 1 << shift:
            // dst = (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy) +
            //        src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r) >> (2*shift)
            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t" // add rounding constant r
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t" // >> (2 * shift)
                "packuswb  %%mm0, %%mm0 \n\t" // saturate to 4 unsigned bytes
                "movd      %%mm0, %0    \n\t"

                : "=m" (dst[x + y * stride])
                : "m" (src[0]), "m" (src[1]),
                  "m" (src[stride]), "m" (src[stride + 1]),
                  "m" (*r4), "m" (shift2));
            src += stride;
        }
        src += 4 - h * stride; // back to row 0, start of the next 4-pixel column
    }
}
141 #endif /* HAVE_INLINE_ASM */
143 av_cold void ff_mpegvideodsp_init_x86(MpegVideoDSPContext *c)
145 #if HAVE_INLINE_ASM
146 int cpu_flags = av_get_cpu_flags();
148 if (INLINE_MMX(cpu_flags))
149 c->gmc = gmc_mmx;
150 #endif /* HAVE_INLINE_ASM */