libavcodec/x86/huffyuvdsp_init.c
/*
 * Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/huffyuvdsp.h"

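/* Prototypes for the external assembly implementations; they are only
 * selected below behind the EXTERNAL_* cpu-flag checks, so they need not be
 * present in builds without x86 assembly support. */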
void ff_add_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
                                    const uint8_t *diff, int w,
                                    int *left, int *left_top);

int ff_add_hfyu_left_pred_ssse3(uint8_t *dst, const uint8_t *src,
                                int w, int left);
int ff_add_hfyu_left_pred_unaligned_ssse3(uint8_t *dst, const uint8_t *src,
                                          int w, int left);

#if HAVE_INLINE_ASM

#if HAVE_7REGS
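/*
 * HuffYUV median prediction, branch-free via cmov: for each byte,
 *     pred   = mid_pred(left, top[i], left + top[i] - top[i - 1])
 *     dst[i] = diff[i] + pred
 * with *left / *left_top carrying the running state between calls.
 */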
static void add_hfyu_median_pred_cmov(uint8_t *dst, const uint8_t *top,
                                      const uint8_t *diff, int w,
                                      int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
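    /* Operand map: %0 = l, %1 = tl, %2 = t, %3 = x, %4 = w2 (negative byte
     * counter), %5 = dst + w, %6 = diff + w, %7 = top + w, so (%5, %4) etc.
     * address element w + w2 of each array. */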
    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"
        "add         %b0, %b3           \n"
        "mov          %2, %1            \n"
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"
        "cmovg        %1, %0            \n"
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"
        "mov          %7, %3            \n"
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"
        "add    (%6, %4), %b0           \n"
        "mov         %b0, (%5, %4)      \n"
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w));
    *left     = l;
    *left_top = tl;
}
#endif /* HAVE_7REGS */

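/* dst[i] += src[i] for 0 <= i < w: 16 bytes per iteration using two 8-byte
 * MMX paddb operations, with the remainder handled by the scalar loop. */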
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;

    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "2:                             \n\t"
        "cmp          %3, %0            \n\t"
        "js           1b                \n\t"
        : "+r" (i)
        : "r" (src), "r" (dst), "r" ((x86_reg) w - 15));

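    /* scalar tail for any bytes left over after the 16-byte MMX chunks */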
    for (; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#endif /* HAVE_INLINE_ASM */

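/* Runtime dispatch: install the fastest implementations the detected CPU
 * supports, from the inline-asm fallbacks up to the external MMXEXT/SSSE3
 * versions. */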
av_cold void ff_huffyuvdsp_init_x86(HuffYUVDSPContext *c)
{
    int cpu_flags = av_get_cpu_flags();

#if HAVE_INLINE_ASM
#if HAVE_7REGS
    if (cpu_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_pred = add_hfyu_median_pred_cmov;
#endif /* HAVE_7REGS */

    if (INLINE_MMX(cpu_flags))
        c->add_bytes = add_bytes_mmx;
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        /* slower than cmov version on AMD */
        if (!(cpu_flags & AV_CPU_FLAG_3DNOW))
            c->add_hfyu_median_pred = ff_add_hfyu_median_pred_mmxext;
    }

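    /* Left prediction: each output byte is the previous output byte plus the
     * input byte (a running sum seeded with the left value); the functions
     * return the updated left value. */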
    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->add_hfyu_left_pred = ff_add_hfyu_left_pred_ssse3;
    }

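    /* EXTERNAL_SSSE3_FAST additionally requires that SSSE3 not be flagged as
     * slow (e.g. on Atom), so the unaligned variant only replaces the plain
     * SSSE3 one on CPUs where it is expected to be a win. */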
    if (EXTERNAL_SSSE3_FAST(cpu_flags)) {
        c->add_hfyu_left_pred = ff_add_hfyu_left_pred_unaligned_ssse3;
    }
}