/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ARM_MATHOPS_H
#define AVCODEC_ARM_MATHOPS_H

#include <stdint.h>
#include "libavutil/common.h"

#if HAVE_INLINE_ASM
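
/* MULL(a, b, shift): signed 32x32 -> 64 bit multiply (smull), then return
 * ((int64_t)a * b) >> shift truncated to 32 bits; intended for shift values
 * in the range 1..31 so that both shift amounts stay valid. */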
#   define MULL MULL
static inline av_const int MULL(int a, int b, unsigned shift)
{
    int lo, hi;
    __asm__("smull %0, %1, %2, %3     \n\t"
            "mov   %0, %0,     lsr %4 \n\t"
            "add   %1, %0, %1, lsl %5 \n\t"
            : "=&r"(lo), "=&r"(hi)
            : "r"(b), "r"(a), "ir"(shift), "ir"(32-shift));
    return hi;
}
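
/* MULH(a, b): high 32 bits of the signed 64-bit product a * b.  ARMv6 has a
 * dedicated instruction for this (smmul); older cores compute the full
 * product with smull and return the high word. */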
#define MULH MULH
#if HAVE_ARMV6
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
#else
static inline av_const int MULH(int a, int b)
{
    int lo, hi;
    __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
    return hi;
}
#endif
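
/* MUL64(a, b): full signed 32x32 -> 64 bit product; smull writes the low
 * word into hl[0] and the high word into hl[1] of the union. */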
static inline av_const int64_t MUL64(int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x;
    __asm__ ("smull %0, %1, %2, %3"
             : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MUL64 MUL64
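
/* MAC64(d, a, b): 64-bit multiply-accumulate, d + (int64_t)a * b, using
 * smlal on the low/high words of the accumulator. */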
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    __asm__ ("smlal %0, %1, %2, %3"
             : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
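
/* Macro wrappers: MAC64 updates d in place; MLS64 is the matching
 * multiply-subtract, d -= (int64_t)a * b. */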
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
#define MLS64(d, a, b) MAC64(d, -(a), b)

#if HAVE_ARMV5TE

/* signed 16x16 -> 32 multiply add accumulate */
#   define MAC16(rt, ra, rb)                                            \
    __asm__ ("smlabb %0, %1, %2, %0" : "+r"(rt) : "r"(ra), "r"(rb));

/* signed 16x16 -> 32 multiply */
#   define MUL16 MUL16
static inline av_const int MUL16(int ra, int rb)
{
    int rt;
    __asm__ ("smulbb %0, %1, %2" : "=r"(rt) : "r"(ra), "r"(rb));
    return rt;
}

#endif
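
/* mid_pred(a, b, c): median of the three arguments, computed with compares
 * and conditional moves instead of branches. */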
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int m;
    __asm__ volatile (
        "mov   %0, %2  \n\t"
        "cmp   %1, %2  \n\t"
        "movgt %0, %1  \n\t"
        "movgt %1, %2  \n\t"
        "cmp   %1, %3  \n\t"
        "movle %1, %3  \n\t"
        "cmp   %0, %1  \n\t"
        "movgt %0, %1  \n\t"
        : "=&r"(m), "+r"(a)
        : "r"(b), "r"(c));
    return m;
}

#endif /* HAVE_INLINE_ASM */

#endif /* AVCODEC_ARM_MATHOPS_H */