/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVCODEC_ARM_MATHOPS_H
#define AVCODEC_ARM_MATHOPS_H

#include <stdint.h>
#include "config.h"
#include "libavutil/common.h"

#if HAVE_INLINE_ASM
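
/* 32x32 -> 64 signed multiply followed by an arithmetic right shift:
 * returns the low 32 bits of ((int64_t)a * b) >> shift.  smull puts
 * the full 64-bit product in lo:hi; the mov/add pair stitches the
 * shifted halves back together. */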
#   define MULL MULL
static inline av_const int MULL(int a, int b, unsigned shift)
{
    int lo, hi;
    __asm__("smull %0, %1, %2, %3     \n\t"
            "mov   %0, %0,     lsr %4 \n\t"
            "add   %1, %0, %1, lsl %5 \n\t"
            : "=&r"(lo), "=&r"(hi)
            : "r"(b), "r"(a), "ir"(shift), "ir"(32-shift));
    return hi;
}
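
/* MULH: high 32 bits of the signed 64-bit product a * b.  ARMv6 has a
 * dedicated smmul instruction; older cores compute the full product
 * with smull and keep only the high half. */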
#define MULH MULH
#if HAVE_ARMV6
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
#else
static inline av_const int MULH(int a, int b)
{
    int lo, hi;
    __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
    return hi;
}
#endif
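
/* MUL64: full signed 64-bit product of two 32-bit operands.  smull
 * writes the low and high words straight into the two halves of the
 * union (low word first, i.e. little-endian word order). */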
static inline av_const int64_t MUL64(int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x;
    __asm__ ("smull %0, %1, %2, %3"
             : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MUL64 MUL64
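
/* MAC64: 64-bit multiply-accumulate, d + a * b.  smlal adds the
 * product into the lo:hi register pair in place; MLS64 reuses it with
 * a negated operand for multiply-subtract. */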
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    __asm__ ("smlal %0, %1, %2, %3"
             : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
#define MLS64(d, a, b) MAC64(d, -(a), b)
#if HAVE_ARMV5TE

/* signed 16x16 -> 32 multiply add accumulate */
#   define MAC16(rt, ra, rb)                                            \
    __asm__ ("smlabb %0, %1, %2, %0" : "+r"(rt) : "r"(ra), "r"(rb));

/* signed 16x16 -> 32 multiply */
#   define MUL16 MUL16
static inline av_const int MUL16(int ra, int rb)
{
    int rt;
    __asm__ ("smulbb %0, %1, %2" : "=r"(rt) : "r"(ra), "r"(rb));
    return rt;
}

#endif
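
/* mid_pred: branch-free median of three values via conditional moves;
 * used e.g. for motion vector prediction. */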
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int m;
    __asm__ volatile (
        "mov   %0, %2  \n\t"
        "cmp   %1, %2  \n\t"
        "movgt %0, %1  \n\t"
        "movgt %1, %2  \n\t"
        "cmp   %1, %3  \n\t"
        "movle %1, %3  \n\t"
        "cmp   %0, %1  \n\t"
        "movgt %0, %1  \n\t"
        : "=&r"(m), "+r"(a)
        : "r"(b), "r"(c));
    return m;
}

#endif /* HAVE_INLINE_ASM */

#endif /* AVCODEC_ARM_MATHOPS_H */