libavfilter/af_earwax.c

/*
 * Copyright (c) 2011 Mina Nagy Zaki
 * Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
 * This source code is freely redistributable and may be used for any purpose.
 * This copyright notice must be maintained. Edward Beingessner And Sundry
 * Contributors are not responsible for the consequences of using this
 * software.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Stereo Widening Effect. Adds audio cues to move stereo image in
 * front of the listener. Adapted from the libsox earwax effect.
 */

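/*
 * Example use (a minimal sketch; assumes a 44.1 kHz stereo source, the only
 * configuration this filter negotiates):
 *
 *   ffmpeg -i in.wav -af earwax out.wav
 */
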
#include "libavutil/channel_layout.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "formats.h"

#define NUMTAPS 32

static const int8_t filt[NUMTAPS * 2] = {
/* 30°  330° */
    4,   -6,     /* 32 tap stereo FIR filter. */
    4,  -11,     /* One side filters as if the */
   -1,   -5,     /* signal was from 30 degrees */
    3,    3,     /* from the ear, the other as */
   -2,    5,     /* if 330 degrees. */
   -5,    0,
    9,    1,
    6,    3,     /*                              Input */
   -4,   -1,     /*                  Left                     Right */
   -5,   -3,     /*               __________                __________ */
   -2,   -5,     /*              |          |              |          | */
   -7,    1,     /*          .---| Hh,0(f)  |              | Hh,0(f)  |---. */
    6,   -7,     /*         /    |__________|              |__________|    \ */
   30,  -29,     /*        /                  \          /                  \ */
   12,   -3,     /*       /                        X                         \ */
  -11,    4,     /*      /                    /          \                    \ */
   -3,    7,     /*  ____V_____     _________V            V_________     _____V____ */
  -20,   23,     /* |          |   |          |          |          |   |          | */
    2,    0,     /* | Hh,30(f) |   | Hh,330(f)|          | Hh,330(f)|   | Hh,30(f) | */
    1,   -6,     /* |__________|   |__________|          |__________|   |__________| */
  -14,   -5,     /*       \     ___     /                      \     ___     / */
   15,  -18,     /*        \   /   \   /         _____          \   /   \   / */
    6,    7,     /*         `->| + |<--'        /     \         `-->| + |<-' */
   15,  -10,     /*            \___/          _/       \_           \___/ */
  -14,   22,     /*             \ /              \   /                \ / */
   -7,   -2,     /*              `--->| |                      | |<---' */
   -4,    9,     /*                   \_/                      \_/ */
    6,  -12,
    6,   -6,     /*                           Headphones */
    0,  -11,
    0,   -5,
    4,    0};

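/* Runtime state: the interleaved coefficient table split into one filter per
 * direction, a history buffer per convolution path so the FIR keeps context
 * across frames, and one scratch frame per input channel holding both
 * filtered versions of that channel. */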
typedef struct EarwaxContext {
    int16_t filter[2][NUMTAPS];
    int16_t taps[4][NUMTAPS * 2];

    AVFrame *frame[2];
} EarwaxContext;

static int query_formats(const AVFilterContext *ctx,
                         AVFilterFormatsConfig **cfg_in,
                         AVFilterFormatsConfig **cfg_out)
{
    static const enum AVSampleFormat formats[] = {
        AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_NONE,
    };
    static const AVChannelLayout layouts[] = {
        AV_CHANNEL_LAYOUT_STEREO,
        { .nb_channels = 0 },
    };
    static const int sample_rates[] = { 44100, -1 };

    int ret;

    ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, formats);
    if (ret < 0)
        return ret;

    ret = ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
    if (ret < 0)
        return ret;

    ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, layouts);
    if (ret < 0)
        return ret;

    return 0;
}

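/* Dot product of NUMTAPS consecutive input samples with one filter for every
 * position in [in, endin); each result is scaled down by 2^7 and clipped to
 * 16 bits. Returns the advanced output pointer. */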
//FIXME: replace with DSPContext.scalarproduct_int16
static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin,
                                     const int16_t *filt, int16_t *out)
{
    int32_t sample;
    int16_t j;

    while (in < endin) {
        sample = 0;
        for (j = 0; j < NUMTAPS; j++)
            sample += in[j] * filt[j];
        *out = av_clip_int16(sample >> 7);
        out++;
        in++;
    }

    return out;
}

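/* Split the packed coefficient table into the per-direction filters:
 * even entries feed the 30° filter, odd entries the 330° one. */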
static int config_input(AVFilterLink *inlink)
{
    EarwaxContext *s = inlink->dst->priv;

    for (int i = 0; i < NUMTAPS; i++) {
        s->filter[0][i] = filt[i * 2];
        s->filter[1][i] = filt[i * 2 + 1];
    }

    return 0;
}

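/* Filter one input channel with one of the two direction filters and store
 * the result in plane output_ch of that input's scratch frame. taps[tap_ch]
 * carries the last NUMTAPS samples of this path so the FIR has history at
 * frame boundaries. */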
static void convolve(AVFilterContext *ctx, AVFrame *in,
                     int input_ch, int output_ch,
                     int filter_ch, int tap_ch)
{
    EarwaxContext *s = ctx->priv;
    int16_t *taps, *endin, *dst, *src;
    int len;

    taps = s->taps[tap_ch];
    dst  = (int16_t *)s->frame[input_ch]->data[output_ch];
    src  = (int16_t *)in->data[input_ch];

    len = FFMIN(NUMTAPS, in->nb_samples);
    // copy part of new input and process with saved input
    memcpy(taps+NUMTAPS, src, len * sizeof(*taps));
    dst = scalarproduct(taps, taps + len, s->filter[filter_ch], dst);

    // process current input
    if (in->nb_samples >= NUMTAPS) {
        endin = src + in->nb_samples - NUMTAPS;
        scalarproduct(src, endin, s->filter[filter_ch], dst);

        // save part of input for next round
        memcpy(taps, endin, NUMTAPS * sizeof(*taps));
    } else {
        memmove(taps, taps + in->nb_samples, NUMTAPS * sizeof(*taps));
    }
}

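/* Sum two of the intermediate convolution planes into one output channel,
 * with saturation. */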
static void mix(AVFilterContext *ctx, AVFrame *out,
                int output_ch, int f0, int f1, int i0, int i1)
{
    EarwaxContext *s = ctx->priv;
    const int16_t *srcl = (const int16_t *)s->frame[f0]->data[i0];
    const int16_t *srcr = (const int16_t *)s->frame[f1]->data[i1];
    int16_t *dst = (int16_t *)out->data[output_ch];

    for (int n = 0; n < out->nb_samples; n++)
        dst[n] = av_clip_int16(srcl[n] + srcr[n]);
}

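/* Run the four input-channel/filter combinations on the frame, then mix the
 * crossed results into the left and right output channels. */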
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    EarwaxContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = ff_get_audio_buffer(outlink, in->nb_samples);

    for (int ch = 0; ch < 2; ch++) {
        if (!s->frame[ch] || s->frame[ch]->nb_samples < in->nb_samples) {
            av_frame_free(&s->frame[ch]);
            s->frame[ch] = ff_get_audio_buffer(outlink, in->nb_samples);
            if (!s->frame[ch]) {
                av_frame_free(&in);
                av_frame_free(&out);
                return AVERROR(ENOMEM);
            }
        }
    }

    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    convolve(ctx, in, 0, 0, 0, 0);
    convolve(ctx, in, 0, 1, 1, 1);
    convolve(ctx, in, 1, 0, 0, 2);
    convolve(ctx, in, 1, 1, 1, 3);

    mix(ctx, out, 0, 0, 1, 1, 0);
    mix(ctx, out, 1, 0, 1, 0, 1);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    EarwaxContext *s = ctx->priv;

    av_frame_free(&s->frame[0]);
    av_frame_free(&s->frame[1]);
}

static const AVFilterPad earwax_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

const FFFilter ff_af_earwax = {
    .p.name        = "earwax",
    .p.description = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
    .priv_size     = sizeof(EarwaxContext),
    .uninit        = uninit,
    FILTER_INPUTS(earwax_inputs),
    FILTER_OUTPUTS(ff_audio_default_filterpad),
    FILTER_QUERY_FUNC2(query_formats),
};