/*
 * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved
 * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D
 * Based on the process described by Martin Weston for BBC R&D
 * Author of FFmpeg filter: Mark Himsley for BBC Broadcast Systems Development
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "filters.h"
#include "video.h"
#include "w3fdif.h"
typedef struct W3FDIFContext {
    const AVClass *class;
    int filter;           ///< 0 is simple, 1 is more complex
    int mode;             ///< 0 is frame, 1 is field
    int parity;           ///< frame field parity
    int deint;            ///< which frames to deinterlace
    int linesize[4];      ///< bytes of pixel data per line for each plane
    int planeheight[4];   ///< height of each plane
    int field;            ///< which field are we on, 0 or 1
    int eof;
    int nb_planes;
    AVFrame *prev, *cur, *next;  ///< previous, current, next frames
    int32_t **work_line;  ///< lines we are calculating
    int nb_threads;
    int max;

    W3FDIFDSPContext dsp;
} W3FDIFContext;
#define OFFSET(x) offsetof(W3FDIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define CONST(name, help, val, u) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, .unit = u }
static const AVOption w3fdif_options[] = {
    { "filter", "specify the filter", OFFSET(filter), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, .unit = "filter" },
    CONST("simple",  NULL, 0, "filter"),
    CONST("complex", NULL, 1, "filter"),
    { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, .unit = "mode"},
    CONST("frame", "send one frame for each frame", 0, "mode"),
    CONST("field", "send one frame for each field", 1, "mode"),
    { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=-1}, -1, 1, FLAGS, .unit = "parity" },
    CONST("tff",  "assume top field first",     0, "parity"),
    CONST("bff",  "assume bottom field first",  1, "parity"),
    CONST("auto", "auto detect parity",         -1, "parity"),
    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, .unit = "deint" },
    CONST("all",        "deinterlace all frames",                       0, "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", 1, "deint"),
    { NULL }
};

AVFILTER_DEFINE_CLASS(w3fdif);
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
    AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
    AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_NONE
};
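/* The filter kernels below accumulate coefficient-weighted input lines into a
 * 32-bit work line.  The *_low variants interpolate from lines of the field
 * being kept, the *_high variants then add detail sampled from the two
 * temporally adjacent fields (in_lines_cur / in_lines_adj), and *_scale clips
 * the 256 * 128-scaled sums and shifts them back down to pixel range. */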
static void filter_simple_low(int32_t *work_line,
                              uint8_t *in_lines_cur[2],
                              const int16_t *coef, int linesize)
{
    int i;

    for (i = 0; i < linesize; i++) {
        *work_line    = *in_lines_cur[0]++ * coef[0];
        *work_line++ += *in_lines_cur[1]++ * coef[1];
    }
}
static void filter_complex_low(int32_t *work_line,
                               uint8_t *in_lines_cur[4],
                               const int16_t *coef, int linesize)
{
    int i;

    for (i = 0; i < linesize; i++) {
        *work_line    = *in_lines_cur[0]++ * coef[0];
        *work_line   += *in_lines_cur[1]++ * coef[1];
        *work_line   += *in_lines_cur[2]++ * coef[2];
        *work_line++ += *in_lines_cur[3]++ * coef[3];
    }
}
static void filter_simple_high(int32_t *work_line,
                               uint8_t *in_lines_cur[3],
                               uint8_t *in_lines_adj[3],
                               const int16_t *coef, int linesize)
{
    int i;

    for (i = 0; i < linesize; i++) {
        *work_line   += *in_lines_cur[0]++ * coef[0];
        *work_line   += *in_lines_adj[0]++ * coef[0];
        *work_line   += *in_lines_cur[1]++ * coef[1];
        *work_line   += *in_lines_adj[1]++ * coef[1];
        *work_line   += *in_lines_cur[2]++ * coef[2];
        *work_line++ += *in_lines_adj[2]++ * coef[2];
    }
}
static void filter_complex_high(int32_t *work_line,
                                uint8_t *in_lines_cur[5],
                                uint8_t *in_lines_adj[5],
                                const int16_t *coef, int linesize)
{
    int i;

    for (i = 0; i < linesize; i++) {
        *work_line   += *in_lines_cur[0]++ * coef[0];
        *work_line   += *in_lines_adj[0]++ * coef[0];
        *work_line   += *in_lines_cur[1]++ * coef[1];
        *work_line   += *in_lines_adj[1]++ * coef[1];
        *work_line   += *in_lines_cur[2]++ * coef[2];
        *work_line   += *in_lines_adj[2]++ * coef[2];
        *work_line   += *in_lines_cur[3]++ * coef[3];
        *work_line   += *in_lines_adj[3]++ * coef[3];
        *work_line   += *in_lines_cur[4]++ * coef[4];
        *work_line++ += *in_lines_adj[4]++ * coef[4];
    }
}
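/* 255 * 256 * 128 is the largest legal 8-bit value at the internal fixed-point
 * scale, so clipping against it and shifting right by 15 maps the work line
 * back to the 0..255 output range. */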
static void filter_scale(uint8_t *out_pixel, const int32_t *work_pixel, int linesize, int max)
{
    int j;

    for (j = 0; j < linesize; j++, out_pixel++, work_pixel++)
        *out_pixel = av_clip(*work_pixel, 0, 255 * 256 * 128) >> 15;
}
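/* The filter16_* variants below do the same work for formats deeper than 8
 * bits: the byte pointers are reinterpreted as uint16_t and linesize, which is
 * given in bytes, is halved to obtain the pixel count per line. */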
static void filter16_simple_low(int32_t *work_line,
                                uint8_t *in_lines_cur8[2],
                                const int16_t *coef, int linesize)
{
    uint16_t *in_lines_cur[2] = { (uint16_t *)in_lines_cur8[0], (uint16_t *)in_lines_cur8[1] };
    int i;

    linesize /= 2;

    for (i = 0; i < linesize; i++) {
        *work_line    = *in_lines_cur[0]++ * coef[0];
        *work_line++ += *in_lines_cur[1]++ * coef[1];
    }
}
static void filter16_complex_low(int32_t *work_line,
                                 uint8_t *in_lines_cur8[4],
                                 const int16_t *coef, int linesize)
{
    uint16_t *in_lines_cur[4] = { (uint16_t *)in_lines_cur8[0],
                                  (uint16_t *)in_lines_cur8[1],
                                  (uint16_t *)in_lines_cur8[2],
                                  (uint16_t *)in_lines_cur8[3] };
    int i;

    linesize /= 2;

    for (i = 0; i < linesize; i++) {
        *work_line    = *in_lines_cur[0]++ * coef[0];
        *work_line   += *in_lines_cur[1]++ * coef[1];
        *work_line   += *in_lines_cur[2]++ * coef[2];
        *work_line++ += *in_lines_cur[3]++ * coef[3];
    }
}
static void filter16_simple_high(int32_t *work_line,
                                 uint8_t *in_lines_cur8[3],
                                 uint8_t *in_lines_adj8[3],
                                 const int16_t *coef, int linesize)
{
    uint16_t *in_lines_cur[3] = { (uint16_t *)in_lines_cur8[0],
                                  (uint16_t *)in_lines_cur8[1],
                                  (uint16_t *)in_lines_cur8[2] };
    uint16_t *in_lines_adj[3] = { (uint16_t *)in_lines_adj8[0],
                                  (uint16_t *)in_lines_adj8[1],
                                  (uint16_t *)in_lines_adj8[2] };
    int i;

    linesize /= 2;

    for (i = 0; i < linesize; i++) {
        *work_line   += *in_lines_cur[0]++ * coef[0];
        *work_line   += *in_lines_adj[0]++ * coef[0];
        *work_line   += *in_lines_cur[1]++ * coef[1];
        *work_line   += *in_lines_adj[1]++ * coef[1];
        *work_line   += *in_lines_cur[2]++ * coef[2];
        *work_line++ += *in_lines_adj[2]++ * coef[2];
    }
}
static void filter16_complex_high(int32_t *work_line,
                                  uint8_t *in_lines_cur8[5],
                                  uint8_t *in_lines_adj8[5],
                                  const int16_t *coef, int linesize)
{
    uint16_t *in_lines_cur[5] = { (uint16_t *)in_lines_cur8[0],
                                  (uint16_t *)in_lines_cur8[1],
                                  (uint16_t *)in_lines_cur8[2],
                                  (uint16_t *)in_lines_cur8[3],
                                  (uint16_t *)in_lines_cur8[4] };
    uint16_t *in_lines_adj[5] = { (uint16_t *)in_lines_adj8[0],
                                  (uint16_t *)in_lines_adj8[1],
                                  (uint16_t *)in_lines_adj8[2],
                                  (uint16_t *)in_lines_adj8[3],
                                  (uint16_t *)in_lines_adj8[4] };
    int i;

    linesize /= 2;

    for (i = 0; i < linesize; i++) {
        *work_line   += *in_lines_cur[0]++ * coef[0];
        *work_line   += *in_lines_adj[0]++ * coef[0];
        *work_line   += *in_lines_cur[1]++ * coef[1];
        *work_line   += *in_lines_adj[1]++ * coef[1];
        *work_line   += *in_lines_cur[2]++ * coef[2];
        *work_line   += *in_lines_adj[2]++ * coef[2];
        *work_line   += *in_lines_cur[3]++ * coef[3];
        *work_line   += *in_lines_adj[3]++ * coef[3];
        *work_line   += *in_lines_cur[4]++ * coef[4];
        *work_line++ += *in_lines_adj[4]++ * coef[4];
    }
}
static void filter16_scale(uint8_t *out_pixel8, const int32_t *work_pixel, int linesize, int max)
{
    uint16_t *out_pixel = (uint16_t *)out_pixel8;
    int j;

    linesize /= 2;

    for (j = 0; j < linesize; j++, out_pixel++, work_pixel++)
        *out_pixel = av_clip(*work_pixel, 0, max) >> 15;
}
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    W3FDIFContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret, i, depth, nb_threads;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    if (inlink->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 lines is not supported\n");
        return AVERROR(EINVAL);
    }

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    nb_threads = ff_filter_get_nb_threads(ctx);
    s->work_line = av_calloc(nb_threads, sizeof(*s->work_line));
    if (!s->work_line)
        return AVERROR(ENOMEM);
    s->nb_threads = nb_threads;
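    /* One private 32-bit work line per worker thread; the width is padded up
     * to a multiple of 32 elements, presumably as headroom for the SIMD
     * kernels. */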
    for (i = 0; i < s->nb_threads; i++) {
        s->work_line[i] = av_calloc(FFALIGN(s->linesize[0], 32), sizeof(*s->work_line[0]));
        if (!s->work_line[i])
            return AVERROR(ENOMEM);
    }
    depth = desc->comp[0].depth;
    s->max = ((1 << depth) - 1) * 256 * 128;
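    /* s->max is the largest legal pixel value at the internal 256 * 128
     * fixed-point scale; filter16_scale() clips against it before shifting
     * back down to the native bit depth. */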
    if (depth <= 8) {
        s->dsp.filter_simple_low   = filter_simple_low;
        s->dsp.filter_complex_low  = filter_complex_low;
        s->dsp.filter_simple_high  = filter_simple_high;
        s->dsp.filter_complex_high = filter_complex_high;
        s->dsp.filter_scale        = filter_scale;
    } else {
        s->dsp.filter_simple_low   = filter16_simple_low;
        s->dsp.filter_complex_low  = filter16_complex_low;
        s->dsp.filter_simple_high  = filter16_simple_high;
        s->dsp.filter_complex_high = filter16_complex_high;
        s->dsp.filter_scale        = filter16_scale;
    }
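    /* Architecture-specific initialisation may replace the C function
     * pointers chosen above with optimised versions when the CPU supports
     * them. */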
#if ARCH_X86
    ff_w3fdif_init_x86(&s->dsp, depth);
#endif

    return 0;
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    FilterLink *il = ff_filter_link(inlink);
    FilterLink *ol = ff_filter_link(outlink);
    W3FDIFContext *s = ctx->priv;

    outlink->time_base = av_mul_q(inlink->time_base, (AVRational){1, 2});
    if (s->mode)
        ol->frame_rate = av_mul_q(il->frame_rate, (AVRational){2, 1});

    return 0;
}
/** Filter coefficients from PH-2071, scaled by 256 * 128.
 * Each set of coefficients has a set for low-frequencies and high-frequencies.
 * n_coef_lf[] and n_coef_hf[] are the number of coefs for simple and more-complex.
 * It is important for later that n_coef_lf[] is even and n_coef_hf[] is odd.
 * coef_lf[][] and coef_hf[][] are the coefficients for low-frequencies
 * and high-frequencies for simple and more-complex mode.
 */
static const int8_t   n_coef_lf[2] = { 2, 4 };
static const int16_t coef_lf[2][4] = {{ 16384, 16384,     0,    0},
                                      {  -852, 17236, 17236, -852}};
static const int8_t   n_coef_hf[2] = { 3, 5 };
static const int16_t coef_hf[2][5] = {{ -2048,  4096, -2048,     0,    0},
                                      {  1016, -3801,  5570, -3801, 1016}};
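/* Note on the scaling: each low-frequency set sums to 32768 (= 256 * 128),
 * e.g. 16384 + 16384 and -852 + 17236 + 17236 - 852, while each
 * high-frequency set sums to 0, so after the final >> 15 a flat area passes
 * through at its original level and the adjacent-field taps only contribute
 * detail. */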
typedef struct ThreadData {
    AVFrame *out, *cur, *adj;
} ThreadData;
static int deinterlace_plane_slice(AVFilterContext *ctx, void *arg,
                                   int jobnr, int nb_jobs, int plane)
{
    W3FDIFContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *cur = td->cur;
    AVFrame *adj = td->adj;
    const int filter = s->filter;
    uint8_t *in_line, *in_lines_cur[5], *in_lines_adj[5];
    uint8_t *out_line, *out_pixel;
    int32_t *work_line, *work_pixel;
    uint8_t *cur_data = cur->data[plane];
    uint8_t *adj_data = adj->data[plane];
    uint8_t *dst_data = out->data[plane];
    const int linesize = s->linesize[plane];
    const int height   = s->planeheight[plane];
    const int cur_line_stride = cur->linesize[plane];
    const int adj_line_stride = adj->linesize[plane];
    const int dst_line_stride = out->linesize[plane];
    const int start = (height * jobnr) / nb_jobs;
    const int end   = (height * (jobnr + 1)) / nb_jobs;
    const int max = s->max;
    const int interlaced = !!(cur->flags & AV_FRAME_FLAG_INTERLACED);
    const int tff = s->field == (s->parity == -1 ? interlaced ? !!(cur->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) : 1 :
                                 s->parity ^ 1);
    int j, y_in, y_out;
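    /* Resolve the field parity: with parity -1 it is taken from the frame
     * flags (assuming top-field-first for frames not marked as interlaced),
     * otherwise the user-supplied value is used.  tff then tells this slice
     * which line parity to copy through untouched and which one to
     * synthesise. */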
    /* copy unchanged the lines of the field */
    y_out = start + (tff ^ (start & 1));

    in_line  = cur_data + (y_out * cur_line_stride);
    out_line = dst_data + (y_out * dst_line_stride);

    while (y_out < end) {
        memcpy(out_line, in_line, linesize);
        y_out += 2;
        in_line  += cur_line_stride * 2;
        out_line += dst_line_stride * 2;
    }
    /* interpolate other lines of the field */
    y_out = start + ((!tff) ^ (start & 1));

    out_line = dst_data + (y_out * dst_line_stride);

    while (y_out < end) {
        /* get low vertical frequencies from current field */
        for (j = 0; j < n_coef_lf[filter]; j++) {
            y_in = (y_out + 1) + (j * 2) - n_coef_lf[filter];

            while (y_in < 0)
                y_in += 2;
            while (y_in >= height)
                y_in -= 2;

            in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
        }

        work_line = s->work_line[jobnr];
        switch (n_coef_lf[filter]) {
        case 2:
            s->dsp.filter_simple_low(work_line, in_lines_cur,
                                     coef_lf[filter], linesize);
            break;
        case 4:
            s->dsp.filter_complex_low(work_line, in_lines_cur,
                                      coef_lf[filter], linesize);
        }

        /* get high vertical frequencies from adjacent fields */
        for (j = 0; j < n_coef_hf[filter]; j++) {
            y_in = (y_out + 1) + (j * 2) - n_coef_hf[filter];

            while (y_in < 0)
                y_in += 2;
            while (y_in >= height)
                y_in -= 2;

            in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
            in_lines_adj[j] = adj_data + (y_in * adj_line_stride);
        }

        work_line = s->work_line[jobnr];
        switch (n_coef_hf[filter]) {
        case 3:
            s->dsp.filter_simple_high(work_line, in_lines_cur, in_lines_adj,
                                      coef_hf[filter], linesize);
            break;
        case 5:
            s->dsp.filter_complex_high(work_line, in_lines_cur, in_lines_adj,
                                       coef_hf[filter], linesize);
        }

        /* save scaled result to the output frame, scaling down by 256 * 128 */
        work_pixel = s->work_line[jobnr];
        out_pixel = out_line;

        s->dsp.filter_scale(out_pixel, work_pixel, linesize, max);

        /* move on to next line */
        y_out += 2;
        out_line += dst_line_stride * 2;
    }

    return 0;
}
static int deinterlace_slice(AVFilterContext *ctx, void *arg,
                             int jobnr, int nb_jobs)
{
    W3FDIFContext *s = ctx->priv;

    for (int p = 0; p < s->nb_planes; p++)
        deinterlace_plane_slice(ctx, arg, jobnr, nb_jobs, p);

    return 0;
}
static int filter(AVFilterContext *ctx, int is_second)
{
    W3FDIFContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *adj;
    ThreadData td;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    av_frame_copy_props(out, s->cur);
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    out->interlaced_frame = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    out->flags &= ~AV_FRAME_FLAG_INTERLACED;
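    /* config_output() halved the output time base, so the frame built from the
     * first field keeps its original timestamp by doubling pts, while the
     * frame built from the second field lands halfway to the next input frame
     * at cur_pts + next_pts. */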
    if (!is_second) {
        if (out->pts != AV_NOPTS_VALUE)
            out->pts *= 2;
    } else {
        int64_t cur_pts  = s->cur->pts;
        int64_t next_pts = s->next->pts;

        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            out->pts = cur_pts + next_pts;
        } else {
            out->pts = AV_NOPTS_VALUE;
        }
    }

    adj = s->field ? s->next : s->prev;
    td.out = out; td.cur = s->cur; td.adj = adj;
    ff_filter_execute(ctx, deinterlace_slice, &td, NULL,
                      FFMIN(s->planeheight[1], s->nb_threads));

    if (s->mode)
        s->field = !s->field;

    return ff_filter_frame(outlink, out);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    W3FDIFContext *s = ctx->priv;
    int ret;

    av_frame_free(&s->prev);
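    /* Slide the three-frame window: the oldest frame was freed above and the
     * incoming frame becomes "next".  On the very first call there is no
     * current frame yet, so it is seeded from "next". */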
    s->prev = s->cur;
    s->cur  = s->next;
    s->next = frame;

    if (!s->cur) {
        s->cur = av_frame_clone(s->next);
        if (!s->cur)
            return AVERROR(ENOMEM);
    }

    if (!s->prev)
        return 0;

    if ((s->deint && !(s->cur->flags & AV_FRAME_FLAG_INTERLACED)) || ctx->is_disabled) {
        AVFrame *out = av_frame_clone(s->cur);

        if (!out)
            return AVERROR(ENOMEM);

        av_frame_free(&s->prev);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], out);
    }

    ret = filter(ctx, 0);
    if (ret < 0 || s->mode == 0)
        return ret;

    return filter(ctx, 1);
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    W3FDIFContext *s = ctx->priv;
    int ret;

    if (s->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(ctx->inputs[0]);
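    /* At EOF there is still buffered data to flush: clone the last frame,
     * give it an extrapolated timestamp, and push it back through
     * filter_frame() so the final output frame(s) get emitted. */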
    if (ret == AVERROR_EOF && s->cur) {
        AVFrame *next = av_frame_clone(s->next);

        if (!next)
            return AVERROR(ENOMEM);

        next->pts = s->next->pts * 2 - s->cur->pts;
        filter_frame(ctx->inputs[0], next);
        s->eof = 1;
    } else if (ret < 0) {
        return ret;
    }

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    W3FDIFContext *s = ctx->priv;
    int i;

    av_frame_free(&s->prev);
    av_frame_free(&s->cur);
    av_frame_free(&s->next);

    for (i = 0; i < s->nb_threads; i++)
        av_freep(&s->work_line[i]);

    av_freep(&s->work_line);
}
static const AVFilterPad w3fdif_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
        .config_props  = config_input,
    },
};
static const AVFilterPad w3fdif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
};
const FFFilter ff_vf_w3fdif = {
    .p.name          = "w3fdif",
    .p.description   = NULL_IF_CONFIG_SMALL("Apply Martin Weston three field deinterlace."),
    .p.priv_class    = &w3fdif_class,
    .p.flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .priv_size       = sizeof(W3FDIFContext),
    .uninit          = uninit,
    FILTER_INPUTS(w3fdif_inputs),
    FILTER_OUTPUTS(w3fdif_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .process_command = ff_filter_process_command,
};