FFmpeg 4.3.8
vf_fade.c
/*
 * Copyright (c) 2010 Brandon Mintern
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video fade filter
 * based heavily on vf_negate.c by Bobby Bingham
 */
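
/*
 * Illustrative command lines (not part of the upstream source); the option
 * names match the fade_options table below, the input/output file names are
 * placeholders:
 *
 *   ffmpeg -i input.mp4 -vf "fade=type=in:start_frame=0:nb_frames=30" output.mp4
 *   ffmpeg -i input.mp4 -vf "fade=type=out:start_time=8:duration=2:color=white" output.mp4
 */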

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define R 0
#define G 1
#define B 2
#define A 3

#define Y 0
#define U 1
#define V 2

#define FADE_IN  0
#define FADE_OUT 1

typedef struct FadeContext {
    const AVClass *class;
    int type;
    int factor, fade_per_frame;
    int start_frame, nb_frames;
    int hsub, vsub, bpp, depth;
    unsigned int black_level, black_level_scaled;
    uint8_t is_rgb;
    uint8_t is_packed_rgb;
    uint8_t rgba_map[4];
    int alpha;
    int is_planar;
    uint64_t start_time, duration;
    enum {VF_FADE_WAITING=0, VF_FADE_FADING, VF_FADE_DONE} fade_state;
    uint8_t color_rgba[4];  ///< fade color
    int black_fade;         ///< if color_rgba is black
    int (*filter_slice_luma)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
    int (*filter_slice_chroma)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
    int (*filter_slice_alpha)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} FadeContext;

static av_cold int init(AVFilterContext *ctx)
{
    FadeContext *s = ctx->priv;

    s->fade_per_frame = (1 << 16) / s->nb_frames;
    s->fade_state = VF_FADE_WAITING;

    if (s->duration != 0) {
        // If duration (seconds) is non-zero, assume that we are not fading based on frames
        s->nb_frames = 0; // Mostly to clean up logging
    }

    // Choose what to log. If both time-based and frame-based options are set, both lines will be in the log
    if (s->start_frame || s->nb_frames) {
        av_log(ctx, AV_LOG_VERBOSE,
               "type:%s start_frame:%d nb_frames:%d alpha:%d\n",
               s->type == FADE_IN ? "in" : "out", s->start_frame,
               s->nb_frames, s->alpha);
    }
    if (s->start_time || s->duration) {
        av_log(ctx, AV_LOG_VERBOSE,
               "type:%s start_time:%f duration:%f alpha:%d\n",
               s->type == FADE_IN ? "in" : "out", (s->start_time / (double)AV_TIME_BASE),
               (s->duration / (double)AV_TIME_BASE), s->alpha);
    }

    s->black_fade = !memcmp(s->color_rgba, "\x00\x00\x00\xff", 4);
    return 0;
}
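
/*
 * Worked example: with the default nb_frames = 25 (see fade_options below),
 * init() gives fade_per_frame = 65536 / 25 = 2621, so in filter_frame() the
 * fade factor steps from 0 up to 25 * 2621 = 65525 (~UINT16_MAX) over 25 frames.
 */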

static int query_formats(AVFilterContext *ctx)
{
    const FadeContext *s = ctx->priv;
    static const enum AVPixelFormat pix_fmts[] = {
        /* 8-bit and high-bit-depth YUV/YUVJ/YUVA and RGB/GBR entries
         * omitted from this listing */
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat pix_fmts_rgb[] = {
        /* packed and planar RGB entries omitted from this listing */
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat pix_fmts_alpha[] = {
        /* YUVA and RGB-with-alpha entries omitted from this listing */
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat pix_fmts_rgba[] = {
        /* RGB-with-alpha entries omitted from this listing */
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list;

    if (s->alpha) {
        if (s->black_fade)
            fmts_list = ff_make_format_list(pix_fmts_alpha);
        else
            fmts_list = ff_make_format_list(pix_fmts_rgba);
    } else {
        if (s->black_fade)
            fmts_list = ff_make_format_list(pix_fmts);
        else
            fmts_list = ff_make_format_list(pix_fmts_rgb);
    }
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

const static enum AVPixelFormat studio_level_pix_fmts[] = {
    /* limited-range (studio level) YUV entries omitted from this listing */
    AV_PIX_FMT_NONE
};

static av_always_inline void filter_rgb(FadeContext *s, const AVFrame *frame,
                                        int slice_start, int slice_end,
                                        int do_alpha, int step)
{
    int i, j;
    const uint8_t r_idx = s->rgba_map[R];
    const uint8_t g_idx = s->rgba_map[G];
    const uint8_t b_idx = s->rgba_map[B];
    const uint8_t a_idx = s->rgba_map[A];
    const uint8_t *c = s->color_rgba;

    for (i = slice_start; i < slice_end; i++) {
        uint8_t *p = frame->data[0] + i * frame->linesize[0];
        for (j = 0; j < frame->width; j++) {
#define INTERP(c_name, c_idx) av_clip_uint8(((c[c_idx]<<16) + ((int)p[c_name] - (int)c[c_idx]) * s->factor + (1<<15)) >> 16)
            p[r_idx] = INTERP(r_idx, 0);
            p[g_idx] = INTERP(g_idx, 1);
            p[b_idx] = INTERP(b_idx, 2);
            if (do_alpha)
                p[a_idx] = INTERP(a_idx, 3);
            p += step;
        }
    }
}
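
/*
 * Worked example of the INTERP() blend above (plain 16.16 fixed point):
 * fading to black (c[c_idx] = 0) with factor = 32768 (i.e. 0.5), a sample
 * value of 200 becomes (0 + 200 * 32768 + 32768) >> 16 = 100.
 */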

static av_always_inline void filter_rgb_planar(FadeContext *s, const AVFrame *frame,
                                               int slice_start, int slice_end,
                                               int do_alpha)
{
    int i, j;
    const uint8_t *c = s->color_rgba;

    for (i = slice_start; i < slice_end; i++) {
        uint8_t *pg = frame->data[0] + i * frame->linesize[0];
        uint8_t *pb = frame->data[1] + i * frame->linesize[1];
        uint8_t *pr = frame->data[2] + i * frame->linesize[2];
        uint8_t *pa = frame->data[3] + i * frame->linesize[3];
        for (j = 0; j < frame->width; j++) {
#define INTERPP(c_name, c_idx) av_clip_uint8(((c[c_idx]<<16) + ((int)c_name - (int)c[c_idx]) * s->factor + (1<<15)) >> 16)
            pr[j] = INTERPP(pr[j], 0);
            pg[j] = INTERPP(pg[j], 1);
            pb[j] = INTERPP(pb[j], 2);
            if (do_alpha)
                pa[j] = INTERPP(pa[j], 3);
        }
    }
}

static int filter_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr,
                            int nb_jobs)
{
    FadeContext *s = ctx->priv;
    AVFrame *frame = arg;
    int slice_start = (frame->height *  jobnr   ) / nb_jobs;
    int slice_end   = (frame->height * (jobnr+1)) / nb_jobs;

    if (s->is_planar && s->alpha)
        filter_rgb_planar(s, frame, slice_start, slice_end, 1);
    else if (s->is_planar)
        filter_rgb_planar(s, frame, slice_start, slice_end, 0);
    else if (s->alpha)    filter_rgb(s, frame, slice_start, slice_end, 1, 4);
    else if (s->bpp == 3) filter_rgb(s, frame, slice_start, slice_end, 0, 3);
    else if (s->bpp == 4) filter_rgb(s, frame, slice_start, slice_end, 0, 4);
    else                  av_assert0(0);

    return 0;
}

static int filter_slice_luma(AVFilterContext *ctx, void *arg, int jobnr,
                             int nb_jobs)
{
    FadeContext *s = ctx->priv;
    AVFrame *frame = arg;
    int slice_start = (frame->height *  jobnr   ) / nb_jobs;
    int slice_end   = (frame->height * (jobnr+1)) / nb_jobs;
    int i, j;

    for (int k = 0; k < 1 + 2 * (s->is_planar && s->is_rgb); k++) {
        for (i = slice_start; i < slice_end; i++) {
            uint8_t *p = frame->data[k] + i * frame->linesize[k];
            for (j = 0; j < frame->width * s->bpp; j++) {
                /* s->factor uses its 16 low-order bits as the fractional part
                 * (16.16 fixed point); 32768 = 1 << 15 represents 0.5 and is
                 * added for rounding. */
                *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
                p++;
            }
        }
    }

    return 0;
}
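
/*
 * Worked example for the luma blend above: with 8-bit studio-range input
 * (black_level = 16, black_level_scaled = (16 << 16) + 32768) and
 * factor = 32768 (a half-way fade to black), a luma sample of 235 maps to
 * ((235 - 16) * 32768 + (16 << 16) + 32768) >> 16 = 126.
 */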

static int filter_slice_luma16(AVFilterContext *ctx, void *arg, int jobnr,
                               int nb_jobs)
{
    FadeContext *s = ctx->priv;
    AVFrame *frame = arg;
    int slice_start = (frame->height *  jobnr   ) / nb_jobs;
    int slice_end   = (frame->height * (jobnr+1)) / nb_jobs;
    int i, j;

    for (int k = 0; k < 1 + 2 * (s->is_planar && s->is_rgb); k++) {
        for (i = slice_start; i < slice_end; i++) {
            uint16_t *p = (uint16_t *)(frame->data[k] + i * frame->linesize[k]);
            for (j = 0; j < frame->width * s->bpp; j++) {
                /* s->factor uses its 16 low-order bits as the fractional part
                 * (16.16 fixed point); 32768 = 1 << 15 represents 0.5 and is
                 * added for rounding. */
                *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
                p++;
            }
        }
    }

    return 0;
}

static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
                               int nb_jobs)
{
    FadeContext *s = ctx->priv;
    AVFrame *frame = arg;
    int i, j, plane;
    const int width  = AV_CEIL_RSHIFT(frame->width,  s->hsub);
    const int height = AV_CEIL_RSHIFT(frame->height, s->vsub);
    int slice_start = (height *  jobnr   ) / nb_jobs;
    int slice_end   = FFMIN(((height * (jobnr+1)) / nb_jobs), frame->height);

    for (plane = 1; plane < 3; plane++) {
        for (i = slice_start; i < slice_end; i++) {
            uint8_t *p = frame->data[plane] + i * frame->linesize[plane];
            for (j = 0; j < width; j++) {
                /* 8421367 is (approximately) 128.5 in 16.16 fixed point:
                 * the neutral chroma value 128 plus 0.5 for rounding. */
                *p = ((*p - 128) * s->factor + 8421367) >> 16;
                p++;
            }
        }
    }

    return 0;
}
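
/*
 * For 8-bit chroma, the blend above pulls samples toward the neutral value
 * 128; e.g. at factor = 0 (fully faded to black) every chroma sample becomes
 * (0 + 8421367) >> 16 = 128, i.e. grey.
 */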

static int filter_slice_chroma16(AVFilterContext *ctx, void *arg, int jobnr,
                                 int nb_jobs)
{
    FadeContext *s = ctx->priv;
    AVFrame *frame = arg;
    int i, j, plane;
    const int width  = AV_CEIL_RSHIFT(frame->width,  s->hsub);
    const int height = AV_CEIL_RSHIFT(frame->height, s->vsub);
    const int mid = 1 << (s->depth - 1);
    const int add = ((mid << 1) + 1) << 15;
    int slice_start = (height *  jobnr   ) / nb_jobs;
    int slice_end   = FFMIN(((height * (jobnr+1)) / nb_jobs), frame->height);

    for (plane = 1; plane < 3; plane++) {
        for (i = slice_start; i < slice_end; i++) {
            uint16_t *p = (uint16_t *)(frame->data[plane] + i * frame->linesize[plane]);
            for (j = 0; j < width; j++) {
                *p = ((*p - mid) * s->factor + add) >> 16;
                p++;
            }
        }
    }

    return 0;
}

static int filter_slice_alpha(AVFilterContext *ctx, void *arg, int jobnr,
                              int nb_jobs)
{
    FadeContext *s = ctx->priv;
    AVFrame *frame = arg;
    int plane = s->is_packed_rgb ? 0 : A;
    int slice_start = (frame->height *  jobnr   ) / nb_jobs;
    int slice_end   = (frame->height * (jobnr+1)) / nb_jobs;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        uint8_t *p = frame->data[plane] + i * frame->linesize[plane] + s->is_packed_rgb*s->rgba_map[A];
        int step = s->is_packed_rgb ? 4 : 1;
        for (j = 0; j < frame->width; j++) {
            /* s->factor uses its 16 low-order bits as the fractional part
             * (16.16 fixed point); 32768 = 1 << 15 represents 0.5 and is
             * added for rounding. */
            *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
            p += step;
        }
    }

    return 0;
}

static int filter_slice_alpha16(AVFilterContext *ctx, void *arg, int jobnr,
                                int nb_jobs)
{
    FadeContext *s = ctx->priv;
    AVFrame *frame = arg;
    int plane = s->is_packed_rgb ? 0 : A;
    int slice_start = (frame->height *  jobnr   ) / nb_jobs;
    int slice_end   = (frame->height * (jobnr+1)) / nb_jobs;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        uint16_t *p = (uint16_t *)(frame->data[plane] + i * frame->linesize[plane]) + s->is_packed_rgb*s->rgba_map[A];
        int step = s->is_packed_rgb ? 4 : 1;
        for (j = 0; j < frame->width; j++) {
            /* s->factor uses its 16 low-order bits as the fractional part
             * (16.16 fixed point); 32768 = 1 << 15 represents 0.5 and is
             * added for rounding. */
            *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
            p += step;
        }
    }

    return 0;
}

static int config_props(AVFilterLink *inlink)
{
    FadeContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);

    s->hsub = pixdesc->log2_chroma_w;
    s->vsub = pixdesc->log2_chroma_h;

    ff_fill_rgba_map(s->rgba_map, inlink->format);

    s->depth = pixdesc->comp[0].depth;
    s->bpp = pixdesc->flags & AV_PIX_FMT_FLAG_PLANAR ?
             1 :
             av_get_bits_per_pixel(pixdesc) >> 3;
    s->alpha &= !!(pixdesc->flags & AV_PIX_FMT_FLAG_ALPHA);
    s->is_planar = pixdesc->flags & AV_PIX_FMT_FLAG_PLANAR;
    s->is_rgb = pixdesc->flags & AV_PIX_FMT_FLAG_RGB;
    s->is_packed_rgb = !s->is_planar && s->is_rgb;

    /* use CCIR601/709 black level for studio-level pixel non-alpha components */
    s->black_level =
        ff_fmt_is_in(inlink->format, studio_level_pix_fmts) && !s->alpha ? 16 * (1 << (s->depth - 8)): 0;
    /* 32768 = 1 << 15 represents 0.5 in 16.16 fixed point and is added for rounding. */
    s->black_level_scaled = (s->black_level << 16) + 32768;

    s->filter_slice_luma   = s->depth <= 8 ? filter_slice_luma   : filter_slice_luma16;
    s->filter_slice_chroma = s->depth <= 8 ? filter_slice_chroma : filter_slice_chroma16;
    s->filter_slice_alpha  = s->depth <= 8 ? filter_slice_alpha  : filter_slice_alpha16;

    return 0;
}
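
/*
 * Example of the black-level computation in config_props() above: a 10-bit
 * studio-range format gets black_level = 16 * (1 << (10 - 8)) = 64 and
 * black_level_scaled = (64 << 16) + 32768.
 */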

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FadeContext *s = ctx->priv;
    double frame_timestamp = frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base);

    // Calculate Fade assuming this is a Fade In
    if (s->fade_state == VF_FADE_WAITING) {
        s->factor = 0;
        if (frame_timestamp >= s->start_time/(double)AV_TIME_BASE
            && inlink->frame_count_out >= s->start_frame) {
            // Time to start fading
            s->fade_state = VF_FADE_FADING;

            // Save start time in case we are starting based on frames and fading based on time
            if (s->start_time == 0 && s->start_frame != 0) {
                s->start_time = frame_timestamp*(double)AV_TIME_BASE;
            }

            // Save start frame in case we are starting based on time and fading based on frames
            if (s->start_time != 0 && s->start_frame == 0) {
                s->start_frame = inlink->frame_count_out;
            }
        }
    }
    if (s->fade_state == VF_FADE_FADING) {
        if (s->duration == 0) {
            // Fading based on frame count
            s->factor = (inlink->frame_count_out - s->start_frame) * s->fade_per_frame;
            if (inlink->frame_count_out > s->start_frame + s->nb_frames) {
                s->fade_state = VF_FADE_DONE;
            }

        } else {
            // Fading based on duration
            s->factor = (frame_timestamp - s->start_time/(double)AV_TIME_BASE)
                            * (float) UINT16_MAX / (s->duration/(double)AV_TIME_BASE);
            if (frame_timestamp > s->start_time/(double)AV_TIME_BASE
                                  + s->duration/(double)AV_TIME_BASE) {
                s->fade_state = VF_FADE_DONE;
            }
        }
    }
    if (s->fade_state == VF_FADE_DONE) {
        s->factor = UINT16_MAX;
    }

    s->factor = av_clip_uint16(s->factor);

    // Invert fade_factor if Fading Out
    if (s->type == FADE_OUT) {
        s->factor = UINT16_MAX - s->factor;
    }

    if (s->factor < UINT16_MAX) {
        if (s->alpha) {
            ctx->internal->execute(ctx, s->filter_slice_alpha, frame, NULL,
                                   FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
        } else if (s->is_rgb && !s->black_fade) {
            ctx->internal->execute(ctx, filter_slice_rgb, frame, NULL,
                                   FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
        } else {
            /* luma, or rgb plane in case of black */
            ctx->internal->execute(ctx, s->filter_slice_luma, frame, NULL,
                                   FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));

            if (frame->data[1] && frame->data[2] && !s->is_rgb) {
                /* chroma planes */
                ctx->internal->execute(ctx, s->filter_slice_chroma, frame, NULL,
                                       FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
            }
        }
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
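
/*
 * Note on filter_frame() above: each frame moves through VF_FADE_WAITING,
 * VF_FADE_FADING and VF_FADE_DONE, with factor running from 0 (fully faded)
 * to UINT16_MAX (untouched). A fade-out simply inverts the factor, and the
 * slice filters are only dispatched while factor < UINT16_MAX.
 */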


#define OFFSET(x) offsetof(FadeContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption fade_options[] = {
    { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
    { "t",    "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
    { "in",  "fade-in",  0, AV_OPT_TYPE_CONST, { .i64 = FADE_IN },  .flags = FLAGS, .unit = "type" },
    { "out", "fade-out", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_OUT }, .flags = FLAGS, .unit = "type" },
    { "start_frame", "Number of the first frame to which to apply the effect.",
        OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "s",           "Number of the first frame to which to apply the effect.",
        OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "nb_frames",   "Number of frames to which the effect should be applied.",
        OFFSET(nb_frames),   AV_OPT_TYPE_INT, { .i64 = 25 }, 1, INT_MAX, FLAGS },
    { "n",           "Number of frames to which the effect should be applied.",
        OFFSET(nb_frames),   AV_OPT_TYPE_INT, { .i64 = 25 }, 1, INT_MAX, FLAGS },
    { "alpha",       "fade alpha if it is available on the input", OFFSET(alpha), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "start_time",  "Number of seconds of the beginning of the effect.",
        OFFSET(start_time),  AV_OPT_TYPE_DURATION, { .i64 = 0. }, 0, INT64_MAX, FLAGS },
    { "st",          "Number of seconds of the beginning of the effect.",
        OFFSET(start_time),  AV_OPT_TYPE_DURATION, { .i64 = 0. }, 0, INT64_MAX, FLAGS },
    { "duration",    "Duration of the effect in seconds.",
        OFFSET(duration),    AV_OPT_TYPE_DURATION, { .i64 = 0. }, 0, INT64_MAX, FLAGS },
    { "d",           "Duration of the effect in seconds.",
        OFFSET(duration),    AV_OPT_TYPE_DURATION, { .i64 = 0. }, 0, INT64_MAX, FLAGS },
    { "color",       "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, { .str = "black" }, 0, 0, FLAGS },
    { "c",           "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, { .str = "black" }, 0, 0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(fade);

static const AVFilterPad avfilter_vf_fade_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_VIDEO,
        .config_props   = config_props,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_fade_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_fade = {
    .name          = "fade",
    .description   = NULL_IF_CONFIG_SMALL("Fade in/out input video."),
    .init          = init,
    .priv_size     = sizeof(FadeContext),
    .priv_class    = &fade_class,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_fade_inputs,
    .outputs       = avfilter_vf_fade_outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};