FFmpeg  4.3.8
vf_xmedian.c
/*
 * Copyright (c) 2019 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/qsort.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "framesync.h"
#include "video.h"

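/*
 * Overview: this file implements two filters that share one XMedianContext.
 * "xmedian" takes several synchronized video inputs and, per pixel, outputs
 * the median (or another percentile) of the co-located samples; "tmedian"
 * applies the same per-pixel selection over a sliding window of successive
 * frames from a single input.
 *
 * Illustrative command lines (typical invocations, not taken from this file;
 * the option names are defined in the tables below):
 *
 *   ffmpeg -i a.mp4 -i b.mp4 -i c.mp4 \
 *          -filter_complex "xmedian=inputs=3:percentile=0.5" out.mp4
 *
 *   ffmpeg -i noisy.mp4 -vf "tmedian=radius=2" denoised.mp4
 */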
typedef struct XMedianContext {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    int nb_inputs;
    int nb_frames;
    int planes;
    float percentile;

    int tmedian;
    int radius;
    int index;
    int depth;
    int max;
    int nb_planes;
    int linesize[4];
    int width[4];
    int height[4];

    AVFrame **frames;
    FFFrameSync fs;

    int (*median_frames)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} XMedianContext;

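/*
 * query_formats(): advertise the supported pixel formats. The filter works
 * on planar grayscale, YUV(A) and GBR(A) layouts with 8 to 16 bits per
 * component, since the median kernels below address samples plane by plane
 * as either uint8_t or uint16_t.
 */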
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts[] = {
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_GRAY9,
        AV_PIX_FMT_GRAY10,
        AV_PIX_FMT_GRAY12,
        AV_PIX_FMT_GRAY14,
        AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV440P10,
        AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_YUVA420P,  AV_PIX_FMT_YUVA422P,   AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *formats = ff_make_format_list(pixel_fmts);

    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, formats);
}

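/*
 * init(): shared by both filters. For "tmedian" the number of window frames
 * is derived from the radius option (radius*2 + 1); for "xmedian" the radius
 * is half the inputs option. The percentile option is then mapped to a sort
 * index: with the default percentile of 0.5 and, say, 5 inputs (radius 2),
 * index becomes 2, i.e. the true median of the sorted samples (the numbers
 * are illustrative only). For "xmedian", one named input pad per input is
 * also created here.
 */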
static av_cold int init(AVFilterContext *ctx)
{
    XMedianContext *s = ctx->priv;
    int ret;

    s->tmedian = !strcmp(ctx->filter->name, "tmedian");

    if (!s->tmedian) {
        s->radius = s->nb_inputs / 2;
    } else {
        s->nb_inputs = s->radius * 2 + 1;
    }

    if (s->nb_inputs & 1)
        s->index = s->radius * 2.f * s->percentile;
    else
        s->index = av_clip(s->radius * 2.f * s->percentile, 1, s->nb_inputs - 1);
    s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_inputs && !s->tmedian; i++) {
        AVFilterPad pad = { 0 };

        pad.type = AVMEDIA_TYPE_VIDEO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}

typedef struct ThreadData {
    AVFrame **in, *out;
} ThreadData;

static int comparei(const void *p1, const void *p2)
{
    int left  = *(const int *)p1;
    int right = *(const int *)p2;
    return FFDIFFSIGN(left, right);
}

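/*
 * median_frames16()/median_frames8(): slice-threaded kernels. For every
 * output pixel of every selected plane they gather the co-located sample
 * from each input frame, sort the samples with AV_QSORT, and emit the value
 * at the precomputed index (for an even number of inputs, the mean of the
 * two central values). Planes not selected via the planes bitmask are copied
 * unchanged from the middle input, in[radius].
 */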
static int median_frames16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    XMedianContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame **in = td->in;
    AVFrame *out = td->out;
    const int nb_inputs = s->nb_inputs;
    const int radius = s->radius;
    const int index = s->index;
    int values[256];

    for (int p = 0; p < s->nb_planes; p++) {
        const int slice_start = (s->height[p] * jobnr) / nb_jobs;
        const int slice_end = (s->height[p] * (jobnr+1)) / nb_jobs;
        uint16_t *dst = (uint16_t *)(out->data[p] + slice_start * out->linesize[p]);

        if (!((1 << p) & s->planes)) {
            av_image_copy_plane((uint8_t *)dst, out->linesize[p],
                                in[radius]->data[p] + slice_start * in[radius]->linesize[p],
                                in[radius]->linesize[p],
                                s->linesize[p], slice_end - slice_start);
            continue;
        }

        for (int y = slice_start; y < slice_end; y++) {
            for (int x = 0; x < s->width[p]; x++) {
                for (int i = 0; i < nb_inputs; i++) {
                    const uint16_t *src = (const uint16_t *)(in[i]->data[p] + y * in[i]->linesize[p]);
                    values[i] = src[x];
                }

                AV_QSORT(values, nb_inputs, int, comparei);
                if (nb_inputs & 1)
                    dst[x] = values[index];
                else
                    dst[x] = (values[index] + values[index - 1]) >> 1;
            }

            dst += out->linesize[p] / 2;
        }
    }

    return 0;
}

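/* 8-bit variant of the same per-pixel selection, addressing samples as uint8_t. */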
static int median_frames8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    XMedianContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame **in = td->in;
    AVFrame *out = td->out;
    const int nb_inputs = s->nb_inputs;
    const int radius = s->radius;
    const int index = s->index;
    int values[256];

    for (int p = 0; p < s->nb_planes; p++) {
        const int slice_start = (s->height[p] * jobnr) / nb_jobs;
        const int slice_end = (s->height[p] * (jobnr+1)) / nb_jobs;
        uint8_t *dst = out->data[p] + slice_start * out->linesize[p];

        if (!((1 << p) & s->planes)) {
            av_image_copy_plane(dst, out->linesize[p],
                                in[radius]->data[p] + slice_start * in[radius]->linesize[p],
                                in[radius]->linesize[p],
                                s->linesize[p], slice_end - slice_start);
            continue;
        }

        for (int y = slice_start; y < slice_end; y++) {
            for (int x = 0; x < s->width[p]; x++) {
                for (int i = 0; i < nb_inputs; i++)
                    values[i] = in[i]->data[p][y * in[i]->linesize[p] + x];

                AV_QSORT(values, nb_inputs, int, comparei);
                if (nb_inputs & 1)
                    dst[x] = values[index];
                else
                    dst[x] = (values[index] + values[index - 1]) >> 1;
            }

            dst += out->linesize[p];
        }
    }

    return 0;
}

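/*
 * process_frame(): framesync callback for "xmedian". It fetches the current
 * frame from every input, allocates an output buffer, rescales the event
 * timestamp to the output time base and runs the selected kernel across
 * slices through libavfilter's execute() mechanism.
 */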
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    XMedianContext *s = fs->opaque;
    AVFrame **in = s->frames;
    AVFrame *out;
    ThreadData td;
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    td.in = in;
    td.out = out;
    ctx->internal->execute(ctx, s->median_frames, &td, NULL, FFMIN(s->height[1], ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(outlink, out);
}

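/*
 * config_output(): shared output configuration. It checks that all "xmedian"
 * inputs have the same dimensions, derives per-plane widths, heights and
 * line sizes (taking chroma subsampling into account), picks the 8- or
 * 16-bit kernel from the component depth, and, for "xmedian" only, sets up
 * and configures the frame synchronizer that drives process_frame().
 */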
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    XMedianContext *s = ctx->priv;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    AVRational sar = ctx->inputs[0]->sample_aspect_ratio;
    AVFilterLink *inlink = ctx->inputs[0];
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    for (int i = 1; i < s->nb_inputs && !s->tmedian; i++) {
        if (ctx->inputs[i]->h != height || ctx->inputs[i]->w != width) {
            av_log(ctx, AV_LOG_ERROR, "Input %d size (%dx%d) does not match input %d size (%dx%d).\n", i, ctx->inputs[i]->w, ctx->inputs[i]->h, 0, width, height);
            return AVERROR(EINVAL);
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;
    s->max = (1 << s->depth) - 1;

    if (s->depth <= 8)
        s->median_frames = median_frames8;
    else
        s->median_frames = median_frames16;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, s->desc->log2_chroma_w);
    s->width[0] = s->width[3] = inlink->w;
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    if (s->tmedian)
        return 0;

    outlink->w = width;
    outlink->h = height;
    outlink->frame_rate = frame_rate;
    outlink->sample_aspect_ratio = sar;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = EXT_STOP;
    }

    ret = ff_framesync_configure(&s->fs);
    outlink->time_base = s->fs.time_base;

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    XMedianContext *s = ctx->priv;

    ff_framesync_uninit(&s->fs);

    for (int i = 0; i < ctx->nb_inputs && !s->tmedian; i++)
        av_freep(&ctx->input_pads[i].name);
    for (int i = 0; i < s->nb_frames && s->frames && s->tmedian; i++)
        av_frame_free(&s->frames[i]);
    av_freep(&s->frames);
}

static int activate(AVFilterContext *ctx)
{
    XMedianContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

#define OFFSET(x) offsetof(XMedianContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

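/*
 * Options of "xmedian": the number of inputs (3-255), a bitmask selecting
 * which planes to filter (unselected planes are copied from the middle
 * input), and the percentile picked from the sorted samples (0.5 is the
 * median).
 */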
static const AVOption xmedian_options[] = {
    { "inputs", "set number of inputs",  OFFSET(nb_inputs),  AV_OPT_TYPE_INT,   {.i64=3},   3, 255, .flags = FLAGS },
    { "planes", "set planes to filter",  OFFSET(planes),     AV_OPT_TYPE_INT,   {.i64=15},  0,  15, .flags = FLAGS },
    { "percentile", "set percentile",    OFFSET(percentile), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0,   1, .flags = FLAGS },
    { NULL },
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

#if CONFIG_XMEDIAN_FILTER
AVFILTER_DEFINE_CLASS(xmedian);

AVFilter ff_vf_xmedian = {
    .name          = "xmedian",
    .description   = NULL_IF_CONFIG_SMALL("Pick median pixels from several video inputs."),
    .priv_size     = sizeof(XMedianContext),
    .priv_class    = &xmedian_class,
    .query_formats = query_formats,
    .outputs       = outputs,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_XMEDIAN_FILTER */

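/*
 * "tmedian" reuses the same context, kernels and output configuration, but
 * works on a single input: frames are accumulated in s->frames until
 * radius*2 + 1 of them are available, after which the window slides by one
 * frame per incoming frame and the median is computed across the window.
 */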
#if CONFIG_TMEDIAN_FILTER
static int tmedian_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    XMedianContext *s = ctx->priv;
    ThreadData td;
    AVFrame *out;

    if (s->nb_frames < s->nb_inputs) {
        s->frames[s->nb_frames] = in;
        s->nb_frames++;
        if (s->nb_frames < s->nb_inputs)
            return 0;
    } else {
        av_frame_free(&s->frames[0]);
        memmove(&s->frames[0], &s->frames[1], sizeof(*s->frames) * (s->nb_inputs - 1));
        s->frames[s->nb_inputs - 1] = in;
    }

    if (ctx->is_disabled) {
        out = av_frame_clone(s->frames[0]);
        if (!out)
            return AVERROR(ENOMEM);
        return ff_filter_frame(outlink, out);
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = s->frames[0]->pts;

    td.out = out;
    td.in = s->frames;
    ctx->internal->execute(ctx, s->median_frames, &td, NULL, FFMIN(s->height[0], ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(outlink, out);
}

static const AVOption tmedian_options[] = {
    { "radius", "set median filter radius", OFFSET(radius),     AV_OPT_TYPE_INT,   {.i64=1},   1, 127, .flags = FLAGS },
    { "planes", "set planes to filter",     OFFSET(planes),     AV_OPT_TYPE_INT,   {.i64=15},  0,  15, .flags = FLAGS },
    { "percentile", "set percentile",       OFFSET(percentile), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0,   1, .flags = FLAGS },
    { NULL },
};

static const AVFilterPad tmedian_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tmedian_filter_frame,
    },
    { NULL }
};

static const AVFilterPad tmedian_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFILTER_DEFINE_CLASS(tmedian);

AVFilter ff_vf_tmedian = {
    .name          = "tmedian",
    .description   = NULL_IF_CONFIG_SMALL("Pick median pixels from successive frames."),
    .priv_size     = sizeof(XMedianContext),
    .priv_class    = &tmedian_class,
    .query_formats = query_formats,
    .inputs        = tmedian_inputs,
    .outputs       = tmedian_outputs,
    .init          = init,
    .uninit        = uninit,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_TMEDIAN_FILTER */