#define MIN_FILTER_SIZE 3
#define MAX_FILTER_SIZE 301

#define FF_BUFQUEUE_SIZE (MAX_FILTER_SIZE + 1)

#define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* frame_size(): convert the configured frame length from milliseconds to an even sample count */
const int frame_size = lrint((double)sample_rate * (frame_len_msec / 1000.0));
return frame_size + (frame_size % 2);
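As a standalone illustration (not the filter's own code), the same millisecond-to-samples conversion can be exercised directly; the function name and the 44100 Hz / 7 ms values below are example choices only:

#include <math.h>
#include <stdio.h>

/* hypothetical helper mirroring the excerpt above */
static int frame_size_sketch(int sample_rate, int frame_len_msec)
{
    const int n = lrint((double)sample_rate * (frame_len_msec / 1000.0));
    return n + (n % 2);                 /* odd counts are rounded up to the next even value */
}

int main(void)
{
    printf("%d\n", frame_size_sketch(44100, 500)); /* 22050 samples, already even */
    printf("%d\n", frame_size_sketch(44100, 7));   /* 308.7 -> 309 -> 310 */
    return 0;
}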
for (int i = 0; i < side; i++)

int count = (q->size - new_size + 1) / 2;

/* init_gaussian_filter(): coefficients of the Gaussian smoothing window */
double total_weight = 0.0;
const double sigma = (((s->filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);

const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
const double c2 = 2.0 * sigma * sigma;

adjust = 1.0 / total_weight;
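A minimal sketch of how a normalized Gaussian window can be built from the constants above (c1 = 1/(sigma*sqrt(2*pi)), c2 = 2*sigma^2); the weights[] array and the centering on the middle tap are assumptions for illustration, not the filter's internal storage:

#include <math.h>

static void build_gaussian_window(double *weights, int filter_size)
{
    const double sigma = (((filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
    const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
    const double c2 = 2.0 * sigma * sigma;
    const int offset = filter_size / 2;
    double total_weight = 0.0;

    for (int i = 0; i < filter_size; i++) {
        const double x = i - offset;
        weights[i] = c1 * exp(-(x * x) / c2);   /* Gaussian bell centered on the middle tap */
        total_weight += weights[i];
    }
    /* normalize so the taps sum to 1.0, matching adjust = 1.0 / total_weight above */
    for (int i = 0; i < filter_size; i++)
        weights[i] *= 1.0 / total_weight;
}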
for (c = 0; c < inlink->channels; c++) {
static inline double fade(double prev, double next, int pos, int length)
{
    const double step_size = 1.0 / length;
    const double f0 = 1.0 - (step_size * (pos + 1.0));
    const double f1 = 1.0 - f0;
    return f0 * prev + f1 * next;
}
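For example, assuming the fade() defined above, interpolating from a previous gain of 1.0 to a next gain of 2.0 over a 4-sample span yields 1.25, 1.50, 1.75, 2.00:

#include <stdio.h>

int main(void)
{
    for (int pos = 0; pos < 4; pos++)
        printf("%.2f\n", fade(1.0, 2.0, pos, 4));  /* 1.25, 1.50, 1.75, 2.00 */
    return 0;
}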
return value * value;

/* bound(): CONST is sqrt(M_PI) / 2.0 */
const double CONST = 0.8862269254527580136490837416705725913987747280611935;
/* find_peak_magnitude(): track the largest absolute sample value; the same
   update appears once for the all-channels case and once for a single channel */
double max = DBL_EPSILON;

for (c = 0; c < frame->channels; c++) {
        max = FFMAX(max, fabs(data_ptr[i]));

max = FFMAX(max, fabs(data_ptr[i]));
/* compute_frame_rms(): accumulate squared samples, all channels or one channel */
double rms_value = 0.0;

for (c = 0; c < frame->channels; c++) {
        rms_value += pow_2(data_ptr[i]);

const double *data_ptr = (double *)frame->extended_data[channel];
rms_value += pow_2(data_ptr[i]);

return FFMAX(sqrt(rms_value), DBL_EPSILON);
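By definition the RMS averages the squared samples before taking the square root; the excerpt above only shows the accumulation and the DBL_EPSILON floor. A self-contained sketch over a plain double array (standing in for one extended_data plane):

#include <float.h>
#include <math.h>

static double rms_sketch(const double *samples, int nb_samples)
{
    double sum = 0.0;
    for (int i = 0; i < nb_samples; i++)
        sum += samples[i] * samples[i];            /* pow_2() in the filter */
    return fmax(sqrt(sum / nb_samples), DBL_EPSILON);
}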
/* get_max_local_gain() */
const double maximum_gain = s->peak_value / peak_magnitude;

/* minimum_filter() */
double min = DBL_MAX;

/* gaussian_filter() */
double result = 0.0, tsum = 0.0;

/* update_gain_history() */
int input = pre_fill_size;

double smoothed, limit;

smoothed = FFMIN(smoothed, limit);
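The declarations above belong to the gain-history smoothing path (get_max_local_gain(), minimum_filter(), gaussian_filter(), update_gain_history()). An illustrative sketch of the two smoothing passes, operating on a plain array of per-frame gains rather than the filter's cqueue:

#include <float.h>

/* sliding-window minimum, in the spirit of minimum_filter() */
static double window_minimum(const double *gains, int n)
{
    double min = DBL_MAX;
    for (int i = 0; i < n; i++)
        min = gains[i] < min ? gains[i] : min;
    return min;
}

/* weighted average with a normalized kernel, in the spirit of gaussian_filter() */
static double weighted_average(const double *gains, const double *weights, int n)
{
    double result = 0.0;
    for (int i = 0; i < n; i++)
        result += weights[i] * gains[i];   /* weights assumed to sum to 1.0 */
    return result;
}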
static inline double update_value(double new, double old, double aggressiveness)
{
    av_assert0((aggressiveness >= 0.0) && (aggressiveness <= 1.0));
    return aggressiveness * new + (1.0 - aggressiveness) * old;
}
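update_value() is a plain exponential-smoothing step: with an aggressiveness of 0.3, for instance, update_value(2.0, 1.0, 0.3) evaluates to 0.3 * 2.0 + 0.7 * 1.0 = 1.3, i.e. the result moves 30% of the way from the old value toward the new one.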
/* perform_dc_correction() */
double current_average_value = 0.0;

current_average_value += dst_ptr[i] * diff;
if ((threshold > DBL_EPSILON) && (threshold < (1.0 - DBL_EPSILON))) {

    double step_size = 1.0;

    while (step_size > DBL_EPSILON) {
        while ((llrint((current_threshold + step_size) * (UINT64_C(1) << 63)) >
                llrint(current_threshold * (UINT64_C(1) << 63))) &&
               (bound(current_threshold + step_size, 1.0) <= threshold)) {
            current_threshold += step_size;

return current_threshold;
/* compute_frame_std_dev(): accumulate squared samples, all channels or one channel */
double variance = 0.0;

variance += pow_2(data_ptr[i]);

const double *data_ptr = (double *)frame->extended_data[channel];
variance += pow_2(data_ptr[i]);

return FFMAX(sqrt(variance), DBL_EPSILON);
/* perform_compression(): channels-coupled branch */
const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[0];
double prev_actual_thresh, curr_actual_thresh;

    const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
    dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);

/* perform_compression(): per-channel branch */
double prev_actual_thresh, curr_actual_thresh;

    const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
    dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
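The compression loop limits each sample to a soft, per-sample threshold and restores its sign with copysign(). A hedged sketch of an erf-based soft limit consistent with the sqrt(pi)/2 constant shown earlier (the filter's actual bound() may differ in detail):

#include <math.h>

static double soft_bound(double threshold, double val)
{
    const double k = 0.8862269254527580;       /* sqrt(M_PI) / 2.0 */
    /* the curve's slope at 0 is exactly 1, so small samples pass through almost
       unchanged while large ones saturate at the threshold */
    return erf(k * (val / threshold)) * threshold;
}

/* applied per sample as in the excerpt above:
   dst_ptr[i] = copysign(soft_bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]); */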
/* amplify_frame() */
double current_amplification_factor;

for (i = 0; i < frame->nb_samples && enabled; i++) {
        current_amplification_factor, i,    /* continuation line of a fade(...) argument list */
    dst_ptr[i] *= amplification_factor;

/* flush_buffer(): alternate the sign of the synthesized padding samples */
dst_ptr[i] *= ((i % 2) == 1) ? -1 : 1;
/* activate() */
return flush(outlink);

/* process_command() */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
AVFilter ff_af_dynaudnorm = {
    .name          = "dynaudnorm",

    .inputs        = avfilter_af_dynaudnorm_inputs,
    .outputs       = avfilter_af_dynaudnorm_outputs,
    .priv_class    = &dynaudnorm_class,
static AVFrame * ff_bufqueue_get(struct FFBufQueue *queue)
Get the first buffer from the queue and remove it.
static const AVFilterPad avfilter_af_dynaudnorm_inputs[]
static double bound(const double threshold, const double val)
static double compute_frame_rms(AVFrame *frame, int channel)
static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame, int enabled)
This structure describes decoded (raw) audio or video data.
#define CONST(name, help, val, unit)
static int cqueue_empty(cqueue *q)
static const AVFilterPad avfilter_af_dynaudnorm_outputs[]
static double pow_2(const double value)
#define AV_LOG_WARNING
Something somehow does not look correct.
static double erf(double z)
erf function Algorithm taken from the Boost project, source: http://www.boost.org/doc/libs/1_46_1/boo...
Main libavfilter public API header.
cqueue ** gain_history_smoothed
static int cqueue_size(cqueue *q)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
#define FFERROR_NOT_READY
Filters implementation helper functions.
static void analyze_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
int is_disabled
the enabled state from the last expression evaluation
double * prev_amplification_factor
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
static int config_input(AVFilterLink *inlink)
Structure holding the queue.
static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q, cqueue *tq)
const char * name
Pad name.
AVFilterLink ** inputs
array of pointers to input links
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
static cqueue * cqueue_create(int size, int max_size)
double * compress_threshold
#define AVERROR_EOF
End of file.
static av_cold void uninit(AVFilterContext *ctx)
cqueue ** gain_history_minimum
static void cqueue_free(cqueue *q)
static void cqueue_resize(cqueue *q, int new_size)
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
A filter pad used for either input or output.
A link between two filters.
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
cqueue ** gain_history_original
static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink, AVFilterLink *outlink)
static int query_formats(AVFilterContext *ctx)
static double cqueue_peek(cqueue *q, int index)
#define i(width, name, range_min, range_max)
static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
AVFILTER_DEFINE_CLASS(dynaudnorm)
int sample_rate
samples per second
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options...
void * priv
private data for use by the filter
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
simple assert() macros that are a bit more flexible than ISO C assert().
static const uint8_t offset[127][2]
AVFrame * queue[FF_BUFQUEUE_SIZE]
#define FF_FILTER_FORWARD_WANTED(outlink, inlink)
Forward the frame_wanted_out flag from an output link to an input link.
int channels
number of audio channels, only used for audio.
int ff_inlink_queued_samples(AVFilterLink *link)
static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
static int flush(AVFilterLink *outlink)
AVFilterContext * src
source filter
static void ff_bufqueue_discard_all(struct FFBufQueue *queue)
Unref and remove all buffers from the queue.
static const AVFilterPad inputs[]
static const AVFilterPad outputs[]
A list of supported channel layouts.
AVFilter ff_af_dynaudnorm
AVSampleFormat
Audio sample formats.
unsigned short available
number of available buffers
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
static local_gain get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame, int channel)
static double compute_frame_std_dev(DynamicAudioNormalizerContext *s, AVFrame *frame, int channel)
static av_cold int init(AVFilterContext *ctx)
Describe the class of an AVClass context structure.
double * dc_correction_value
const char * name
Filter name.
static av_always_inline double copysign(double x, double y)
static double setup_compress_thresh(double threshold)
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
AVFilterLink ** outputs
array of pointers to output links
enum MovChannelLayoutTag * layouts
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
#define flags(name, subs,...)
cqueue ** threshold_history
static double find_peak_magnitude(AVFrame *frame, int channel)
static int cqueue_pop(cqueue *q)
static double minimum_filter(cqueue *q)
channel
Use these values when setting the channel map with ebur128_set_channel().
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
int channels
Number of channels.
static int cqueue_enqueue(cqueue *q, double element)
static av_always_inline int diff(const uint32_t a, const uint32_t b)
AVFilterContext * dst
dest filter
static double update_value(double new, double old, double aggressiveness)
static int activate(AVFilterContext *ctx)
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
static enum AVSampleFormat sample_fmts[]
static void ff_bufqueue_add(void *log, struct FFBufQueue *queue, AVFrame *buf)
Add a buffer to the queue.
#define av_malloc_array(a, b)
static const AVOption dynaudnorm_options[]
static int cqueue_dequeue(cqueue *q, double *element)
uint8_t ** extended_data
pointers to the data planes/channels.
static double val(void *priv, double ch)
static void update_gain_history(DynamicAudioNormalizerContext *s, int channel, local_gain gain)
static int frame_size(int sample_rate, int frame_len_msec)
int nb_samples
number of audio samples (per channel) described by this frame
static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame)
static double fade(double prev, double next, int pos, int length)