#define WEIGHT_LUT_NBITS 9
#define WEIGHT_LUT_SIZE  (1<<WEIGHT_LUT_NBITS)

#define OFFSET(x) offsetof(NLMeansContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

    /* get_integral_patch_value(): sum over the (2p+1)x(2p+1) patch centered on
     * (x,y), obtained from the integral image with four corner lookups */
    const int e = ii[(y + p    ) * ii_lz_32 + (x + p    )];
    const int d = ii[(y + p    ) * ii_lz_32 + (x - p - 1)];
    const int b = ii[(y - p - 1) * ii_lz_32 + (x + p    )];
    const int a = ii[(y - p - 1) * ii_lz_32 + (x - p - 1)];
    return e - d - b + a;
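To make the four-corner lookup concrete, here is a small self-contained sketch (not the filter's code; build_integral(), patch_sum() and the zero guard row/column are conveniences of this example) that builds a summed-area table and reads a patch sum back with the same e - d - b + a expression:

#include <stdint.h>
#include <stdio.h>

#define W 8
#define H 8
#define STRIDE (W + 1)   /* one extra column of zeros so x-1 lookups stay in range */

/* Integral image with a zero top row and left column: ii[y*STRIDE + x] holds
 * the sum of src over the rectangle [0,x) x [0,y). */
static void build_integral(const uint8_t *src, uint32_t *ii)
{
    for (int y = 1; y <= H; y++) {
        uint32_t acc = 0;
        for (int x = 1; x <= W; x++) {
            acc += src[(y - 1) * W + (x - 1)];
            ii[y * STRIDE + x] = ii[(y - 1) * STRIDE + x] + acc;
        }
    }
}

/* Sum over the inclusive patch [x0,x1] x [y0,y1]: four lookups, e - d - b + a. */
static uint32_t patch_sum(const uint32_t *ii, int x0, int y0, int x1, int y1)
{
    const uint32_t e = ii[(y1 + 1) * STRIDE + (x1 + 1)];
    const uint32_t d = ii[(y1 + 1) * STRIDE +  x0     ];
    const uint32_t b = ii[ y0      * STRIDE + (x1 + 1)];
    const uint32_t a = ii[ y0      * STRIDE +  x0     ];
    return e - d - b + a;
}

int main(void)
{
    uint8_t src[W * H];
    uint32_t ii[(W + 1) * (H + 1)] = {0};

    for (int i = 0; i < W * H; i++)
        src[i] = 1;                       /* all-ones image: patch sum == patch area */
    build_integral(src, ii);
    printf("3x3 patch sum at (2,2): %u\n", (unsigned)patch_sum(ii, 1, 1, 3, 3)); /* 9 */
    return 0;
}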
    /* compute_safe_ssd_integral_image_c(): both s1 and s2 are fully readable
     * over the w x h area, so no coordinate clipping is needed */
    for (y = 0; y < h; y++) {
        uint32_t acc = dst[-1] - dst[-dst_linesize_32 - 1];

        for (x = 0; x < w; x++) {
            const int d = s1[x] - s2[x];
            acc += d * d;
            dst[x] = dst[-dst_linesize_32 + x] + acc;
        }
        s1  += linesize1;
        s2  += linesize2;
        dst += dst_linesize_32;
    }
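The same row recurrence can be checked in isolation. The sketch below (assumptions of this example: fixed W/H, a zeroed guard row and column instead of a caller-prepared border, rand() test data) fills an SSD integral image between two buffers and verifies that its bottom-right cell equals the brute-force sum of squared differences:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define W 6
#define H 5

/* Same recurrence as above: dst(x,y) = dst(x,y-1) + running row sum of d*d.
 * A guard row/column of zeros replaces the dst[-1] boundary reads. */
static void ssd_integral(const uint8_t *s1, const uint8_t *s2, uint32_t *dst,
                         int dst_stride)
{
    for (int y = 0; y < H; y++) {
        uint32_t *row = dst + (y + 1) * dst_stride + 1;
        uint32_t acc = 0;
        for (int x = 0; x < W; x++) {
            const int d = s1[y * W + x] - s2[y * W + x];
            acc += d * d;
            row[x] = row[x - dst_stride] + acc;
        }
    }
}

int main(void)
{
    uint8_t a[W * H], b[W * H];
    uint32_t ii[(W + 1) * (H + 1)] = {0};
    uint32_t brute = 0;

    for (int i = 0; i < W * H; i++) {
        a[i] = (uint8_t)(rand() & 0xff);
        b[i] = (uint8_t)(rand() & 0xff);
        const int d = a[i] - b[i];
        brute += d * d;
    }
    ssd_integral(a, b, ii, W + 1);
    /* the bottom-right cell must equal the brute-force sum over the whole image */
    printf("%u == %u\n", (unsigned)ii[(H + 1) * (W + 1) - 1], (unsigned)brute);
    return 0;
}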
static void compute_unsafe_ssd_integral_image(uint32_t *dst, int dst_linesize_32,
                                              int startx, int starty,
                                              const uint8_t *src, int linesize,
                                              int offx, int offy, int r,
                                              int sw, int sh, int w, int h)
{
    int x, y;

    /* here the patches can overlap the plane borders, so every source
     * coordinate is clamped into [0, sw-1] x [0, sh-1] (edge replication) */
    for (y = starty; y < starty + h; y++) {
        uint32_t acc = dst[y*dst_linesize_32 + startx - 1] - dst[(y-1)*dst_linesize_32 + startx - 1];
        const int s1y = av_clip(y -  r,         0, sh - 1);
        const int s2y = av_clip(y - (r + offy), 0, sh - 1);

        for (x = startx; x < startx + w; x++) {
            const int s1x = av_clip(x -  r,         0, sw - 1);
            const int s2x = av_clip(x - (r + offx), 0, sw - 1);
            const uint8_t v1 = src[s1y*linesize + s1x];
            const uint8_t v2 = src[s2y*linesize + s2x];
            const int d = v1 - v2;
            acc += d * d;
            dst[y*dst_linesize_32 + x] = dst[(y-1)*dst_linesize_32 + x] + acc;
        }
    }
}
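The clamping turns out-of-range reads into edge replication. A minimal standalone sketch of that behaviour (clampi() and sample_clamped() are names made up for this example, standing in for av_clip() and the clamped indexing above):

#include <stdint.h>
#include <stdio.h>

#define W 4
#define H 3

/* Clamp-to-edge sampling: out-of-range coordinates fall back to the nearest
 * border pixel, which is what the av_clip() calls achieve. */
static int clampi(int v, int lo, int hi) { return v < lo ? lo : v > hi ? hi : v; }

static uint8_t sample_clamped(const uint8_t *img, int x, int y)
{
    return img[clampi(y, 0, H - 1) * W + clampi(x, 0, W - 1)];
}

int main(void)
{
    const uint8_t img[W * H] = {
        10, 20, 30, 40,
        50, 60, 70, 80,
        90,100,110,120,
    };
    printf("%d %d %d\n",
           sample_clamped(img, -3, -5),   /* above/left of the image -> 10  */
           sample_clamped(img,  9,  1),   /* right of row 1          -> 80  */
           sample_clamped(img,  2, 99));  /* below column 2          -> 110 */
    return 0;
}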
static void compute_ssd_integral_image(uint32_t *ii, int ii_linesize_32,
                                       const uint8_t *src, int linesize,
                                       int offx, int offy, int e, int w, int h)
{
    /* dimensions of the padded integral image: e extra pixels on every side */
    const int ii_w = w + e*2;
    const int ii_h = h + e*2;

    /* top-left position of the (offx,offy)-shifted source inside the padding */
    const int s2x = e + offx;
    const int s2y = e + offy;

    /* bounds of the safe area, i.e. the rectangle where both the unshifted
     * source (at s1x/s1y, set earlier in the function) and the shifted source
     * can be read without any clipping */
    const int startx_safe = FFMAX(s1x, s2x);
    const int starty_safe = FFMAX(s1y, s2y);
    const int endx_safe   = FFMIN(s1x + w, s2x + w);
    const int endy_safe   = FFMIN(s1y + h, s2y + h);

    /* left band (unsafe): everything left of the safe area, over the safe rows */
        /* ... */
        startx_safe, endy_safe - starty_safe);

    /* safe area: handled by compute_safe_ssd_integral_image_c() */
        /* ... */
        src + (starty_safe - s1y) * linesize + (startx_safe - s1x), linesize,
        src + (starty_safe - s2y) * linesize + (startx_safe - s2x), linesize,
        endx_safe - startx_safe, endy_safe - starty_safe);

    /* right band (unsafe) */
        /* ... */
        endx_safe, starty_safe,
        /* ... */
        ii_w - endx_safe, endy_safe - starty_safe);

    /* bottom band (unsafe): full padded width, everything below the safe rows */
        /* ... */
        ii_w, ii_h - endy_safe);
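The FFMAX/FFMIN bounds above are just a rectangle intersection. A quick sketch of the idea (MAX/MIN stand in for FFMAX/FFMIN, and s1x = s1y = e is an assumption of this example, since those definitions are not part of the excerpt):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* For a w x h plane padded by e pixels, print the rectangle where both the
 * unshifted source (anchored at (e,e)) and the (offx,offy)-shifted copy
 * (anchored at (e+offx, e+offy)) are fully readable. */
static void print_safe_area(int w, int h, int e, int offx, int offy)
{
    const int s1x = e,        s1y = e;
    const int s2x = e + offx, s2y = e + offy;
    const int startx = MAX(s1x, s2x), starty = MAX(s1y, s2y);
    const int endx   = MIN(s1x + w, s2x + w);
    const int endy   = MIN(s1y + h, s2y + h);

    printf("off=(%+d,%+d): safe x=[%d,%d) y=[%d,%d) -> %dx%d\n",
           offx, offy, startx, endx, starty, endy, endx - startx, endy - starty);
}

int main(void)
{
    print_safe_area(16, 16, 4, 0, 0);    /* no shift: the whole 16x16 area is safe */
    print_safe_area(16, 16, 4, 3, -2);   /* shifted: the overlap shrinks by |off|  */
    return 0;
}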
    /* config_input(): the integral image covers the plane plus e pixels of
     * padding on every side */
    s->ii_w = inlink->w + e*2;
    s->ii_h = inlink->h + e*2;
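The integral-image code above reads dst[-1] and dst[-dst_linesize_32 - 1], so the buffer needs a zeroed guard row and column in front of it. A plain-C sketch of that layout (calloc() instead of av_mallocz_array(), no alignment, and all names here are inventions of this example):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Zero-initialized buffer with one extra guard row and column, so that the
 * integral-image code may read index -1 and -stride-1 at x==0 / y==0. */
static uint32_t *alloc_padded_integral(int ii_w, int ii_h, int *stride_32,
                                       uint32_t **origin)
{
    const int stride = ii_w + 1;                       /* +1 guard column */
    uint32_t *buf = calloc((size_t)(ii_h + 1) * stride, sizeof(*buf));
    if (!buf)
        return NULL;
    *origin    = buf;                                  /* pointer to free later */
    *stride_32 = stride;
    return buf + stride + 1;                           /* skip guard row + column */
}

int main(void)
{
    uint32_t *origin, *ii;
    int ii_lz_32;

    ii = alloc_padded_integral(38, 26, &ii_lz_32, &origin); /* e.g. 30x18 plane, e=4 */
    if (!ii)
        return 1;
    printf("stride=%d, ii[-1]=%u, ii[-stride-1]=%u\n",
           ii_lz_32, (unsigned)ii[-1], (unsigned)ii[-ii_lz_32 - 1]);
    free(origin);
    return 0;
}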
    /* nlmeans_slice(): each job handles an even share of the rows, selected by
     * its job number */
    const int slice_start = (process_h *  jobnr   ) / nb_jobs;
    const int slice_end   = (process_h * (jobnr+1)) / nb_jobs;

    /* starty/endy are derived from slice_start/slice_end (derivation elided) */
    for (y = starty; y < endy; y++) {
        /* ... */
            /* candidates whose patch difference exceeds max_meaningful_diff
             * would get a negligible weight, so they are skipped outright */
            if (patch_diff_sq < s->max_meaningful_diff) {
                /* weight is looked up in s->weight_lut[] from patch_diff_sq */
                wa->sum += weight * src[y*src_linesize + x];
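The slice_start/slice_end formula partitions the rows so that every row belongs to exactly one job and slice sizes differ by at most one row. A tiny check of that property:

#include <stdio.h>

/* Even split of `rows` rows across `nb_jobs` workers using the same
 * (rows * job) / nb_jobs formula as above. */
int main(void)
{
    const int rows = 100, nb_jobs = 3;

    for (int job = 0; job < nb_jobs; job++) {
        const int start = (rows *  job     ) / nb_jobs;
        const int end   = (rows * (job + 1)) / nb_jobs;
        printf("job %d: rows [%d, %d) -> %d rows\n", job, start, end, end - start);
    }
    return 0;
}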
static int nlmeans_plane(AVFilterContext *ctx, int w, int h, int p, int r,
                         uint8_t *dst, int dst_linesize,
                         const uint8_t *src, int src_linesize)
{
    /* ... */
    /* skip the e-pixel padding so that ii coordinate (0,0) is the plane origin */
    const uint32_t *centered_ii = s->ii + e*s->ii_lz_32 + e;

    /* try every candidate offset of the research window */
    for (offy = -r; offy <= r; offy++) {
        for (offx = -r; offx <= r; offx++) {
                /* job description handed to nlmeans_slice(): the shifted
                 * source, the output range this offset can contribute to, and
                 * where its SSD integral image starts */
                .src      = src + offy*src_linesize + offx,
                /* ... */
                .startx   = FFMAX(0, -offx),
                .starty   = FFMAX(0, -offy),
                .endx     = FFMIN(w, w - offx),
                .endy     = FFMIN(h, h - offy),
                .ii_start = centered_ii + offy*s->ii_lz_32 + offx,
                /* ... */

            compute_ssd_integral_image(s->ii, s->ii_lz_32, src, src_linesize,
                                       offx, offy, e, w, h);
            /* ... */
        }
    }

    /* finally add the central (unshifted) patch with weight 1 and write out
     * the normalized weighted average for every pixel */
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            /* ... */
            wa->sum += 1.0 * src[y*src_linesize + x];
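The weighted accumulation ends with sum / total_weight per pixel. A compact illustration of that accumulator (struct weighted_avg and its total_weight member are spelled out here as assumptions of this sketch; only wa->sum appears in the excerpt):

#include <stdio.h>

/* Per-pixel weighted-average accumulator in the spirit of the wa-> updates above. */
struct weighted_avg { double total_weight, sum; };

static void accumulate(struct weighted_avg *wa, double weight, int sample)
{
    wa->total_weight += weight;
    wa->sum          += weight * sample;
}

int main(void)
{
    struct weighted_avg wa = {0};

    accumulate(&wa, 1.00, 120);   /* the center patch always counts with weight 1 */
    accumulate(&wa, 0.60, 132);   /* similar candidate patch                      */
    accumulate(&wa, 0.05, 40);    /* dissimilar candidate: tiny contribution      */
    printf("denoised value: %f\n", wa.sum / wa.total_weight);
    return 0;
}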
#define CHECK_ODD_FIELD(field, name) do {                         \
    if (!(s->field & 1)) {                                        \
        s->field |= 1;                                            \
        av_log(ctx, AV_LOG_WARNING, name " size must be odd, "    \
               "setting it to %d\n", s->field);                   \
    }                                                             \
} while (0)

    /* init(): the user-facing sigma is mapped to the filtering strength h */
    const double h = s->sigma * 10.;

    av_log(ctx, AV_LOG_INFO,
           "Research window: %dx%d / %dx%d, patch size: %dx%d / %dx%d\n",
           /* ... */);

    .priv_class    = &nlmeans_class,
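Since the per-pixel weight is an exponential of the patch difference, the filter precomputes it into weight_lut[]. The sketch below shows that kind of table (the pdiff_scale / max_meaningful_diff derivation is an assumption of this example; only WEIGHT_LUT_SIZE, the sigma * 10. mapping and the s->max_meaningful_diff cutoff appear in the excerpt):

#include <math.h>
#include <stdio.h>

#define WEIGHT_LUT_NBITS 9
#define WEIGHT_LUT_SIZE  (1 << WEIGHT_LUT_NBITS)

int main(void)
{
    /* Quantize patch differences in [0, max_meaningful_diff) onto
     * WEIGHT_LUT_SIZE bins and precompute exp(-diff / h^2) once, so the
     * per-pixel loop is a table lookup instead of an exp() call. */
    const double sigma = 7.0;
    const double h = sigma * 10.;                       /* same mapping as above */
    const double pdiff_scale = 1. / (h * h);
    const double max_meaningful_diff = -log(1 / 255.) / pdiff_scale;
    double weight_lut[WEIGHT_LUT_SIZE];

    for (int i = 0; i < WEIGHT_LUT_SIZE; i++) {
        const double patch_diff_sq = i * max_meaningful_diff / WEIGHT_LUT_SIZE;
        weight_lut[i] = exp(-patch_diff_sq * pdiff_scale);
    }
    printf("w[0]=%f w[mid]=%f w[last]=%f (max_meaningful_diff=%.1f)\n",
           weight_lut[0], weight_lut[WEIGHT_LUT_SIZE / 2],
           weight_lut[WEIGHT_LUT_SIZE - 1], max_meaningful_diff);
    return 0;
}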
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
This structure describes decoded (raw) audio or video data.
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Main libavfilter public API header.
AVFILTER_DEFINE_CLASS(nlmeans)
int h
agreed upon image height
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
static const AVFilterPad nlmeans_outputs[]
static void compute_safe_ssd_integral_image_c(uint32_t *dst, int dst_linesize_32, const uint8_t *s1, int linesize1, const uint8_t *s2, int linesize2, int w, int h)
Compute squared difference of the safe area (the zone where s1 and s2 overlap).
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
const char * name
Pad name.
static const AVOption nlmeans_options[]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static const AVFilterPad nlmeans_inputs[]
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static int config_input(AVFilterLink *inlink)
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range...
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
static int query_formats(AVFilterContext *ctx)
A filter pad used for either input or output.
A link between two filters.
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void * priv
private data for use by the filter
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
simple assert() macros that are a bit more flexible than ISO C assert().
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int w
agreed upon image width
static void compute_ssd_integral_image(uint32_t *ii, int ii_linesize_32, const uint8_t *src, int linesize, int offx, int offy, int e, int w, int h)
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
static int nlmeans_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
#define CHECK_ODD_FIELD(field, name)
double weight_lut[WEIGHT_LUT_SIZE]
static const AVFilterPad inputs[]
static const AVFilterPad outputs[]
int format
agreed upon media format
#define FF_ARRAY_ELEMS(a)
#define AV_LOG_INFO
Standard information.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static void compute_unsafe_ssd_integral_image(uint32_t *dst, int dst_linesize_32, int startx, int starty, const uint8_t *src, int linesize, int offx, int offy, int r, int sw, int sh, int w, int h)
Compute squared difference of an unsafe area (the zone where neither s1 nor s2 is guaranteed to be readable).
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Describe the class of an AVClass context structure.
const char * name
Filter name.
AVFilterLink ** outputs
array of pointers to output links
static int weight(int i, int blen, int offset)
static enum AVPixelFormat pix_fmts[]
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static av_cold int init(AVFilterContext *ctx)
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
avfilter_execute_func * execute
AVFilterContext * dst
dest filter
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static int get_integral_patch_value(const uint32_t *ii, int ii_lz_32, int x, int y, int p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
#define av_malloc_array(a, b)
static av_cold void uninit(AVFilterContext *ctx)
AVPixelFormat
Pixel format.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
const uint32_t * ii_start
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
static int nlmeans_plane(AVFilterContext *ctx, int w, int h, int p, int r, uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize)