#include <unistd.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
-#include <libswscale/swscale.h>
+#include <libavfilter/avfiltergraph.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
#include <libavresample/avresample.h>
#include <libavutil/opt.h>
#include <libavutil/audio_fifo.h>
AVCodec *vid_ocodec;
AVFrame *vid_dec_frame;
- struct SwsContext *vid_scaler;
AVFrame *vid_enc_frame;
+ AVFilterGraph *flt_graph;
+ AVFilterContext *flt_bufsrcctx;
+ AVFilterContext *flt_bufsinkctx;
+
int16_t vid_width;
int16_t vid_height;
}
+/* create a simple deinterlacer-scaler video filter chain
+ *
+ * Builds: buffer("in") -> yadif -> scale -> buffersink("out"), configured
+ * from the decoder context (source geometry/pix_fmt/timebase) and the
+ * encoder context (target geometry/pix_fmt).
+ *
+ * vs   - video stream state; flt_graph / flt_bufsrcctx / flt_bufsinkctx
+ *        are (re)initialized here, any previous graph is freed first
+ * t    - transcoder handle, used only for error logging (shortid)
+ * ictx - decoder (input) codec context
+ * octx - encoder (output) codec context
+ *
+ * Returns 0 on success; non-zero (1 or a negative libav error code) on
+ * failure, with all partially-built filter state released.
+ */
+static int
+create_video_filter(video_stream_t *vs, transcoder_t *t,
+                    AVCodecContext *ictx, AVCodecContext *octx)
+{
+  AVFilterInOut *flt_inputs, *flt_outputs;
+  AVFilter *flt_bufsrc, *flt_bufsink;
+  enum AVPixelFormat pix_fmts[2];
+  char opt[128];
+  int err;
+
+  err = 1;
+  flt_inputs = flt_outputs = NULL;
+  flt_bufsrc = flt_bufsink = NULL;
+
+  /* rebuild from scratch if a graph from a previous configuration exists */
+  if (vs->flt_graph)
+    avfilter_graph_free(&vs->flt_graph);
+
+  vs->flt_graph = avfilter_graph_alloc();
+  if (!vs->flt_graph)
+    return err;
+
+  flt_inputs = avfilter_inout_alloc();
+  if (!flt_inputs)
+    goto out_err;
+
+  flt_outputs = avfilter_inout_alloc();
+  if (!flt_outputs)
+    goto out_err;
+
+  flt_bufsrc = avfilter_get_by_name("buffer");
+  flt_bufsink = avfilter_get_by_name("buffersink");
+  if (!flt_bufsrc || !flt_bufsink) {
+    tvherror("transcode", "%04X: libav default buffers unknown", shortid(t));
+    goto out_err;
+  }
+
+  /* describe the frames the decoder will push into the graph */
+  memset(opt, 0, sizeof(opt));
+  snprintf(opt, sizeof(opt), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
+           ictx->width,
+           ictx->height,
+           ictx->pix_fmt,
+           ictx->time_base.num,
+           ictx->time_base.den,
+           ictx->sample_aspect_ratio.num,
+           ictx->sample_aspect_ratio.den);
+
+  err = avfilter_graph_create_filter(&vs->flt_bufsrcctx, flt_bufsrc, "in",
+                                     opt, NULL, vs->flt_graph);
+  if (err < 0) {
+    tvherror("transcode", "%04X: fltchain IN init error", shortid(t));
+    goto out_err;
+  }
+
+  err = avfilter_graph_create_filter(&vs->flt_bufsinkctx, flt_bufsink,
+                                     "out", NULL, NULL, vs->flt_graph);
+  if (err < 0) {
+    tvherror("transcode", "%04X: fltchain OUT init error", shortid(t));
+    goto out_err;
+  }
+
+  /* constrain the sink to the encoder's pixel format; without this the
+   * graph may negotiate a different format than octx->pix_fmt and the
+   * encoder would be handed mismatched frames */
+  pix_fmts[0] = octx->pix_fmt;
+  pix_fmts[1] = AV_PIX_FMT_NONE;
+  err = av_opt_set_int_list(vs->flt_bufsinkctx, "pix_fmts", pix_fmts,
+                            AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+  if (err < 0) {
+    tvherror("transcode", "%04X: fltchain cannot set output pixfmt",
+             shortid(t));
+    goto out_err;
+  }
+
+  /* note the deliberate swap: from the parsed chain's point of view its
+   * output connects to our buffersrc ("in") and its input connects to our
+   * buffersink ("out"), as required by avfilter_graph_parse_ptr() */
+  flt_outputs->name = av_strdup("in");
+  flt_outputs->filter_ctx = vs->flt_bufsrcctx;
+  flt_outputs->pad_idx = 0;
+  flt_outputs->next = NULL;
+  flt_inputs->name = av_strdup("out");
+  flt_inputs->filter_ctx = vs->flt_bufsinkctx;
+  flt_inputs->pad_idx = 0;
+  flt_inputs->next = NULL;
+  if (!flt_outputs->name || !flt_inputs->name) {
+    /* av_strdup() failure would otherwise surface as a confusing parse error */
+    err = AVERROR(ENOMEM);
+    goto out_err;
+  }
+
+  /* add filters: yadif to deinterlace and a scaler */
+  memset(opt, 0, sizeof(opt));
+  snprintf(opt, sizeof(opt), "yadif,scale=%dx%d",
+           octx->width,
+           octx->height);
+  err = avfilter_graph_parse_ptr(vs->flt_graph,
+                                 opt,
+                                 &flt_inputs,
+                                 &flt_outputs,
+                                 NULL);
+  if (err < 0) {
+    tvherror("transcode", "%04X: failed to init filter chain", shortid(t));
+    goto out_err;
+  }
+
+  err = avfilter_graph_config(vs->flt_graph, NULL);
+  if (err < 0) {
+    tvherror("transcode", "%04X: failed to config filter chain", shortid(t));
+    goto out_err;
+  }
+
+  avfilter_inout_free(&flt_inputs);
+  avfilter_inout_free(&flt_outputs);
+
+  return 0; /* all OK */
+
+out_err:
+  /* avfilter_inout_free() and avfilter_graph_free() accept already-NULL
+   * arguments and NULL the pointers they are given */
+  avfilter_inout_free(&flt_inputs);
+  avfilter_inout_free(&flt_outputs);
+  if (vs->flt_graph)
+    avfilter_graph_free(&vs->flt_graph);
+
+  return err;
+}
+
/**
*
*/
AVCodecContext *ictx, *octx;
AVDictionary *opts;
AVPacket packet, packet2;
- AVPicture deint_pic;
- uint8_t *buf, *deint;
- int length, len, ret, got_picture, got_output, got_ref;
+ int length, ret, got_picture, got_output, got_ref;
video_stream_t *vs = (video_stream_t*)ts;
streaming_message_t *sm;
th_pkt_t *pkt2;
icodec = vs->vid_icodec;
ocodec = vs->vid_ocodec;
- buf = deint = NULL;
opts = NULL;
got_ref = 0;
switch (ts->ts_type) {
case SCT_MPEG2VIDEO:
- octx->pix_fmt = PIX_FMT_YUV420P;
+ octx->pix_fmt = AV_PIX_FMT_YUV420P;
octx->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (t->t_props.tp_vbitrate < 64) {
break;
case SCT_VP8:
- octx->pix_fmt = PIX_FMT_YUV420P;
+ octx->pix_fmt = AV_PIX_FMT_YUV420P;
// setting quality to realtime will use as much CPU for transcoding as possible,
// while still encoding in realtime
break;
case SCT_H264:
- octx->pix_fmt = PIX_FMT_YUV420P;
+ octx->pix_fmt = AV_PIX_FMT_YUV420P;
octx->flags |= CODEC_FLAG_GLOBAL_HEADER;
// Default = "medium". We gain more encoding speed compared to the loss of quality when lowering it _slightly_.
break;
case SCT_HEVC:
- octx->pix_fmt = PIX_FMT_YUV420P;
+ octx->pix_fmt = AV_PIX_FMT_YUV420P;
octx->flags |= CODEC_FLAG_GLOBAL_HEADER;
// on all hardware ultrafast (or maybe superfast) should be safe
transcoder_stream_invalidate(ts);
goto cleanup;
}
- }
- len = avpicture_get_size(ictx->pix_fmt, ictx->width, ictx->height);
- deint = av_malloc(len);
-
- avpicture_fill(&deint_pic,
- deint,
- ictx->pix_fmt,
- ictx->width,
- ictx->height);
-
- if (avpicture_deinterlace(&deint_pic,
- (AVPicture *)vs->vid_dec_frame,
- ictx->pix_fmt,
- ictx->width,
- ictx->height) < 0) {
- tvherror("transcode", "%04X: Cannot deinterlace frame", shortid(t));
- transcoder_stream_invalidate(ts);
- goto cleanup;
+ if (create_video_filter(vs, t, ictx, octx)) {
+ tvherror("transcode", "%04X: Video filter creation failed",
+ shortid(t));
+ transcoder_stream_invalidate(ts);
+ goto cleanup;
+ }
}
- len = avpicture_get_size(octx->pix_fmt, octx->width, octx->height);
- buf = av_malloc(len + FF_INPUT_BUFFER_PADDING_SIZE);
- memset(buf, 0, len);
-
- avpicture_fill((AVPicture *)vs->vid_enc_frame,
- buf,
- octx->pix_fmt,
- octx->width,
- octx->height);
-
- vs->vid_scaler = sws_getCachedContext(vs->vid_scaler,
- ictx->width,
- ictx->height,
- ictx->pix_fmt,
- octx->width,
- octx->height,
- octx->pix_fmt,
- 1,
- NULL,
- NULL,
- NULL);
-
- if (sws_scale(vs->vid_scaler,
- (const uint8_t * const*)deint_pic.data,
- deint_pic.linesize,
- 0,
- ictx->height,
- vs->vid_enc_frame->data,
- vs->vid_enc_frame->linesize) < 0) {
- tvherror("transcode", "%04X: Cannot scale frame", shortid(t));
+ /* push decoded frame into filter chain */
+ if (av_buffersrc_add_frame(vs->flt_bufsrcctx, vs->vid_dec_frame) < 0) {
+ tvherror("transcode", "%04X: filter input error", shortid(t));
transcoder_stream_invalidate(ts);
goto cleanup;
}
- vs->vid_enc_frame->format = octx->pix_fmt;
- vs->vid_enc_frame->width = octx->width;
- vs->vid_enc_frame->height = octx->height;
-
- vs->vid_enc_frame->pkt_pts = vs->vid_dec_frame->pkt_pts;
- vs->vid_enc_frame->pkt_dts = vs->vid_dec_frame->pkt_dts;
-
- if (vs->vid_dec_frame->reordered_opaque != AV_NOPTS_VALUE)
- vs->vid_enc_frame->pts = vs->vid_dec_frame->reordered_opaque;
-
- else if (ictx->coded_frame && ictx->coded_frame->pts != AV_NOPTS_VALUE)
- vs->vid_enc_frame->pts = vs->vid_dec_frame->pts;
-
- ret = avcodec_encode_video2(octx, &packet2, vs->vid_enc_frame, &got_output);
- if (ret < 0) {
- tvherror("transcode", "%04X: Error encoding frame", shortid(t));
- transcoder_stream_invalidate(ts);
- goto cleanup;
+ /* and pull out a filtered frame */
+ while (1) {
+ ret = av_buffersink_get_frame(vs->flt_bufsinkctx, vs->vid_enc_frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ break;
+ if (ret < 0) {
+ tvherror("transcode", "%04X: filter output error", shortid(t));
+ transcoder_stream_invalidate(ts);
+ goto cleanup;
+ }
+
+ vs->vid_enc_frame->format = octx->pix_fmt;
+ vs->vid_enc_frame->width = octx->width;
+ vs->vid_enc_frame->height = octx->height;
+
+ vs->vid_enc_frame->pkt_pts = vs->vid_dec_frame->pkt_pts;
+ vs->vid_enc_frame->pkt_dts = vs->vid_dec_frame->pkt_dts;
+
+ if (vs->vid_dec_frame->reordered_opaque != AV_NOPTS_VALUE)
+ vs->vid_enc_frame->pts = vs->vid_dec_frame->reordered_opaque;
+
+ else if (ictx->coded_frame && ictx->coded_frame->pts != AV_NOPTS_VALUE)
+ vs->vid_enc_frame->pts = vs->vid_dec_frame->pts;
+
+ ret = avcodec_encode_video2(octx, &packet2, vs->vid_enc_frame, &got_output);
+ if (ret < 0) {
+ tvherror("transcode", "%04X: Error encoding frame", shortid(t));
+ transcoder_stream_invalidate(ts);
+ goto cleanup;
+ }
+ av_frame_unref(vs->vid_enc_frame);
}
if (got_output)
av_free_packet(&packet);
- if(buf)
- av_free(buf);
-
- if(deint)
- av_free(deint);
-
if(opts)
av_dict_free(&opts);
if(vs->vid_dec_frame)
av_free(vs->vid_dec_frame);
- if(vs->vid_scaler)
- sws_freeContext(vs->vid_scaler);
-
if(vs->vid_enc_frame)
av_free(vs->vid_enc_frame);
if (vs->vid_first_pkt)
pkt_ref_dec(vs->vid_first_pkt);
+ if (vs->flt_graph) {
+ avfilter_graph_free(&vs->flt_graph);
+ vs->flt_graph = NULL;
+ }
+
free(ts);
}
if (t->t_props.tp_nrprocessors)
vs->vid_octx->thread_count = t->t_props.tp_nrprocessors;
- vs->vid_dec_frame = avcodec_alloc_frame();
- vs->vid_enc_frame = avcodec_alloc_frame();
+ vs->vid_dec_frame = av_frame_alloc();
+ vs->vid_enc_frame = av_frame_alloc();
+
+ av_frame_unref(vs->vid_dec_frame);
+ av_frame_unref(vs->vid_enc_frame);
- avcodec_get_frame_defaults(vs->vid_dec_frame);
- avcodec_get_frame_defaults(vs->vid_enc_frame);
+ vs->flt_graph = NULL; /* allocated in packet processor */
LIST_INSERT_HEAD(&t->t_stream_list, (transcoder_stream_t*)vs, ts_link);