Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement video filtering with libavfilter #68

Open
wants to merge 9 commits into
base: master
Choose a base branch
from
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ interface.
* Video frame scaling and pixel format transformations
* Audio resampling
* Bitstream filters
* Simple video filter graphs

## Requirements

Expand All @@ -35,6 +36,7 @@ interface.
* libavformat
* libswresample
* libswscale
* libavfilter

## Compilation

Expand Down
8 changes: 8 additions & 0 deletions ac-ffmpeg/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,11 @@ path = "../ac-ffmpeg-features"

[dev-dependencies]
clap = "2.33"

[features]
filters = []

[[example]]
name = "filtering"
path = "examples/filtering.rs"
required-features = ["filters"]
13 changes: 11 additions & 2 deletions ac-ffmpeg/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,15 +50,24 @@ fn main() {
.file(src_codec_dir.join("mod.c"))
.file(src_codec_dir.join("frame.c"))
.file(src_codec_audio_dir.join("resampler.c"))
.file(src_codec_video_dir.join("scaler.c"))
.compile("ffwrapper");
.file(src_codec_video_dir.join("scaler.c"));

if cfg!(feature = "filters") {
build.file(src_codec_video_dir.join("filter.c"));
}

build.compile("ffwrapper");

for dir in ac_ffmpeg_build::ffmpeg_lib_dirs(true) {
println!("cargo:rustc-link-search=native={}", dir.display());
}

let ffmpeg_link_mode = link_mode();

if cfg!(feature = "filters") {
link("avfilter", ffmpeg_link_mode);
}

link("avcodec", ffmpeg_link_mode);
link("avformat", ffmpeg_link_mode);
link("avutil", ffmpeg_link_mode);
Expand Down
159 changes: 159 additions & 0 deletions ac-ffmpeg/examples/filtering.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
use std::{fs::File, time::Duration};

use ac_ffmpeg::{
codec::{
video::{self, filter::VideoFilter, VideoEncoder, VideoFrameMut},
CodecParameters, Encoder, Filter, VideoCodecParameters,
},
format::{
io::IO,
muxer::{Muxer, OutputFormat},
},
time::{TimeBase, Timestamp},
Error,
};
use clap::{App, Arg};

/// Open a given output file.
/// Open a given output file.
///
/// Builds a muxer that writes all given elementary streams into `path`,
/// inferring the container format from the file extension.
fn open_output(path: &str, elementary_streams: &[CodecParameters]) -> Result<Muxer<File>, Error> {
    // Guess the container format first so we do not create the file if the
    // extension is unknown.
    let format = OutputFormat::guess_from_file_name(path)
        .ok_or_else(|| Error::new(format!("unable to guess output format for file: {}", path)))?;

    let file = File::create(path)
        .map_err(|err| Error::new(format!("unable to create output file {}: {}", path, err)))?;

    let mut builder = Muxer::builder();

    // Register one output stream per set of codec parameters.
    for params in elementary_streams {
        builder.add_stream(params)?;
    }

    builder.build(IO::from_seekable_write_stream(file), format)
}

/// Create h264 encoded black video file of a given length and with a given
/// resolution, with timecode burnt in using the drawtext filter.
///
/// The pipeline is: black frame -> drawtext filter -> h264 encoder -> muxer.
/// After every push, all available output must be drained from the filter and
/// the encoder before pushing the next input.
fn encode_black_video_with_bitc(
    output: &str,
    width: u32,
    height: u32,
    duration: Duration,
) -> Result<(), Error> {
    // note: it is 1/fps
    let time_base = TimeBase::new(1, 25);

    let pixel_format = video::frame::get_pixel_format("yuv420p");

    // create a black video frame with a given resolution; the same immutable
    // frame is cloned for every output frame, only the PTS changes
    let frame = VideoFrameMut::black(pixel_format, width as _, height as _)
        .with_time_base(time_base)
        .freeze();

    let mut encoder = VideoEncoder::builder("libx264")?
        .pixel_format(pixel_format)
        .width(width as _)
        .height(height as _)
        .time_base(time_base)
        .build()?;

    let codec_parameters: VideoCodecParameters = encoder.codec_parameters().into();

    // the drawtext filter renders a running timecode into each frame;
    // rate=25 must match the time base above
    let mut drawtext_filter = VideoFilter::builder()?
        .input_codec_parameters(&codec_parameters)
        .input_time_base(time_base)
        .filter_description(
            "drawtext=timecode='00\\:00\\:00\\:00':rate=25:fontsize=72:fontcolor=white",
        )
        .build()?;

    let mut muxer = open_output(output, &[codec_parameters.into()])?;

    let mut frame_idx = 0;
    let mut frame_timestamp = Timestamp::new(frame_idx, time_base);
    let max_timestamp = Timestamp::from_millis(0) + duration;

    while frame_timestamp < max_timestamp {
        let cloned_frame = frame.clone().with_pts(frame_timestamp);

        // convert the CodecError into a plain Error for the caller
        if let Err(err) = drawtext_filter.try_push(cloned_frame) {
            return Err(Error::new(err.to_string()));
        }

        // drain the filter, then the encoder, before pushing the next frame
        while let Some(frame) = drawtext_filter.take()? {
            encoder.push(frame)?;

            while let Some(packet) = encoder.take()? {
                muxer.push(packet.with_stream_index(0))?;
            }
        }

        frame_idx += 1;
        frame_timestamp = Timestamp::new(frame_idx, time_base);
    }

    // flush the filter and feed any remaining frames to the encoder
    drawtext_filter.flush()?;
    while let Some(frame) = drawtext_filter.take()? {
        encoder.push(frame)?;

        while let Some(packet) = encoder.take()? {
            muxer.push(packet.with_stream_index(0))?;
        }
    }

    // flush the encoder and write out any remaining packets
    encoder.flush()?;

    while let Some(packet) = encoder.take()? {
        muxer.push(packet.with_stream_index(0))?;
    }

    muxer.flush()
}

/// Parse the command line and generate the output file.
fn main() {
    let matches = App::new("encoding")
        .arg(
            Arg::with_name("output")
                .required(true)
                .takes_value(true)
                .value_name("OUTPUT")
                .help("Output file"),
        )
        .arg(
            Arg::with_name("width")
                .short("w")
                .takes_value(true)
                .value_name("WIDTH")
                .help("width")
                .default_value("640"),
        )
        .arg(
            Arg::with_name("height")
                .short("h")
                .takes_value(true)
                .value_name("HEIGHT")
                .help("height")
                .default_value("480"),
        )
        .arg(
            Arg::with_name("duration")
                .short("d")
                .takes_value(true)
                .value_name("DURATION")
                .help("duration in seconds")
                .default_value("10"),
        )
        .get_matches();

    /// Parse a numeric argument, exiting with a readable message instead of a
    /// panic backtrace when the value is not a valid number.
    fn parse_or_exit<T: std::str::FromStr>(value: &str, name: &str) -> T {
        value.parse().unwrap_or_else(|_| {
            eprintln!("ERROR: invalid {}: {}", name, value);
            std::process::exit(1);
        })
    }

    // `output` is required and the numeric args have defaults, so the
    // `value_of(...).unwrap()` calls cannot fail.
    let output_filename = matches.value_of("output").unwrap();
    let width = parse_or_exit(matches.value_of("width").unwrap(), "width");
    let height = parse_or_exit(matches.value_of("height").unwrap(), "height");
    let duration: f32 = parse_or_exit(matches.value_of("duration").unwrap(), "duration");

    let duration = Duration::from_secs_f32(duration);

    if let Err(err) = encode_black_video_with_bitc(output_filename, width, height, duration) {
        eprintln!("ERROR: {}", err);
    }
}
31 changes: 31 additions & 0 deletions ac-ffmpeg/src/codec/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -991,3 +991,34 @@ pub trait Encoder {
/// Take the next packet from the encoder.
fn take(&mut self) -> Result<Option<Packet>, Error>;
}

/// Common interface for filters that consume and produce frames.
///
/// The expected workflow is to push input frames via [`Filter::push`] (or
/// [`Filter::try_push`]), draining all available output frames with
/// [`Filter::take`] after each push. Once there is no more input, call
/// [`Filter::flush`] and take the remaining frames.
pub trait Filter {
    /// Type of the frames consumed and produced by this filter.
    type Frame;

    /// Push a given frame to the filter.
    ///
    /// # Panics
    /// The method panics if the operation is not expected (i.e. another
    /// operation needs to be done).
    fn push(&mut self, frame: Self::Frame) -> Result<(), Error> {
        self.try_push(frame).map_err(|err| err.unwrap_inner())
    }

    /// Push a given frame to the filter.
    fn try_push(&mut self, frame: Self::Frame) -> Result<(), CodecError>;

    /// Flush the filter.
    ///
    /// # Panics
    /// The method panics if the operation is not expected (i.e. another
    /// operation needs to be done).
    fn flush(&mut self) -> Result<(), Error> {
        self.try_flush().map_err(|err| err.unwrap_inner())
    }

    /// Flush the filter.
    fn try_flush(&mut self) -> Result<(), CodecError>;

    /// Take the next frame from the filter.
    fn take(&mut self) -> Result<Option<Self::Frame>, Error>;
}
139 changes: 139 additions & 0 deletions ac-ffmpeg/src/codec/video/filter.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Allocate a new, empty filter graph. Returns NULL on allocation failure. */
AVFilterGraph* ffw_filtergraph_new() {
    return avfilter_graph_alloc();
}

/*
 * Create a "buffer" source filter named "in" in the given graph.
 *
 * The source is configured from the given codec parameters and time base so
 * that decoded frames can be inserted at the head of the filter chain.
 * Returns the created filter context or NULL on error.
 */
AVFilterContext* ffw_filtersource_new(AVFilterGraph* filter_graph, AVCodecParameters* codec_params, int tb_num, int tb_den) {
    char args[512];
    AVFilterContext* buffersrc_ctx = NULL;
    const AVFilter* buffersrc = avfilter_get_by_name("buffer");

    /* fix: the lookup can fail (e.g. stripped-down FFmpeg build) */
    if (buffersrc == NULL) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find the buffer source filter\n");
        return NULL;
    }

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    int len = snprintf(args, sizeof(args),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        codec_params->width, codec_params->height, codec_params->format,
        tb_num, tb_den,
        codec_params->sample_aspect_ratio.num, codec_params->sample_aspect_ratio.den);

    /* fix: a truncated argument string would misconfigure the source */
    if (len < 0 || (size_t)len >= sizeof(args)) {
        av_log(NULL, AV_LOG_ERROR, "Buffer source arguments too long\n");
        return NULL;
    }

    if (avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        return NULL;
    }
    if (buffersrc_ctx == NULL) {
        av_log(NULL, AV_LOG_ERROR, "Buffer source initialisation failed\n");
        return NULL;
    }

    return buffersrc_ctx;
}

/*
 * Create a "buffersink" filter named "out" in the given graph.
 *
 * The sink terminates the filter chain and is restricted to the listed pixel
 * formats. Returns the created filter context or NULL on error. Note that on
 * failure after creation, the context remains owned by (and is freed with)
 * the graph.
 */
AVFilterContext* ffw_filtersink_new(AVFilterGraph* filter_graph) {
    AVFilterContext* buffersink_ctx = NULL;
    const AVFilter* buffersink = avfilter_get_by_name("buffersink");
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };

    /* fix: the lookup can fail (e.g. stripped-down FFmpeg build) */
    if (buffersink == NULL) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find the buffer sink filter\n");
        return NULL;
    }

    /* buffer video sink: to terminate the filter chain. */
    int ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        return NULL;
    }
    if (buffersink_ctx == NULL) {
        av_log(NULL, AV_LOG_ERROR, "Buffer sink initialisation failed\n");
        return NULL;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
        return NULL;
    }

    return buffersink_ctx;
}

/*
 * Link the source and sink endpoints to the graph described by filters_descr
 * and configure the whole graph.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
int ffw_filtergraph_init(AVFilterGraph* filter_graph,
    AVFilterContext* buffersrc_ctx, AVFilterContext* buffersink_ctx,
    const char* filters_descr) {
    int ret;
    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs = avfilter_inout_alloc();

    /* fix: the allocations can fail */
    if (outputs == NULL || inputs == NULL) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /*
     * The buffer source output must be connected to the input pad of
     * the first filter described by filters_descr; since the first
     * filter input label is not specified, it is set to "in" by
     * default.
     */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    /*
     * The buffer sink input must be connected to the output pad of
     * the last filter described by filters_descr; since the last
     * filter output label is not specified, it is set to "out" by
     * default.
     */
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, &inputs, &outputs, NULL);
    if (ret < 0) {
        goto end;
    }

    ret = avfilter_graph_config(filter_graph, NULL);

end:
    /* fix: the in/out lists were leaked; avfilter_graph_parse_ptr only
     * consumes the entries it links, the rest must be freed explicitly
     * (avfilter_inout_free is a no-op on NULL) */
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

/*
 * Feed one frame into the buffer source.
 *
 * Returns 1 when the frame was accepted (or the filter reached EOF), 0 when
 * the filter cannot accept more input right now, or a negative AVERROR code.
 */
int ffw_filtergraph_push_frame(AVFilterContext* context, AVFrame* frame) {
    int res = av_buffersrc_add_frame(context, frame);

    switch (res) {
        case 0:
        case AVERROR_EOF:
            return 1;
        case AVERROR(EAGAIN):
            return 0;
        default:
            return res;
    }
}

/*
 * Take the next frame from the buffer sink.
 *
 * On success stores a newly allocated frame in *out and returns 1. Returns 0
 * when no frame is currently available (EAGAIN) or the stream ended (EOF),
 * or a negative AVERROR code on failure.
 */
int ffw_filtergraph_take_frame(AVFilterContext* context, AVFrame** out) {
    AVFrame* frame = av_frame_alloc();

    /* fix: the allocation can fail */
    if (frame == NULL) {
        return AVERROR(ENOMEM);
    }

    int ret = av_buffersink_get_frame(context, frame);

    if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
        /* fix: the frame was leaked on the no-output paths */
        av_frame_free(&frame);
        return 0;
    }
    else if (ret < 0) {
        /* fix: the frame was leaked on the error path */
        av_frame_free(&frame);
        return ret;
    }

    *out = frame;

    return 1;
}

/* Free the filter graph and all filter contexts it owns. */
void ffw_filtergraph_free(AVFilterGraph* filter_graph) {
    avfilter_graph_free(&filter_graph);
}
Loading