- Notifications
You must be signed in to change notification settings - Fork50
Fix: different frame size as inputs of the filter graph#303
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Uh oh!
There was an error while loading. Please reload this page.
Changes fromall commits
2dc9d24
80017d6
cb8ae5d
3864d7e
053f9be
380f238
27cf448
82382fc
cbd4700
06b8b45
969c6c3
c823ad6
d2a7f54
2dcdc1c
343ef4e
a26bae9
9abcb90
0e49751
File filter
Filter by extension
Conversations
Uh oh!
There was an error while loading.Please reload this page.
Jump to
Uh oh!
There was an error while loading.Please reload this page.
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,7 +1,6 @@ | ||
#include "FilterGraph.hpp" | ||
#include <AvTranscoder/util.hpp> | ||
#include <AvTranscoder/data/decoded/VideoFrame.hpp> | ||
extern "C" { | ||
@@ -16,6 +15,123 @@ extern "C" { | ||
namespace avtranscoder | ||
{ | ||
/****************** | ||
AudioFrameBuffer | ||
******************/ | ||
// Build an empty FIFO buffer for audio frames matching the given description.
// The buffer copies incoming frames (see addFrame) and tracks the total
// buffered byte count plus a byte offset into the front frame so that
// getFrame() can slice arbitrary-sized frames out of the queue.
AudioFrameBuffer::AudioFrameBuffer(const AudioFrameDesc& audioFrameDesc)
    : _audioFrameDesc(audioFrameDesc)
    , _frameQueue()
    , _totalDataSize(0)
    , _positionInFrontFrame(0)
{
}
// Release every frame still owned by the buffer.
AudioFrameBuffer::~AudioFrameBuffer()
{
    // Fix: the original loop ran `for(i = 0; i < _frameQueue.size(); ++i) popFrame();`,
    // comparing a growing index against a size that shrinks on each pop — only
    // about half of the queued frames were ever popped. It also never deleted
    // the frames, although addFrame() allocates each queued frame with `new`
    // and popFrame() only drops the pointer, so every remaining frame leaked.
    while(!_frameQueue.empty())
    {
        // Frames still in the queue are owned by the buffer (ownership is only
        // transferred to callers via getFrame), so free them here.
        // NOTE(review): assumes IFrame has a virtual destructor — confirm in IFrame.hpp.
        delete _frameQueue.front();
        _frameQueue.pop();
    }
}
// Number of bytes used to store one sample of one channel, as reported by
// FFmpeg for the buffer's sample format.
size_t AudioFrameBuffer::getBytesPerSample()
{
    return av_get_bytes_per_sample(_audioFrameDesc._sampleFormat);
}
// Store a deep copy of the given frame at the back of the queue.
// The buffer keeps ownership of the copy until it is handed out by getFrame().
void AudioFrameBuffer::addFrame(IFrame* frame)
{
    LOG_DEBUG("Add a new " << frame->getDataSize() << " bytes frame to frame buffer. New buffer size: " << _frameQueue.size() + 1);

    // Duplicate the input so the buffer owns an independent copy.
    AudioFrame* copiedFrame = new AudioFrame(_audioFrameDesc, false);

    // Derive the per-channel sample count from the raw byte size of the input.
    const size_t bytesPerSampleSet = copiedFrame->getNbChannels() * copiedFrame->getBytesPerSample();
    copiedFrame->setNbSamplesPerChannel(frame->getDataSize() / bytesPerSampleSet);

    copiedFrame->allocateData();
    copiedFrame->copyData(*frame);

    _totalDataSize += copiedFrame->getDataSize();
    _frameQueue.push(copiedFrame);
}
// Drop the front frame pointer from the queue without freeing it.
// NOTE(review): the frame itself is not deleted here — when called from the
// first branch of getFrame() ownership has passed to the caller, but other
// call sites should verify who frees the frame.
void AudioFrameBuffer::popFrame()
{
    _frameQueue.pop();
    LOG_DEBUG("Pop frame from buffer. Remaining frames in buffer: " << _frameQueue.size());
}
// Return a frame of exactly `size` bytes assembled from the buffered frames.
// `size` is a raw byte count, independent of the sample format (it is NOT a
// sample count and not limited to one byte per sample — see getFrameSampleNb()
// for the sample-based variant). Passing 0 returns the front frame as-is.
// Ownership of the returned frame passes to the caller.
// NOTE(review): _frameQueue.front() is called without checking that the queue
// is non-empty — calling this on an empty buffer is undefined behavior;
// confirm all callers guarantee a non-empty buffer.
IFrame* AudioFrameBuffer::getFrame(const size_t size)
{
    LOG_DEBUG("Get a " << size << " bytes frame from a " << _totalDataSize << " bytes frame buffer");
    IFrame* next = _frameQueue.front();
    const size_t nextFrameSize = next->getDataSize();
    // If no expected size, or if the expected size equals the front frame of the queue (with no offset)
    if(size == 0 || (size == nextFrameSize && _positionInFrontFrame == 0))
    {
        // Directly return the front frame of the queue
        _totalDataSize -= nextFrameSize;
        popFrame();
        return next;
    }

    // Create a new frame sized for the requested byte count
    AudioFrame* newAudioFrame = new AudioFrame(_audioFrameDesc, false);
    const size_t expectedNbSamples = size / (newAudioFrame->getNbChannels() * newAudioFrame->getBytesPerSample());
    newAudioFrame->setNbSamplesPerChannel(expectedNbSamples);
    newAudioFrame->allocateData();

    // Concatenate frames data into a temporary raw byte buffer.
    // NOTE(review): if the queue runs out before `size` bytes are gathered,
    // the tail of outputData is left uninitialized — verify callers request
    // no more than getDataSize() bytes.
    size_t extractedDataSize = 0;
    unsigned char* outputData = new unsigned char[size];
    while(extractedDataSize != size && _frameQueue.size() != 0)
    {
        // Get the front frame from queue
        next = _frameQueue.front();
        size_t remainingDataInFrontFrame = next->getDataSize() - _positionInFrontFrame;

        // Compute the data size to get from the frame
        size_t dataToGet = size - extractedDataSize;
        if(dataToGet > remainingDataInFrontFrame)
            dataToGet = remainingDataInFrontFrame;

        // Copy the data from the frame to temporal buffer
        for(size_t i = 0; i < dataToGet; i++)
            outputData[extractedDataSize++] = next->getData()[0][_positionInFrontFrame + i];

        if(dataToGet < remainingDataInFrontFrame)
        {
            // Set new position into front frame
            _positionInFrontFrame += dataToGet;
        }
        else
        {
            // The whole front frame has been read, so pop it from queue.
            // NOTE(review): the popped frame is never deleted here — looks
            // like a leak, since the buffer owns the copies made in addFrame();
            // confirm against popFrame()/destructor ownership rules.
            popFrame();
            _positionInFrontFrame = 0;
        }
    }
    _totalDataSize -= extractedDataSize;
    // NOTE(review): assignBuffer() follows allocateData() above — verify that
    // the previously allocated data is released and that the frame takes
    // ownership of outputData, otherwise one of the two buffers leaks.
    newAudioFrame->assignBuffer(outputData);
    return newAudioFrame;
}
// Return a frame holding `sampleNb` samples: convert the sample count to a
// raw byte count and delegate to getFrame().
IFrame* AudioFrameBuffer::getFrameSampleNb(const size_t sampleNb)
{
    return getFrame(sampleNb * getBytesPerSample());
}
/****************** | ||
FilterGraph | ||
******************/ | ||
FilterGraph::FilterGraph(const ICodec& codec) | ||
: _graph(avfilter_graph_alloc()) | ||
, _filters() | ||
@@ -28,31 +144,147 @@ FilterGraph::FilterGraph(const ICodec& codec) | ||
// Tear down the graph: drop the buffered audio frames, delete the owned
// Filter objects, then free the underlying AVFilterGraph.
FilterGraph::~FilterGraph()
{
    _inputAudioFrameBuffers.clear();
    for(size_t i = 0; i < _filters.size(); ++i)
        delete _filters.at(i);
    avfilter_graph_free(&_graph);
}
// Byte size available for the given input: the incoming frame's own size, or,
// when the incoming frame is empty, what is already buffered for that input.
size_t FilterGraph::getAvailableFrameSize(const std::vector<IFrame*>& inputs, const size_t& index)
{
    const size_t inputSize = inputs.at(index)->getDataSize();
    if(inputSize != 0)
        return inputSize;
    return _inputAudioFrameBuffers.at(index).getDataSize();
}
// Number of samples available for the given input, derived from the available
// byte size and that input's bytes-per-sample.
// @throws std::runtime_error when the graph has no audio frame buffers.
size_t FilterGraph::getAvailableFrameSamplesNb(const std::vector<IFrame*>& inputs, const size_t& index)
{
    if(_inputAudioFrameBuffers.empty())
        throw std::runtime_error("Cannot compute filter graph input samples number for non-audio frames.");
    return getAvailableFrameSize(inputs, index) / _inputAudioFrameBuffers.at(index).getBytesPerSample();
}
// Smallest per-input available sample count across all inputs (0 when there
// are no inputs). Used to pull equally-sized frames from every buffer.
size_t FilterGraph::getMinInputFrameSamplesNb(const std::vector<IFrame*>& inputs)
{
    if(inputs.empty())
        return 0;

    size_t minSamples = getAvailableFrameSamplesNb(inputs, 0);
    for(size_t i = 1; i < inputs.size(); ++i)
    {
        const size_t candidate = getAvailableFrameSamplesNb(inputs, i);
        if(candidate < minSamples)
            minSamples = candidate;
    }
    return minSamples;
}
// Returns true only when EVERY input audio buffer holds at least one frame
// (false when there are no audio buffers at all).
// NOTE(review): despite the name, a single empty buffer among several
// non-empty ones yields false — confirm callers expect the "all inputs
// ready" semantics rather than "any frames buffered".
bool FilterGraph::hasBufferedFrames()
{
    if(!_inputAudioFrameBuffers.size())
        return false;
    for(std::vector<AudioFrameBuffer>::iterator it = _inputAudioFrameBuffers.begin(); it != _inputAudioFrameBuffers.end(); ++it)
    {
        if(it->isEmpty())
            return false;
    }
    return true;
}
bool FilterGraph::hasBufferedFrames(const size_t index) | ||
{ | ||
if(index >= _inputAudioFrameBuffers.size()) | ||
return false; | ||
return !_inputAudioFrameBuffers.at(index).isEmpty(); | ||
} | ||
// Check whether all input frames carry an equivalent payload.
// When audio frame buffers are available, inputs whose raw byte sizes differ
// are still considered equal if they hold the same number of samples (their
// sample formats may use different bytes per sample).
// @return true for zero or one input, or when every input matches input 0.
bool FilterGraph::areInputFrameSizesEqual(const std::vector<IFrame*>& inputs)
{
    if(inputs.size() <= 1)
        return true;

    // Fix: use size_t instead of int to avoid a signed/unsigned comparison
    // with getDataSize().
    const size_t refFrameSize = inputs.at(0)->getDataSize();
    for(size_t index = 1; index < inputs.size(); ++index)
    {
        if(refFrameSize == inputs.at(index)->getDataSize())
            continue;

        // Without audio buffers we cannot compare sample counts, so the raw
        // byte-size mismatch is final.
        if(_inputAudioFrameBuffers.empty())
            return false;

        // Compare sample counts instead of raw byte sizes.
        const size_t refSampleNb = refFrameSize / _inputAudioFrameBuffers.at(0).getBytesPerSample();
        const size_t sampleNb = inputs.at(index)->getDataSize() / _inputAudioFrameBuffers.at(index).getBytesPerSample();
        // Fix: the original did `return (refSampleNb == sampleNb);`, which
        // stopped at the FIRST mismatching pair and skipped checking the
        // remaining inputs when that pair's sample counts happened to match.
        if(refSampleNb != sampleNb)
            return false;
    }
    return true;
}
bool FilterGraph::areFrameBuffersEmpty() | ||
{ | ||
if(!_inputAudioFrameBuffers.size()) | ||
return true; | ||
for(std::vector<AudioFrameBuffer>::iterator it = _inputAudioFrameBuffers.begin(); it != _inputAudioFrameBuffers.end(); ++it) | ||
{ | ||
if(!it->isEmpty()) | ||
return false; | ||
} | ||
return true; | ||
} | ||
void FilterGraph::process(const std::vector<IFrame*>& inputs, IFrame& output) | ||
{ | ||
//Init the filter graph | ||
if(!_isInit) | ||
init(inputs, output); | ||
// Check whether we can bypass the input audio buffers | ||
const bool bypassBuffers = _inputAudioFrameBuffers.empty() || (areInputFrameSizesEqual(inputs) && areFrameBuffersEmpty()); | ||
size_t minInputFrameSamplesNb = 0; | ||
if(!bypassBuffers) | ||
{ | ||
// Fill the frame buffer with inputs | ||
for(size_t index = 0; index < inputs.size(); ++index) | ||
{ | ||
if(!inputs.at(index)->getDataSize()) | ||
{ | ||
LOG_DEBUG("Empty frame from filter graph input " << index << ". Remaining audio frames in buffer: " << _inputAudioFrameBuffers.at(index).getBufferSize()); | ||
continue; | ||
} | ||
_inputAudioFrameBuffers.at(index).addFrame(inputs.at(index)); | ||
} | ||
// Get the minimum input frames size | ||
minInputFrameSamplesNb = getMinInputFrameSamplesNb(inputs); | ||
} | ||
// Setup input frames into the filter graph | ||
for(size_t index = 0; index < inputs.size(); ++index) | ||
{ | ||
// Retrieve frame from buffer or directly from input | ||
IFrame* inputFrame = (bypassBuffers)? inputs.at(index) : _inputAudioFrameBuffers.at(index).getFrameSampleNb(minInputFrameSamplesNb); | ||
const int ret = av_buffersrc_add_frame_flags(_filters.at(index)->getAVFilterContext(), &inputFrame->getAVFrame(), AV_BUFFERSRC_FLAG_PUSH); | ||
if(ret < 0) | ||
{ | ||
throw std::runtime_error("Error when adding a frame to the source buffer used to start to process filters: " + | ||
getDescriptionFromErrorCode(ret)); | ||
} | ||
} | ||
//Pull filtered data from the filter graph | ||
for(;;) | ||
{ | ||
const int ret = av_buffersink_get_frame(_filters.at(_filters.size() - 1)->getAVFilterContext(), &output.getAVFrame()); | ||
@@ -150,6 +382,11 @@ void FilterGraph::addInBuffer(const std::vector<IFrame*>& inputs) | ||
filterOptions << "sample_rate=" << audioFrame->getSampleRate() << ":"; | ||
filterOptions << "sample_fmt=" << getSampleFormatName(audioFrame->getSampleFormat()) << ":"; | ||
filterOptions << "channel_layout=0x" << std::hex << audioFrame->getChannelLayout(); | ||
const AudioFrameDesc audioFrameDesc(audioFrame->getSampleRate(), | ||
audioFrame->getNbChannels(), | ||
getSampleFormatName(audioFrame->getSampleFormat())); | ||
_inputAudioFrameBuffers.insert(_inputAudioFrameBuffers.begin(), AudioFrameBuffer(audioFrameDesc)); | ||
} | ||
// video frame | ||
else if((*it)->isVideoFrame()) | ||
Uh oh!
There was an error while loading.Please reload this page.