feature: remove libao dependency and use QAudioSink(QAudioOutput) to play the pcm audio format
This commit is contained in:
parent e3d904f8b8
commit 85aad0f80c
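What the new backend builds on: Qt can play raw PCM by itself through QAudioOutput (Qt 5) or QAudioSink (Qt 6), which is what lets the libao output path go away. The following is only a minimal sketch of that Qt 6 API in push mode, with a synthesized 440 Hz tone standing in for decoded FFmpeg output; it is not the commit's code, which instead implements a pull-mode QIODevice (AudioOutputPrivate in audiooutput.cpp below) so the sink can block until the decoder supplies more data.

#include <QAudioFormat>
#include <QAudioSink>
#include <QByteArray>
#include <QCoreApplication>
#include <QIODevice>
#include <QTimer>
#include <QtMath>

int main( int argc, char ** argv )
{
  QCoreApplication app( argc, argv );

  // Describe the raw PCM the decoder will hand over: interleaved stereo, signed 16-bit.
  QAudioFormat fmt;
  fmt.setSampleRate( 44100 );
  fmt.setChannelCount( 2 );
  fmt.setSampleFormat( QAudioFormat::Int16 );

  // One second of a 440 Hz tone as a stand-in for decoded audio.
  QByteArray pcm;
  for( int i = 0; i < 44100; i++ )
  {
    qint16 s = qint16( 30000 * qSin( 2.0 * M_PI * 440.0 * i / 44100.0 ) );
    pcm.append( reinterpret_cast< const char * >( &s ), sizeof s ); // left channel
    pcm.append( reinterpret_cast< const char * >( &s ), sizeof s ); // right channel
  }

  QAudioSink sink( fmt );
  sink.setBufferSize( pcm.size() ); // a hint only; a real player feeds data incrementally as the sink drains
  QIODevice * io = sink.start();    // push mode: the sink hands us a device to write PCM into
  io->write( pcm );

  QTimer::singleShot( 1500, &app, &QCoreApplication::quit );
  return app.exec();
}

In push mode the caller writes into the device returned by QAudioSink::start(); the commit inverts this and passes its own QIODevice to start(), so the sink pulls data as the hardware buffer drains.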
audiooutput.cpp (new file, 187 lines)
@@ -0,0 +1,187 @@
#include "audiooutput.h"

#include <QAudioFormat>
#include <QDebug>
#include <QtConcurrent/qtconcurrentrun.h>
#include <QFuture>
#include <QWaitCondition>
#include <QCoreApplication>
#include <QThreadPool>
#if QT_VERSION < QT_VERSION_CHECK( 6, 0, 0 )
#include <QAudioOutput>
#else
#include <QAudioSink>
#endif
#include <QtGlobal>
#include <QBuffer>

static QAudioFormat format( int sampleRate, int channelCount )
{
  QAudioFormat out;

  out.setSampleRate( sampleRate );
  out.setChannelCount( 2 );
#if QT_VERSION < QT_VERSION_CHECK( 6, 0, 0 )
  out.setByteOrder( QAudioFormat::LittleEndian );
  out.setCodec( QLatin1String( "audio/pcm" ) );
#endif

#if QT_VERSION < QT_VERSION_CHECK( 6, 0, 0 )
  out.setSampleSize( 16 );
  out.setSampleType( QAudioFormat::SignedInt );
#else
  out.setSampleFormat( QAudioFormat::Int16 );
#endif

  return out;
}

class AudioOutputPrivate: public QIODevice
{
public:
  AudioOutputPrivate()
  {
    open( QIODevice::ReadOnly );
    threadPool.setMaxThreadCount( 1 );
  }

  QFuture< void > audioPlayFuture;

#if QT_VERSION < QT_VERSION_CHECK( 6, 0, 0 )
  using AudioOutput = QAudioOutput;
#else
  using AudioOutput = QAudioSink;
#endif
  AudioOutput * audioOutput = nullptr;
  QByteArray buffer;
  qint64 offset = 0;
  bool quit = false;
  QMutex mutex;
  QWaitCondition cond;
  QThreadPool threadPool;
  int sampleRate = 0;
  int channels = 0;

  void setAudioFormat( int _sampleRate, int _channels )
  {
    sampleRate = _sampleRate;
    channels = _channels;
  }

  qint64 readData( char * data, qint64 len ) override
  {
    if( !len )
      return 0;

    QMutexLocker locker( &mutex );
    qint64 bytesWritten = 0;
    while( len && !quit )
    {
      if( buffer.isEmpty() )
      {
        // Wait for more frames
        if( bytesWritten == 0 )
          cond.wait( &mutex );
        if( buffer.isEmpty() )
          break;
      }

      auto sampleData = buffer.data();
      const int toWrite = qMin( (qint64) buffer.size(), len );
      memcpy( &data[bytesWritten], sampleData, toWrite );
      buffer.remove( 0, toWrite );
      bytesWritten += toWrite;
      // data += toWrite;
      len -= toWrite;
    }

    return bytesWritten;
  }

  qint64 writeData( const char *, qint64 ) override { return 0; }
  qint64 size() const override { return buffer.size(); }
  qint64 bytesAvailable() const override { return buffer.size(); }
  bool isSequential() const override { return true; }
  bool atEnd() const override { return buffer.isEmpty(); }

  void init( const QAudioFormat & fmt )
  {
    if( !audioOutput || ( fmt.isValid() && audioOutput->format() != fmt )
        || audioOutput->state() == QAudio::StoppedState )
    {
      if( audioOutput )
        audioOutput->deleteLater();
      audioOutput = new AudioOutput( fmt );
      QObject::connect( audioOutput, &AudioOutput::stateChanged, audioOutput, [ & ]( QAudio::State state ) {
        switch( state )
        {
          case QAudio::StoppedState:
            if( audioOutput->error() != QAudio::NoError )
              qWarning() << "QAudioOutput stopped:" << audioOutput->error();
            break;
          default:
            break;
        }
      } );

      audioOutput->start( this );
    }

    // audioOutput->setVolume(volume);
  }

  void doPlayAudio()
  {
    while( !quit )
    {
      QMutexLocker locker( &mutex );
      cond.wait( &mutex, 10 );
      auto fmt = sampleRate == 0 ? QAudioFormat() : format( sampleRate, channels );
      locker.unlock();
      if( fmt.isValid() )
        init( fmt );
      QCoreApplication::processEvents();
    }
    if( audioOutput )
    {
      audioOutput->stop();
      audioOutput->deleteLater();
    }
    audioOutput = nullptr;
  }
};

AudioOutput::AudioOutput( QObject * parent ): QObject( parent ), d_ptr( new AudioOutputPrivate )
{
#if QT_VERSION < QT_VERSION_CHECK( 6, 0, 0 )
  d_ptr->audioPlayFuture = QtConcurrent::run( &d_ptr->threadPool, d_ptr.data(), &AudioOutputPrivate::doPlayAudio );
#else
  d_ptr->audioPlayFuture = QtConcurrent::run( &d_ptr->threadPool, &AudioOutputPrivate::doPlayAudio, d_ptr.data() );
#endif
}

void AudioOutput::setAudioFormat( int sampleRate, int channels ) { d_ptr->setAudioFormat( sampleRate, channels ); }

AudioOutput::~AudioOutput()
{
  Q_D( AudioOutput );
  d->quit = true;
  d->cond.wakeAll();
  d->audioPlayFuture.waitForFinished();
}

bool AudioOutput::play( const uint8_t * data, qint64 len )
{
  Q_D( AudioOutput );
  if( d->quit )
    return false;

  QMutexLocker locker( &d->mutex );
  auto cuint = const_cast< uint8_t * >( data );
  auto cptr = reinterpret_cast< char * >( cuint );
  d->buffer.append( cptr, len );
  d->cond.wakeAll();

  return true;
}
audiooutput.h (new file, 25 lines)
@@ -0,0 +1,25 @@
#ifndef AUDIOOUTPUT_H
#define AUDIOOUTPUT_H

#include <QObject>
#include <QScopedPointer>

class AudioOutputPrivate;
class AudioOutput: public QObject
{
public:
  AudioOutput( QObject * parent = nullptr );
  ~AudioOutput();

  bool play( const uint8_t * data, qint64 len );
  void setAudioFormat( int sampleRate, int channels );
protected:
  QScopedPointer< AudioOutputPrivate > d_ptr;

private:
  Q_DISABLE_COPY( AudioOutput )
  Q_DECLARE_PRIVATE( AudioOutput )
};


#endif // AUDIOOUTPUT_H
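The header above is the entire surface the FFmpeg decoder sees: construct one AudioOutput, declare the PCM layout once with setAudioFormat(), then hand play() interleaved stereo S16 buffers. Below is a minimal sketch of that calling pattern; the tone generator is a hypothetical stand-in for the decode loop in ffmpegaudio.cc, and only the AudioOutput class itself comes from this commit.

#include "audiooutput.h"

#include <QCoreApplication>
#include <QTimer>
#include <QtMath>
#include <cstdint>
#include <vector>

int main( int argc, char ** argv )
{
  QCoreApplication app( argc, argv );

  AudioOutput audioOutput;
  audioOutput.setAudioFormat( 44100, 2 ); // rate and channel count, as the decoder reports them

  // Hypothetical stand-in for decoded frames: 100 ms of a 440 Hz tone, packed stereo S16.
  std::vector< uint8_t > chunk;
  for( int i = 0; i < 4410; i++ )
  {
    int16_t s = int16_t( 30000 * qSin( 2.0 * M_PI * 440.0 * i / 44100.0 ) );
    for( int ch = 0; ch < 2; ch++ )
    {
      chunk.push_back( uint8_t( s & 0xff ) );          // little-endian low byte
      chunk.push_back( uint8_t( ( s >> 8 ) & 0xff ) ); // high byte
    }
  }

  // The same call the decoder thread makes after normalizeAudio().
  audioOutput.play( chunk.data(), qint64( chunk.size() ) );

  QTimer::singleShot( 1000, &app, &QCoreApplication::quit );
  return app.exec();
}

play() only appends the bytes to an internal buffer under a mutex and wakes the playback thread, so the decoder never blocks on the audio device itself.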
@@ -224,7 +224,7 @@ public:
 private:
 #ifdef MAKE_FFMPEG_PLAYER
   static InternalPlayerBackend ffmpeg()
-  { return InternalPlayerBackend( "FFmpeg+libao" ); }
+  { return InternalPlayerBackend( "FFmpeg" ); }
 #endif
 
 #ifdef MAKE_QTMULTIMEDIA_PLAYER
ffmpegaudio.cc (197 lines changed)
@@ -1,20 +1,11 @@
 #ifdef MAKE_FFMPEG_PLAYER
 
+#include "audiooutput.h"
 #include "ffmpegaudio.hh"
 
 #include <math.h>
 #include <errno.h>
 
-#ifndef INT64_C
-#define INT64_C(c) (c ## LL)
-#endif
-
-#ifndef UINT64_C
-#define UINT64_C(c) (c ## ULL)
-#endif
-
-#include <ao/ao.h>
-
 extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
@ -27,7 +18,11 @@ extern "C" {
|
|||
#include <QDebug>
|
||||
|
||||
#include <vector>
|
||||
#if( QT_VERSION >= QT_VERSION_CHECK( 6, 2, 0 ) )
|
||||
#include <QMediaDevices>
|
||||
|
||||
#include <QAudioDevice>
|
||||
#endif
|
||||
#include "gddebug.hh"
|
||||
#include "utils.hh"
|
||||
|
||||
|
@ -53,13 +48,13 @@ AudioService & AudioService::instance()
|
|||
|
||||
AudioService::AudioService()
|
||||
{
|
||||
ao_initialize();
|
||||
// ao_initialize();
|
||||
}
|
||||
|
||||
AudioService::~AudioService()
|
||||
{
|
||||
emit cancelPlaying( true );
|
||||
ao_shutdown();
|
||||
// ao_shutdown();
|
||||
}
|
||||
|
||||
void AudioService::playMemory( const char * ptr, int size )
|
||||
|
@@ -100,7 +95,8 @@ struct DecoderContext
   AVCodecContext * codecContext_;
   AVIOContext * avioContext_;
   AVStream * audioStream_;
-  ao_device * aoDevice_;
+  // ao_device * aoDevice_;
+  AudioOutput * audioOutput;
   bool avformatOpened_;
 
   SwrContext *swr_;
@@ -113,7 +109,7 @@ struct DecoderContext
   bool openOutputDevice( QString & errorString );
   void closeOutputDevice();
   bool play( QString & errorString );
-  bool normalizeAudio( AVFrame * frame, vector<char> & samples );
+  bool normalizeAudio( AVFrame * frame, vector<uint8_t> & samples );
   void playFrame( AVFrame * frame );
 };
 
@ -126,7 +122,7 @@ DecoderContext::DecoderContext( QByteArray const & audioData, QAtomicInt & isCan
|
|||
codecContext_( NULL ),
|
||||
avioContext_( NULL ),
|
||||
audioStream_( NULL ),
|
||||
aoDevice_( NULL ),
|
||||
audioOutput( new AudioOutput ),
|
||||
avformatOpened_( false ),
|
||||
swr_( NULL )
|
||||
{
|
||||
|
@ -243,15 +239,15 @@ bool DecoderContext::openCodec( QString & errorString )
|
|||
gdDebug( "Codec open: %s: channels: %d, rate: %d, format: %s\n", codec_->long_name,
|
||||
codecContext_->channels, codecContext_->sample_rate, av_get_sample_fmt_name( codecContext_->sample_fmt ) );
|
||||
|
||||
if ( codecContext_->sample_fmt == AV_SAMPLE_FMT_S32 ||
|
||||
codecContext_->sample_fmt == AV_SAMPLE_FMT_S32P ||
|
||||
codecContext_->sample_fmt == AV_SAMPLE_FMT_FLT ||
|
||||
codecContext_->sample_fmt == AV_SAMPLE_FMT_FLTP ||
|
||||
codecContext_->sample_fmt == AV_SAMPLE_FMT_DBL ||
|
||||
codecContext_->sample_fmt == AV_SAMPLE_FMT_DBLP )
|
||||
// if ( codecContext_->sample_fmt == AV_SAMPLE_FMT_S32 ||
|
||||
// codecContext_->sample_fmt == AV_SAMPLE_FMT_S32P ||
|
||||
// codecContext_->sample_fmt == AV_SAMPLE_FMT_FLT ||
|
||||
// codecContext_->sample_fmt == AV_SAMPLE_FMT_FLTP ||
|
||||
// codecContext_->sample_fmt == AV_SAMPLE_FMT_DBL ||
|
||||
// codecContext_->sample_fmt == AV_SAMPLE_FMT_DBLP )
|
||||
{
|
||||
swr_ = swr_alloc_set_opts( NULL,
|
||||
codecContext_->channel_layout,
|
||||
av_get_default_channel_layout(2),
|
||||
AV_SAMPLE_FMT_S16,
|
||||
codecContext_->sample_rate,
|
||||
codecContext_->channel_layout,
|
||||
|
@@ -317,75 +313,25 @@
 
 bool DecoderContext::openOutputDevice( QString & errorString )
 {
-  // Prepare for audio output
-  int aoDriverId = ao_default_driver_id();
-  ao_info * aoDrvInfo = ao_driver_info( aoDriverId );
-
-  if ( aoDriverId < 0 || !aoDrvInfo )
-  {
-    errorString = QObject::tr( "Cannot find usable audio output device." );
-    return false;
-  }
-
-  ao_sample_format aoSampleFormat;
-  memset (&aoSampleFormat, 0, sizeof(aoSampleFormat) );
-  aoSampleFormat.channels = codecContext_->channels;
-  aoSampleFormat.rate = codecContext_->sample_rate;
-  aoSampleFormat.byte_format = AO_FMT_NATIVE;
-  aoSampleFormat.matrix = 0;
-  aoSampleFormat.bits = qMin( 16, av_get_bytes_per_sample( codecContext_->sample_fmt ) << 3 );
-
-  if ( aoSampleFormat.bits == 0 )
-  {
-    errorString = QObject::tr( "Unsupported sample format." );
-    return false;
-  }
-
-  gdDebug( "ao_open_live(): %s: channels: %d, rate: %d, bits: %d\n",
-           aoDrvInfo->name, aoSampleFormat.channels, aoSampleFormat.rate, aoSampleFormat.bits );
-
-  aoDevice_ = ao_open_live( aoDriverId, &aoSampleFormat, NULL );
-  if ( !aoDevice_ )
-  {
-    errorString = QObject::tr( "ao_open_live() failed: " );
-
-    switch ( errno )
-    {
-      case AO_ENODRIVER:
-        errorString += QObject::tr( "No driver." );
-        break;
-      case AO_ENOTLIVE:
-        errorString += QObject::tr( "This driver is not a live output device." );
-        break;
-      case AO_EBADOPTION:
-        errorString += QObject::tr( "A valid option key has an invalid value." );
-        break;
-      case AO_EOPENDEVICE:
-        errorString += QObject::tr( "Cannot open the device: %1, channels: %2, rate: %3, bits: %4." )
-                       .arg( aoDrvInfo->short_name )
-                       .arg( aoSampleFormat.channels )
-                       .arg( aoSampleFormat.rate )
-                       .arg( aoSampleFormat.bits );
-        break;
-      default:
-        errorString += QObject::tr( "Unknown error." );
-        break;
-    }
 
+  // only check device when qt version is greater than 6.2
+#if (QT_VERSION >= QT_VERSION_CHECK(6,2,0))
+  QAudioDevice m_outputDevice = QMediaDevices::defaultAudioOutput();
+  if(m_outputDevice.isNull()){
+    errorString += QObject::tr( "Can not found default audio output device" );
     return false;
   }
+#endif
 
+  audioOutput->setAudioFormat( codecContext_->sample_rate, codecContext_->channels );
   return true;
 }
 
 void DecoderContext::closeOutputDevice()
 {
-  // ao_close() is synchronous, it will wait until all audio streams flushed
-  if ( aoDevice_ )
-  {
-    ao_close( aoDevice_ );
-    aoDevice_ = NULL;
-  }
+  // if(audioOutput){
+  //   delete audioOutput;
+  //   audioOutput = 0;
+  // }
 }
 
 bool DecoderContext::play( QString & errorString )
@@ -440,79 +386,17 @@
   return true;
 }
 
-bool DecoderContext::normalizeAudio( AVFrame * frame, vector<char> & samples )
+bool DecoderContext::normalizeAudio( AVFrame * frame, vector<uint8_t > & samples )
 {
   int lineSize = 0;
-  int dataSize = av_samples_get_buffer_size( &lineSize, codecContext_->channels,
-                                             frame->nb_samples, codecContext_->sample_fmt, 1 );
+  // int dataSize = av_samples_get_buffer_size( &lineSize, codecContext_->channels,
+  //                                            frame->nb_samples, codecContext_->sample_fmt, 1 );
+  int dataSize = frame->nb_samples * 2 * 2;
+  samples.resize( dataSize );
+  uint8_t *data[2] = { 0 };
data[0] = &samples.front(); //输出格式为AV_SAMPLE_FMT_S16(packet类型),所以转换后的LR两通道都存在data[0]中
 
-  // Portions from: https://code.google.com/p/lavfilters/source/browse/decoder/LAVAudio/LAVAudio.cpp
-  // But this one use 8, 16, 32 bits integer, respectively.
-  switch ( codecContext_->sample_fmt )
-  {
-    case AV_SAMPLE_FMT_U8:
-    case AV_SAMPLE_FMT_S16:
-    {
-      samples.resize( dataSize );
-      memcpy( &samples.front(), frame->data[0], lineSize );
-    }
-    break;
-    // Planar
-    case AV_SAMPLE_FMT_U8P:
-    {
-      samples.resize( dataSize );
-
-      uint8_t * out = ( uint8_t * )&samples.front();
-      for ( int i = 0; i < frame->nb_samples; i++ )
-      {
-        for ( int ch = 0; ch < codecContext_->channels; ch++ )
-        {
-          *out++ = ( ( uint8_t * )frame->extended_data[ch] )[i];
-        }
-      }
-    }
-    break;
-    case AV_SAMPLE_FMT_S16P:
-    {
-      samples.resize( dataSize );
-
-      int16_t * out = ( int16_t * )&samples.front();
-      for ( int i = 0; i < frame->nb_samples; i++ )
-      {
-        for ( int ch = 0; ch < codecContext_->channels; ch++ )
-        {
-          *out++ = ( ( int16_t * )frame->extended_data[ch] )[i];
-        }
-      }
-    }
-    break;
-    case AV_SAMPLE_FMT_S32:
-    /* Pass through */
-    case AV_SAMPLE_FMT_S32P:
-    /* Pass through */
-    case AV_SAMPLE_FMT_FLT:
-    /* Pass through */
-    case AV_SAMPLE_FMT_FLTP:
-    /* Pass through */
-    {
-      samples.resize( dataSize / 2 );
-
-      uint8_t *out = ( uint8_t * )&samples.front();
-      swr_convert( swr_, &out, frame->nb_samples, (const uint8_t**)frame->extended_data, frame->nb_samples );
-    }
-    break;
-    case AV_SAMPLE_FMT_DBL:
-    case AV_SAMPLE_FMT_DBLP:
-    {
-      samples.resize( dataSize / 4 );
-
-      uint8_t *out = ( uint8_t * )&samples.front();
-      swr_convert( swr_, &out, frame->nb_samples, (const uint8_t**)frame->extended_data, frame->nb_samples );
-    }
-    break;
-    default:
-      return false;
-  }
+  swr_convert( swr_, data, frame->nb_samples, (const uint8_t**)frame->data, frame->nb_samples );
 
   return true;
 }
@@ -522,9 +406,12 @@
   if ( !frame )
     return;
 
-  vector<char> samples;
+  vector<uint8_t> samples;
   if ( normalizeAudio( frame, samples ) )
-    ao_play( aoDevice_, &samples.front(), samples.size() );
+  {
+    // ao_play( aoDevice_, &samples.front(), samples.size() );
+    audioOutput->play(&samples.front(), samples.size());
+  }
 }
 
 DecoderThread::DecoderThread( QByteArray const & audioData, QObject * parent ) :
@@ -263,6 +263,7 @@ HEADERS += folding.hh \
     ankiconnector.h \
     article_inspect.h \
     articlewebpage.h \
+    audiooutput.h \
    base/globalregex.hh \
     globalbroadcaster.h \
     headwordsmodel.h \
@@ -407,6 +408,7 @@ SOURCES += folding.cc \
     ankiconnector.cpp \
     article_inspect.cpp \
     articlewebpage.cpp \
+    audiooutput.cpp \
    base/globalregex.cc \
     globalbroadcaster.cpp \
     headwordsmodel.cpp \