New code style refactor

Jack Andersen
2018-12-07 19:17:51 -10:00
parent 2c2c72bfd1
commit 058ea23a00
113 changed files with 23305 additions and 27650 deletions

File diff suppressed because it is too large


@@ -2,295 +2,228 @@
#include "AudioVoiceEngine.hpp"
#include <cstring>
namespace boo {
void AudioMatrixMono::setDefaultMatrixCoefficients(AudioChannelSet acSet) {
m_curSlewFrame = 0;
m_slewFrames = 0;
memset(&m_coefs, 0, sizeof(m_coefs));
switch (acSet) {
case AudioChannelSet::Stereo:
case AudioChannelSet::Quad:
m_coefs.v[int(AudioChannel::FrontLeft)] = 1.0;
m_coefs.v[int(AudioChannel::FrontRight)] = 1.0;
break;
case AudioChannelSet::Surround51:
case AudioChannelSet::Surround71:
m_coefs.v[int(AudioChannel::FrontCenter)] = 1.0;
break;
default:
break;
}
}
int16_t* AudioMatrixMono::mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const int16_t* dataIn,
int16_t* dataOut, size_t samples) {
const ChannelMap& chmap = info.m_channelMap;
for (size_t s = 0; s < samples; ++s, ++dataIn) {
if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
double t = m_curSlewFrame / double(m_slewFrames);
double omt = 1.0 - t;
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = Clamp16(*dataOut + *dataIn * (m_coefs.v[int(ch)] * t + m_oldCoefs.v[int(ch)] * omt));
++dataOut;
}
}
++m_curSlewFrame;
} else {
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = Clamp16(*dataOut + *dataIn * m_coefs.v[int(ch)]);
++dataOut;
}
}
}
}
return dataOut;
}

int32_t* AudioMatrixMono::mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const int32_t* dataIn,
                                            int32_t* dataOut, size_t samples) {
  const ChannelMap& chmap = info.m_channelMap;
  for (size_t s = 0; s < samples; ++s, ++dataIn) {
    if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
      double t = m_curSlewFrame / double(m_slewFrames);
      double omt = 1.0 - t;

      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = Clamp32(*dataOut + *dataIn * (m_coefs.v[int(ch)] * t + m_oldCoefs.v[int(ch)] * omt));
          ++dataOut;
        }
      }

      ++m_curSlewFrame;
    } else {
      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = Clamp32(*dataOut + *dataIn * m_coefs.v[int(ch)]);
          ++dataOut;
        }
      }
    }
  }
  return dataOut;
}

float* AudioMatrixMono::mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const float* dataIn, float* dataOut,
                                          size_t samples) {
  const ChannelMap& chmap = info.m_channelMap;
  for (size_t s = 0; s < samples; ++s, ++dataIn) {
    if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
      double t = m_curSlewFrame / double(m_slewFrames);
      double omt = 1.0 - t;

      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = *dataOut + *dataIn * (m_coefs.v[int(ch)] * t + m_oldCoefs.v[int(ch)] * omt);
          ++dataOut;
        }
      }

      ++m_curSlewFrame;
    } else {
      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = *dataOut + *dataIn * m_coefs.v[int(ch)];
          ++dataOut;
        }
      }
    }
  }
  return dataOut;
}
void AudioMatrixStereo::setDefaultMatrixCoefficients(AudioChannelSet acSet) {
m_curSlewFrame = 0;
m_slewFrames = 0;
memset(&m_coefs, 0, sizeof(m_coefs));
switch (acSet) {
case AudioChannelSet::Stereo:
case AudioChannelSet::Quad:
m_coefs.v[int(AudioChannel::FrontLeft)][0] = 1.0;
m_coefs.v[int(AudioChannel::FrontRight)][1] = 1.0;
break;
case AudioChannelSet::Surround51:
case AudioChannelSet::Surround71:
m_coefs.v[int(AudioChannel::FrontLeft)][0] = 1.0;
m_coefs.v[int(AudioChannel::FrontRight)][1] = 1.0;
break;
default:
break;
}
}

int16_t* AudioMatrixStereo::mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const int16_t* dataIn,
                                                int16_t* dataOut, size_t frames) {
  const ChannelMap& chmap = info.m_channelMap;
  for (size_t f = 0; f < frames; ++f, dataIn += 2) {
    if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
      double t = m_curSlewFrame / double(m_slewFrames);
      double omt = 1.0 - t;

      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = Clamp16(*dataOut + *dataIn * (m_coefs.v[int(ch)][0] * t + m_oldCoefs.v[int(ch)][0] * omt) +
                             *dataIn * (m_coefs.v[int(ch)][1] * t + m_oldCoefs.v[int(ch)][1] * omt));
          ++dataOut;
        }
      }

      ++m_curSlewFrame;
    } else {
      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = Clamp16(*dataOut + dataIn[0] * m_coefs.v[int(ch)][0] + dataIn[1] * m_coefs.v[int(ch)][1]);
          ++dataOut;
        }
      }
    }
  }
  return dataOut;
}
int32_t* AudioMatrixStereo::mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const int32_t* dataIn,
int32_t* dataOut, size_t frames) {
const ChannelMap& chmap = info.m_channelMap;
for (size_t f = 0; f < frames; ++f, dataIn += 2) {
if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
double t = m_curSlewFrame / double(m_slewFrames);
double omt = 1.0 - t;
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = Clamp32(*dataOut + *dataIn * (m_coefs.v[int(ch)][0] * t + m_oldCoefs.v[int(ch)][0] * omt) +
*dataIn * (m_coefs.v[int(ch)][1] * t + m_oldCoefs.v[int(ch)][1] * omt));
++dataOut;
}
}
++m_curSlewFrame;
} else {
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = Clamp32(*dataOut + dataIn[0] * m_coefs.v[int(ch)][0] + dataIn[1] * m_coefs.v[int(ch)][1]);
++dataOut;
}
}
}
}
return dataOut;
}

float* AudioMatrixStereo::mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const float* dataIn, float* dataOut,
                                              size_t frames) {
  const ChannelMap& chmap = info.m_channelMap;
  for (size_t f = 0; f < frames; ++f, dataIn += 2) {
    if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
      double t = m_curSlewFrame / double(m_slewFrames);
      double omt = 1.0 - t;

      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = *dataOut + *dataIn * (m_coefs.v[int(ch)][0] * t + m_oldCoefs.v[int(ch)][0] * omt) +
                     *dataIn * (m_coefs.v[int(ch)][1] * t + m_oldCoefs.v[int(ch)][1] * omt);
          ++dataOut;
        }
      }

      ++m_curSlewFrame;
    } else {
      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = *dataOut + dataIn[0] * m_coefs.v[int(ch)][0] + dataIn[1] * m_coefs.v[int(ch)][1];
          ++dataOut;
        }
      }
    }
  }
  return dataOut;
}
} // namespace boo
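
The mix routines above slew between coefficient sets by weighting the new coefficient with t = m_curSlewFrame / m_slewFrames and the old one with 1 - t on every frame. The short standalone sketch below (illustration only; names and values are hypothetical, not part of this commit) shows that same crossfade applied to a single gain:

#include <cstddef>
#include <cstdio>

// Linear slew from an old gain to a new gain over `slewFrames` frames,
// mirroring m_coefs.v[ch] * t + m_oldCoefs.v[ch] * omt in the mixers above.
int main() {
  const double oldGain = 0.0, newGain = 1.0;
  const size_t slewFrames = 4;
  for (size_t frame = 0; frame < slewFrames; ++frame) {
    double t = frame / double(slewFrames);
    double omt = 1.0 - t;
    std::printf("frame %zu: gain %.2f\n", frame, newGain * t + oldGain * omt);
  }
  // Prints 0.00, 0.25, 0.50, 0.75; once the slew window is exhausted the
  // mixers switch to the non-slew path and use m_coefs directly.
  return 0;
}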


@@ -10,155 +10,138 @@
#include <immintrin.h>
#endif
namespace boo {
struct AudioVoiceEngineMixInfo;
static inline int16_t Clamp16(float in) {
if (in < SHRT_MIN)
return SHRT_MIN;
else if (in > SHRT_MAX)
return SHRT_MAX;
return in;
}
static inline int32_t Clamp32(float in) {
if (in < INT_MIN)
return INT_MIN;
else if (in > INT_MAX)
return INT_MAX;
return in;
}

class AudioMatrixMono {
  union Coefs {
    float v[8];
#if __SSE__
    __m128 q[2];
    __m64 d[4];
#endif
  };
  Coefs m_coefs = {};
  Coefs m_oldCoefs = {};
  size_t m_slewFrames = 0;
  size_t m_curSlewFrame = ~size_t(0);

public:
AudioMatrixMono() { setDefaultMatrixCoefficients(AudioChannelSet::Stereo); }
void setDefaultMatrixCoefficients(AudioChannelSet acSet);
void setMatrixCoefficients(const float coefs[8], size_t slewFrames = 0) {
m_slewFrames = slewFrames;
#if __SSE__
if (m_curSlewFrame != 0) {
m_oldCoefs.q[0] = m_coefs.q[0];
m_oldCoefs.q[1] = m_coefs.q[1];
}
m_coefs.q[0] = _mm_loadu_ps(coefs);
m_coefs.q[1] = _mm_loadu_ps(&coefs[4]);
#else
for (int i = 0; i < 8; ++i) {
if (m_curSlewFrame != 0)
m_oldCoefs.v[i] = m_coefs.v[i];
m_coefs.v[i] = coefs[i];
}
#endif
m_curSlewFrame = 0;
}
int16_t* mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const int16_t* dataIn, int16_t* dataOut,
size_t samples);
int32_t* mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const int32_t* dataIn, int32_t* dataOut,
size_t samples);
float* mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const float* dataIn, float* dataOut, size_t samples);
bool isSilent() const {
if (m_curSlewFrame < m_slewFrames)
for (int i = 0; i < 8; ++i)
if (m_oldCoefs.v[i] > FLT_EPSILON)
return false;
for (int i = 0; i < 8; ++i)
if (m_coefs.v[i] > FLT_EPSILON)
return false;
return true;
}
};

class AudioMatrixStereo {
  union Coefs {
    float v[8][2];
#if __SSE__
    __m128 q[4];
    __m64 d[8];
#endif
  };
  Coefs m_coefs = {};
  Coefs m_oldCoefs = {};
  size_t m_slewFrames = 0;
  size_t m_curSlewFrame = ~size_t(0);

public:
AudioMatrixStereo() { setDefaultMatrixCoefficients(AudioChannelSet::Stereo); }
void setDefaultMatrixCoefficients(AudioChannelSet acSet);
void setMatrixCoefficients(const float coefs[8][2], size_t slewFrames = 0) {
m_slewFrames = slewFrames;
#if __SSE__
if (m_curSlewFrame != 0) {
m_oldCoefs.q[0] = m_coefs.q[0];
m_oldCoefs.q[1] = m_coefs.q[1];
m_oldCoefs.q[2] = m_coefs.q[2];
m_oldCoefs.q[3] = m_coefs.q[3];
}
m_coefs.q[0] = _mm_loadu_ps(coefs[0]);
m_coefs.q[1] = _mm_loadu_ps(coefs[2]);
m_coefs.q[2] = _mm_loadu_ps(coefs[4]);
m_coefs.q[3] = _mm_loadu_ps(coefs[6]);
#else
for (int i = 0; i < 8; ++i) {
if (m_curSlewFrame != 0) {
m_oldCoefs.v[i][0] = m_coefs.v[i][0];
m_oldCoefs.v[i][1] = m_coefs.v[i][1];
}
m_coefs.v[i][0] = coefs[i][0];
m_coefs.v[i][1] = coefs[i][1];
}
#endif
m_curSlewFrame = 0;
}
int16_t* mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const int16_t* dataIn, int16_t* dataOut,
size_t frames);
int32_t* mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const int32_t* dataIn, int32_t* dataOut,
size_t frames);
float* mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const float* dataIn, float* dataOut, size_t frames);
bool isSilent() const {
if (m_curSlewFrame < m_slewFrames)
for (int i = 0; i < 8; ++i)
if (m_oldCoefs.v[i][0] > FLT_EPSILON || m_oldCoefs.v[i][1] > FLT_EPSILON)
return false;
for (int i = 0; i < 8; ++i)
if (m_coefs.v[i][0] > FLT_EPSILON || m_coefs.v[i][1] > FLT_EPSILON)
return false;
return true;
}
};
} // namespace boo


@@ -4,531 +4,440 @@
#include <immintrin.h>
namespace boo {
typedef union {
float v[4];
#if __SSE__
__m128 q;
__m64 d[2];
#endif
} TVectorUnion;
static constexpr TVectorUnion Min32Vec = {{INT32_MIN, INT32_MIN, INT32_MIN, INT32_MIN}};
static constexpr TVectorUnion Max32Vec = {{INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX}};
void AudioMatrixMono::setDefaultMatrixCoefficients(AudioChannelSet acSet) {
m_curSlewFrame = 0;
m_slewFrames = 0;
m_coefs.q[0] = _mm_xor_ps(m_coefs.q[0], m_coefs.q[0]);
m_coefs.q[1] = _mm_xor_ps(m_coefs.q[1], m_coefs.q[1]);
switch (acSet) {
case AudioChannelSet::Stereo:
case AudioChannelSet::Quad:
m_coefs.v[int(AudioChannel::FrontLeft)] = 1.0;
m_coefs.v[int(AudioChannel::FrontRight)] = 1.0;
break;
case AudioChannelSet::Surround51:
case AudioChannelSet::Surround71:
m_coefs.v[int(AudioChannel::FrontCenter)] = 1.0;
break;
default:
break;
}
}
int16_t* AudioMatrixMono::mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const int16_t* dataIn,
int16_t* dataOut, size_t samples) {
const ChannelMap& chmap = info.m_channelMap;
for (size_t s = 0; s < samples; ++s, ++dataIn) {
if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
double t = m_curSlewFrame / double(m_slewFrames);
double omt = 1.0 - t;
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = Clamp16(*dataOut + *dataIn * (m_coefs.v[int(ch)] * t + m_oldCoefs.v[int(ch)] * omt));
++dataOut;
}
}
++m_curSlewFrame;
} else {
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = Clamp16(*dataOut + *dataIn * m_coefs.v[int(ch)]);
++dataOut;
}
}
}
}
return dataOut;
}
int32_t* AudioMatrixMono::mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const int32_t* dataIn,
int32_t* dataOut, size_t samples) {
const ChannelMap& chmap = info.m_channelMap;
for (size_t s = 0; s < samples; ++s, ++dataIn) {
if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
float t = m_curSlewFrame / float(m_slewFrames);
float omt = 1.f - t;
switch (chmap.m_channelCount) {
case 2: {
++m_curSlewFrame;
float t2 = m_curSlewFrame / float(m_slewFrames);
float omt2 = 1.f - t2;
TVectorUnion coefs, samps;
coefs.q = _mm_add_ps(
_mm_mul_ps(_mm_shuffle_ps(m_coefs.q[0], m_coefs.q[0], _MM_SHUFFLE(1, 0, 1, 0)), _mm_set_ps(t, t, t2, t2)),
_mm_mul_ps(_mm_shuffle_ps(m_oldCoefs.q[0], m_oldCoefs.q[0], _MM_SHUFFLE(1, 0, 1, 0)),
_mm_set_ps(omt, omt, omt2, omt2)));
samps.q = _mm_cvtepi32_ps(_mm_set_epi32(dataIn[1], dataIn[0], dataIn[1], dataIn[0]));
__m128i* out = reinterpret_cast<__m128i*>(dataOut);
__m128 pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(coefs.q, samps.q));
_mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
dataOut += 4;
++s;
++dataIn;
        break;
      }
      case 4: {
        TVectorUnion coefs, samps;
        coefs.q = _mm_add_ps(_mm_mul_ps(m_coefs.q[0], _mm_set1_ps(t)), _mm_mul_ps(m_oldCoefs.q[0], _mm_set1_ps(omt)));
        samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
        __m128i* out = reinterpret_cast<__m128i*>(dataOut);
        __m128 pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(coefs.q, samps.q));
        _mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
        dataOut += 4;
        break;
      }
      case 6: {
        TVectorUnion coefs, samps;
        coefs.q = _mm_add_ps(_mm_mul_ps(m_coefs.q[0], _mm_set1_ps(t)), _mm_mul_ps(m_oldCoefs.q[0], _mm_set1_ps(omt)));
        samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
        __m128i* out = reinterpret_cast<__m128i*>(dataOut);
        __m128 pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(coefs.q, samps.q));
        _mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
        dataOut += 4;
        coefs.q = _mm_add_ps(_mm_mul_ps(m_coefs.q[1], _mm_set1_ps(t)), _mm_mul_ps(m_oldCoefs.q[1], _mm_set1_ps(omt)));
        samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
        out = reinterpret_cast<__m128i*>(dataOut);
        __m128i loadOut = _mm_loadu_si128(out);
        pre = _mm_add_ps(_mm_cvtepi32_ps(loadOut), _mm_mul_ps(coefs.q, samps.q));
        _mm_storel_epi64(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
        dataOut += 2;
        break;
      }
      case 8: {
        TVectorUnion coefs, samps;
        coefs.q = _mm_add_ps(_mm_mul_ps(m_coefs.q[0], _mm_set1_ps(t)), _mm_mul_ps(m_oldCoefs.q[0], _mm_set1_ps(omt)));
        samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
        __m128i* out = reinterpret_cast<__m128i*>(dataOut);
        __m128 pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(coefs.q, samps.q));
        _mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
        dataOut += 4;
        coefs.q = _mm_add_ps(_mm_mul_ps(m_coefs.q[1], _mm_set1_ps(t)), _mm_mul_ps(m_oldCoefs.q[1], _mm_set1_ps(omt)));
        samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
        out = reinterpret_cast<__m128i*>(dataOut);
        pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(coefs.q, samps.q));
        _mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
        dataOut += 4;
        break;
      }
      default: {
        for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
          AudioChannel ch = chmap.m_channels[c];
          if (ch != AudioChannel::Unknown) {
            *dataOut = Clamp32(*dataOut + *dataIn * (m_coefs.v[int(ch)] * t + m_oldCoefs.v[int(ch)] * omt));
            ++dataOut;
          }
        }
        break;
      }
      }
++m_curSlewFrame;
} else {
switch (chmap.m_channelCount) {
case 2: {
TVectorUnion coefs, samps;
coefs.q = _mm_shuffle_ps(m_coefs.q[0], m_coefs.q[0], _MM_SHUFFLE(1, 0, 1, 0));
samps.q = _mm_cvtepi32_ps(_mm_set_epi32(dataIn[1], dataIn[0], dataIn[1], dataIn[0]));
__m128i* out = reinterpret_cast<__m128i*>(dataOut);
__m128i huh2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(out));
__m128 huh3 = _mm_cvtepi32_ps(huh2);
__m128 pre = _mm_add_ps(huh3, _mm_mul_ps(coefs.q, samps.q));
_mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
dataOut += 4;
++s;
++dataIn;
break;
}
case 4: {
TVectorUnion samps;
samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
__m128i* out = reinterpret_cast<__m128i*>(dataOut);
__m128 pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(m_coefs.q[0], samps.q));
_mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
dataOut += 4;
break;
}
case 6: {
TVectorUnion samps;
samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
__m128i* out = reinterpret_cast<__m128i*>(dataOut);
__m128 pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(m_coefs.q[0], samps.q));
_mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
dataOut += 4;
samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
out = reinterpret_cast<__m128i*>(dataOut);
__m128i loadOut = _mm_loadu_si128(out);
pre = _mm_add_ps(_mm_cvtepi32_ps(loadOut), _mm_mul_ps(m_coefs.q[1], samps.q));
_mm_storel_epi64(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
dataOut += 2;
break;
}
case 8: {
TVectorUnion samps;
samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
__m128i* out = reinterpret_cast<__m128i*>(dataOut);
__m128 pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(m_coefs.q[0], samps.q));
_mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
dataOut += 4;
samps.q = _mm_cvtepi32_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(dataIn)));
out = reinterpret_cast<__m128i*>(dataOut);
pre = _mm_add_ps(_mm_cvtepi32_ps(_mm_loadu_si128(out)), _mm_mul_ps(m_coefs.q[1], samps.q));
_mm_storeu_si128(out, _mm_cvttps_epi32(_mm_min_ps(_mm_max_ps(pre, Min32Vec.q), Max32Vec.q)));
dataOut += 4;
break;
}
default: {
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = Clamp32(*dataOut + *dataIn * m_coefs.v[int(ch)]);
++dataOut;
}
}
break;
}
}
}
}
return dataOut;
}

float* AudioMatrixMono::mixMonoSampleData(const AudioVoiceEngineMixInfo& info, const float* dataIn, float* dataOut,
                                          size_t samples) {
  const ChannelMap& chmap = info.m_channelMap;
  for (size_t s = 0; s < samples; ++s, ++dataIn) {
    if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
      float t = m_curSlewFrame / float(m_slewFrames);
      float omt = 1.f - t;

      switch (chmap.m_channelCount) {
      case 2: {
        ++m_curSlewFrame;
        float t2 = m_curSlewFrame / float(m_slewFrames);
        float omt2 = 1.f - t2;
        TVectorUnion coefs, samps;
        coefs.q = _mm_add_ps(
            _mm_mul_ps(_mm_shuffle_ps(m_coefs.q[0], m_coefs.q[0], _MM_SHUFFLE(1, 0, 1, 0)), _mm_set_ps(t, t, t2, t2)),
            _mm_mul_ps(_mm_shuffle_ps(m_oldCoefs.q[0], m_oldCoefs.q[0], _MM_SHUFFLE(1, 0, 1, 0)),
                       _mm_set_ps(omt, omt, omt2, omt2)));
        samps.q = _mm_loadu_ps(dataIn);
        samps.q = _mm_shuffle_ps(samps.q, samps.q, _MM_SHUFFLE(1, 0, 1, 0));
        __m128 pre = _mm_add_ps(_mm_loadu_ps(dataOut), _mm_mul_ps(coefs.q, samps.q));
        _mm_storeu_ps(dataOut, pre);
        dataOut += 4;
        ++s;
        ++dataIn;
        break;
      }
      default: {
        for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
          AudioChannel ch = chmap.m_channels[c];
          if (ch != AudioChannel::Unknown) {
            *dataOut = *dataOut + *dataIn * (m_coefs.v[int(ch)] * t + m_oldCoefs.v[int(ch)] * omt);
            ++dataOut;
          }
        }
        break;
      }
      }
      ++m_curSlewFrame;
    } else {
      switch (chmap.m_channelCount) {
      case 2: {
        TVectorUnion coefs, samps;
        coefs.q = _mm_shuffle_ps(m_coefs.q[0], m_coefs.q[0], _MM_SHUFFLE(1, 0, 1, 0));
        samps.q = _mm_loadu_ps(dataIn);
        samps.q = _mm_shuffle_ps(samps.q, samps.q, _MM_SHUFFLE(1, 0, 1, 0));
        __m128 pre = _mm_add_ps(_mm_loadu_ps(dataOut), _mm_mul_ps(coefs.q, samps.q));
        _mm_storeu_ps(dataOut, pre);
        dataOut += 4;
        ++s;
        ++dataIn;
        break;
      }
      default: {
        for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
          AudioChannel ch = chmap.m_channels[c];
          if (ch != AudioChannel::Unknown) {
            *dataOut = *dataOut + *dataIn * m_coefs.v[int(ch)];
            ++dataOut;
          }
        }
        break;
      }
      }
    }
  }
  return dataOut;
}
void AudioMatrixStereo::setDefaultMatrixCoefficients(AudioChannelSet acSet) {
m_curSlewFrame = 0;
m_slewFrames = 0;
m_coefs.q[0] = _mm_xor_ps(m_coefs.q[0], m_coefs.q[0]);
m_coefs.q[1] = _mm_xor_ps(m_coefs.q[1], m_coefs.q[1]);
m_coefs.q[2] = _mm_xor_ps(m_coefs.q[2], m_coefs.q[2]);
m_coefs.q[3] = _mm_xor_ps(m_coefs.q[3], m_coefs.q[3]);
switch (acSet) {
case AudioChannelSet::Stereo:
case AudioChannelSet::Quad:
m_coefs.v[int(AudioChannel::FrontLeft)][0] = 1.0;
m_coefs.v[int(AudioChannel::FrontRight)][1] = 1.0;
break;
case AudioChannelSet::Surround51:
case AudioChannelSet::Surround71:
m_coefs.v[int(AudioChannel::FrontLeft)][0] = 1.0;
m_coefs.v[int(AudioChannel::FrontRight)][1] = 1.0;
break;
default:
break;
}
}

int16_t* AudioMatrixStereo::mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const int16_t* dataIn,
                                                int16_t* dataOut, size_t frames) {
  const ChannelMap& chmap = info.m_channelMap;
  for (size_t f = 0; f < frames; ++f, dataIn += 2) {
    if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
      double t = m_curSlewFrame / double(m_slewFrames);
      double omt = 1.0 - t;

      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = Clamp16(*dataOut + *dataIn * (m_coefs.v[int(ch)][0] * t + m_oldCoefs.v[int(ch)][0] * omt) +
                             *dataIn * (m_coefs.v[int(ch)][1] * t + m_oldCoefs.v[int(ch)][1] * omt));
          ++dataOut;
        }
      }

      ++m_curSlewFrame;
    } else {
      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = Clamp16(*dataOut + dataIn[0] * m_coefs.v[int(ch)][0] + dataIn[1] * m_coefs.v[int(ch)][1]);
          ++dataOut;
        }
      }
    }
  }
  return dataOut;
}

int32_t* AudioMatrixStereo::mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const int32_t* dataIn,
                                                int32_t* dataOut, size_t frames) {
  const ChannelMap& chmap = info.m_channelMap;
  for (size_t f = 0; f < frames; ++f, dataIn += 2) {
    if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
      double t = m_curSlewFrame / double(m_slewFrames);
      double omt = 1.0 - t;

      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = Clamp32(*dataOut + *dataIn * (m_coefs.v[int(ch)][0] * t + m_oldCoefs.v[int(ch)][0] * omt) +
                             *dataIn * (m_coefs.v[int(ch)][1] * t + m_oldCoefs.v[int(ch)][1] * omt));
          ++dataOut;
        }
      }

      ++m_curSlewFrame;
    } else {
      for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
        AudioChannel ch = chmap.m_channels[c];
        if (ch != AudioChannel::Unknown) {
          *dataOut = Clamp32(*dataOut + dataIn[0] * m_coefs.v[int(ch)][0] + dataIn[1] * m_coefs.v[int(ch)][1]);
          ++dataOut;
        }
      }
    }
  }
  return dataOut;
}
float* AudioMatrixStereo::mixStereoSampleData(const AudioVoiceEngineMixInfo& info, const float* dataIn, float* dataOut,
size_t frames) {
const ChannelMap& chmap = info.m_channelMap;
for (size_t f = 0; f < frames; ++f, dataIn += 2) {
if (m_slewFrames && m_curSlewFrame < m_slewFrames) {
double t = m_curSlewFrame / double(m_slewFrames);
double omt = 1.0 - t;
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = *dataOut + *dataIn * (m_coefs.v[int(ch)][0] * t + m_oldCoefs.v[int(ch)][0] * omt) +
*dataIn * (m_coefs.v[int(ch)][1] * t + m_oldCoefs.v[int(ch)][1] * omt);
++dataOut;
}
}
++m_curSlewFrame;
} else {
for (unsigned c = 0; c < chmap.m_channelCount; ++c) {
AudioChannel ch = chmap.m_channels[c];
if (ch != AudioChannel::Unknown) {
*dataOut = *dataOut + dataIn[0] * m_coefs.v[int(ch)][0] + dataIn[1] * m_coefs.v[int(ch)][1];
++dataOut;
}
}
}
}
return dataOut;
}
} // namespace boo


@@ -7,86 +7,71 @@
#undef min
#undef max
namespace boo {
AudioSubmix::AudioSubmix(BaseAudioVoiceEngine& root, IAudioSubmixCallback* cb, int busId, bool mainOut)
: ListNode<AudioSubmix, BaseAudioVoiceEngine*, IAudioSubmix>(&root), m_busId(busId), m_mainOut(mainOut), m_cb(cb) {
if (mainOut)
setSendLevel(m_head->m_mainSubmix.get(), 1.f, false);
}

AudioSubmix::~AudioSubmix() { m_head->m_submixesDirty = true; }

AudioSubmix*& AudioSubmix::_getHeadPtr(BaseAudioVoiceEngine* head) { return head->m_submixHead; }

std::unique_lock<std::recursive_mutex> AudioSubmix::_getHeadLock(BaseAudioVoiceEngine* head) {
return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
}
std::unique_lock<std::recursive_mutex> AudioSubmix::destructorLock() {
return std::unique_lock<std::recursive_mutex>{m_head->m_dataMutex};
}
bool AudioSubmix::_isDirectDependencyOf(AudioSubmix* send) { return m_sendGains.find(send) != m_sendGains.cend(); }
bool AudioSubmix::_mergeC3(std::list<AudioSubmix*>& output, std::vector<std::list<AudioSubmix*>>& lists) {
for (auto outerIt = lists.begin(); outerIt != lists.cend(); ++outerIt) {
if (outerIt->empty())
continue;
AudioSubmix* smx = outerIt->front();
bool found = false;
for (auto innerIt = lists.begin(); innerIt != lists.cend(); ++innerIt) {
if (innerIt->empty() || outerIt == innerIt)
continue;
if (smx == innerIt->front()) {
innerIt->pop_front();
found = true;
}
}
if (found) {
outerIt->pop_front();
output.push_back(smx);
return true;
}
}
return false;
}
std::list<AudioSubmix*> AudioSubmix::_linearizeC3() {
std::vector<std::list<AudioSubmix*>> lists = {{}};
if (m_head->m_submixHead)
for (AudioSubmix& smx : *m_head->m_submixHead) {
if (&smx == this)
continue;
if (smx._isDirectDependencyOf(this))
lists[0].push_back(&smx);
}
lists.reserve(lists[0].size() + 1);
for (AudioSubmix* smx : lists[0])
lists.push_back(smx->_linearizeC3());
std::list<AudioSubmix*> ret = {this};
while (_mergeC3(ret, lists)) {}
return ret;
}
template <typename T>
void AudioSubmix::_zeroFill() {
if (_getScratch<T>().size())
std::fill(_getScratch<T>().begin(), _getScratch<T>().end(), 0);
}
template void AudioSubmix::_zeroFill<int16_t>();
@@ -94,16 +79,15 @@ template void AudioSubmix::_zeroFill<int32_t>();
template void AudioSubmix::_zeroFill<float>();
template <typename T>
T* AudioSubmix::_getMergeBuf(size_t frames) {
if (_getRedirect<T>())
return _getRedirect<T>();
size_t sampleCount = frames * m_head->clientMixInfo().m_channelMap.m_channelCount;
if (_getScratch<T>().size() < sampleCount)
_getScratch<T>().resize(sampleCount);
return _getScratch<T>().data();
}
template int16_t* AudioSubmix::_getMergeBuf<int16_t>(size_t frames);
@@ -111,143 +95,116 @@ template int32_t* AudioSubmix::_getMergeBuf<int32_t>(size_t frames);
template float* AudioSubmix::_getMergeBuf<float>(size_t frames);
template <typename T>
static inline T ClampInt(float in) {
if (std::is_floating_point<T>()) {
return in; // Allow subsequent mixing stages to work with over-saturated values
} else {
constexpr T MAX = std::numeric_limits<T>::max();
constexpr T MIN = std::numeric_limits<T>::min();
if (in < MIN)
return MIN;
else if (in > MAX)
return MAX;
else
return in;
}
}
template <typename T>
size_t AudioSubmix::_pumpAndMix(size_t frames) {
const ChannelMap& chMap = m_head->clientMixInfo().m_channelMap;
size_t chanCount = chMap.m_channelCount;
if (_getRedirect<T>()) {
if (m_cb && m_cb->canApplyEffect())
m_cb->applyEffect(_getRedirect<T>(), frames, chMap, m_head->mixInfo().m_sampleRate);
_getRedirect<T>() += chanCount * frames;
} else {
size_t sampleCount = frames * chanCount;
if (_getScratch<T>().size() < sampleCount)
_getScratch<T>().resize(sampleCount);
if (m_cb && m_cb->canApplyEffect())
m_cb->applyEffect(_getScratch<T>().data(), frames, chMap, m_head->mixInfo().m_sampleRate);

    size_t curSlewFrame = m_slewFrames;
    for (auto& smx : m_sendGains) {
      curSlewFrame = m_curSlewFrame;
      AudioSubmix& sm = *reinterpret_cast<AudioSubmix*>(smx.first);
      auto it = _getScratch<T>().begin();
      T* dataOut = sm._getMergeBuf<T>(frames);

      for (size_t f = 0; f < frames; ++f) {
        if (m_slewFrames && curSlewFrame < m_slewFrames) {
          double t = curSlewFrame / double(m_slewFrames);
          double omt = 1.0 - t;

          for (unsigned c = 0; c < chanCount; ++c) {
            *dataOut = ClampInt<T>(*dataOut + *it * (smx.second[1] * t + smx.second[0] * omt));
            ++it;
            ++dataOut;
          }

          ++curSlewFrame;
        } else {
          for (unsigned c = 0; c < chanCount; ++c) {
            *dataOut = ClampInt<T>(*dataOut + *it * smx.second[1]);
            ++it;
            ++dataOut;
          }
        }
      }

      m_curSlewFrame += curSlewFrame;
    }
  }
  return frames;
}
template size_t AudioSubmix::_pumpAndMix<int16_t>(size_t frames);
template size_t AudioSubmix::_pumpAndMix<int32_t>(size_t frames);
template size_t AudioSubmix::_pumpAndMix<float>(size_t frames);
void AudioSubmix::_resetOutputSampleRate() {
if (m_cb)
m_cb->resetOutputSampleRate(m_head->mixInfo().m_sampleRate);
}
void AudioSubmix::resetSendLevels() {
if (m_sendGains.empty())
return;
m_sendGains.clear();
m_head->m_submixesDirty = true;
}
void AudioSubmix::setSendLevel(IAudioSubmix* submix, float level, bool slew) {
auto search = m_sendGains.find(submix);
if (search == m_sendGains.cend()) {
search = m_sendGains.emplace(submix, std::array<float, 2>{1.f, 1.f}).first;
m_head->m_submixesDirty = true;
}
m_slewFrames = slew ? m_head->m_5msFrames : 0;
m_curSlewFrame = 0;
search->second[0] = search->second[1];
search->second[1] = level;
}

const AudioVoiceEngineMixInfo& AudioSubmix::mixInfo() const { return m_head->mixInfo(); }

double AudioSubmix::getSampleRate() const { return mixInfo().m_sampleRate; }

SubmixFormat AudioSubmix::getSampleFormat() const {
switch (mixInfo().m_sampleFormat) {
case SOXR_INT16_I:
default:
return SubmixFormat::Int16;
case SOXR_INT32_I:
return SubmixFormat::Int32;
case SOXR_FLOAT32_I:
return SubmixFormat::Float;
}
}
} // namespace boo
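The send-gain slewing above keeps each send as a {previous, target} pair and crossfades between the two over m_slewFrames frames. A standalone sketch of that interpolation, using only the standard library (illustrative values, not boo API):

#include <array>
#include <cstddef>
#include <cstdio>

int main() {
  std::array<float, 2> gain = {0.25f, 1.0f}; // {previous level, target level}, as in m_sendGains
  const std::size_t slewFrames = 160;        // e.g. one 5 ms period at 32 kHz
  for (std::size_t f = 0; f < slewFrames; f += 40) {
    double t = f / double(slewFrames);
    double g = gain[1] * t + gain[0] * (1.0 - t); // same per-frame blend applied in _pumpAndMix
    std::printf("frame %zu gain %.3f\n", f, g);
  }
  return 0;
}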


@@ -3,7 +3,7 @@
#include "boo/audiodev/IAudioSubmix.hpp"
#include <list>
#include <vector>
#include <array>
#include <unordered_map>
#include "Common.hpp"
@@ -15,88 +15,107 @@ struct AudioUnitVoiceEngine;
struct VSTVoiceEngine;
struct WAVOutVoiceEngine;
namespace boo {
class BaseAudioVoiceEngine;
class AudioVoice;
struct AudioVoiceEngineMixInfo;
class AudioSubmix : public ListNode<AudioSubmix, BaseAudioVoiceEngine*, IAudioSubmix> {
friend class BaseAudioVoiceEngine;
friend class AudioVoiceMono;
friend class AudioVoiceStereo;
friend struct WASAPIAudioVoiceEngine;
friend struct ::AudioUnitVoiceEngine;
friend struct ::VSTVoiceEngine;
friend struct ::WAVOutVoiceEngine;
/* Mixer-engine relationships */
int m_busId;
bool m_mainOut;
/* Callback (effect source, optional) */
IAudioSubmixCallback* m_cb;
/* Slew state for output gains */
size_t m_slewFrames = 0;
size_t m_curSlewFrame = 0;
/* Output gains for each mix-send/channel */
std::unordered_map<IAudioSubmix*, std::array<float, 2>> m_sendGains;
/* Temporary scratch buffers for accumulating submix audio */
std::vector<int16_t> m_scratch16;
std::vector<int32_t> m_scratch32;
std::vector<float> m_scratchFlt;
template <typename T>
std::vector<T>& _getScratch();
/* Override scratch buffers with alternate destination */
int16_t* m_redirect16 = nullptr;
int32_t* m_redirect32 = nullptr;
float* m_redirectFlt = nullptr;
template <typename T>
T*& _getRedirect();
/* C3-linearization support (to mitigate a potential diamond problem on 'clever' submix routes) */
bool _isDirectDependencyOf(AudioSubmix* send);
std::list<AudioSubmix*> _linearizeC3();
static bool _mergeC3(std::list<AudioSubmix*>& output, std::vector<std::list<AudioSubmix*>>& lists);
/* Fill scratch buffers with silence for new mix cycle */
template <typename T>
void _zeroFill();
/* Receive audio from a single voice / submix */
template <typename T>
T* _getMergeBuf(size_t frames);
/* Mix scratch buffers into sends */
template <typename T>
size_t _pumpAndMix(size_t frames);
void _resetOutputSampleRate();
public:
static AudioSubmix*& _getHeadPtr(BaseAudioVoiceEngine* head);
static std::unique_lock<std::recursive_mutex> _getHeadLock(BaseAudioVoiceEngine* head);
std::unique_lock<std::recursive_mutex> destructorLock();
AudioSubmix(BaseAudioVoiceEngine& root, IAudioSubmixCallback* cb, int busId, bool mainOut);
~AudioSubmix();
void resetSendLevels();
void setSendLevel(IAudioSubmix* submix, float level, bool slew);
const AudioVoiceEngineMixInfo& mixInfo() const;
double getSampleRate() const;
SubmixFormat getSampleFormat() const;
};
template <>
inline std::vector<int16_t>& AudioSubmix::_getScratch() {
return m_scratch16;
}
template <>
inline std::vector<int32_t>& AudioSubmix::_getScratch() {
return m_scratch32;
}
template <>
inline std::vector<float>& AudioSubmix::_getScratch() {
return m_scratchFlt;
}
template <>
inline int16_t*& AudioSubmix::_getRedirect<int16_t>() {
return m_redirect16;
}
template <>
inline int32_t*& AudioSubmix::_getRedirect<int32_t>() {
return m_redirect32;
}
template <>
inline float*& AudioSubmix::_getRedirect<float>() {
return m_redirectFlt;
}
} // namespace boo
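The _getScratch()/_getRedirect() specializations above pick the member buffer matching the sample type at compile time. A minimal standalone sketch of the same pattern (illustrative only, not boo code):

#include <cstdint>
#include <vector>

struct Buffers {
  std::vector<int16_t> b16;
  std::vector<int32_t> b32;
  std::vector<float> bFlt;
  template <typename T>
  std::vector<T>& get();
};
template <>
inline std::vector<int16_t>& Buffers::get<int16_t>() { return b16; }
template <>
inline std::vector<int32_t>& Buffers::get<int32_t>() { return b32; }
template <>
inline std::vector<float>& Buffers::get<float>() { return bFlt; }

int main() {
  Buffers bufs;
  bufs.get<float>().resize(160); // resolves to bFlt with no runtime dispatch
  return bufs.get<float>().size() == 160 ? 0 : 1;
}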


@@ -3,383 +3,303 @@
#include "logvisor/logvisor.hpp"
#include <cmath>
namespace boo {
static logvisor::Module Log("boo::AudioVoice");
static AudioMatrixMono DefaultMonoMtx;
static AudioMatrixStereo DefaultStereoMtx;
AudioVoice::AudioVoice(BaseAudioVoiceEngine& root, IAudioVoiceCallback* cb, bool dynamicRate)
: ListNode<AudioVoice, BaseAudioVoiceEngine*, IAudioVoice>(&root), m_cb(cb), m_dynamicRate(dynamicRate) {}
AudioVoice::~AudioVoice() { soxr_delete(m_src); }
AudioVoice*& AudioVoice::_getHeadPtr(BaseAudioVoiceEngine* head) { return head->m_voiceHead; }
std::unique_lock<std::recursive_mutex> AudioVoice::_getHeadLock(BaseAudioVoiceEngine* head) {
return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
}
std::unique_lock<std::recursive_mutex> AudioVoice::destructorLock() {
return std::unique_lock<std::recursive_mutex>{m_head->m_dataMutex};
}
void AudioVoice::_setPitchRatio(double ratio, bool slew) {
if (m_dynamicRate) {
m_sampleRatio = ratio * m_sampleRateIn / m_sampleRateOut;
soxr_error_t err = soxr_set_io_ratio(m_src, m_sampleRatio, slew ? m_head->m_5msFrames : 0);
if (err) {
Log.report(logvisor::Fatal, "unable to set resampler rate: %s", soxr_strerror(err));
m_setPitchRatio = false;
return;
}
  }
m_setPitchRatio = false;
}
void AudioVoice::_midUpdate() {
if (m_resetSampleRate)
_resetSampleRate(m_deferredSampleRate);
if (m_setPitchRatio)
_setPitchRatio(m_pitchRatio, m_slew);
}
void AudioVoice::setPitchRatio(double ratio, bool slew) {
m_setPitchRatio = true;
m_pitchRatio = ratio;
m_slew = slew;
}
void AudioVoice::resetSampleRate(double sampleRate) {
m_resetSampleRate = true;
m_deferredSampleRate = sampleRate;
}
void AudioVoice::start() { m_running = true; }
void AudioVoice::stop() { m_running = false; }
AudioVoiceMono::AudioVoiceMono(BaseAudioVoiceEngine& root, IAudioVoiceCallback* cb, double sampleRate, bool dynamicRate)
: AudioVoice(root, cb, dynamicRate) {
_resetSampleRate(sampleRate);
}
void AudioVoiceMono::_resetSampleRate(double sampleRate) {
soxr_delete(m_src);
double rateOut = m_head->mixInfo().m_sampleRate;
soxr_datatype_t formatOut = m_head->mixInfo().m_sampleFormat;
soxr_io_spec_t ioSpec = soxr_io_spec(SOXR_INT16_I, formatOut);
soxr_quality_spec_t qSpec = soxr_quality_spec(SOXR_20_BITQ, m_dynamicRate ? SOXR_VR : 0);
soxr_error_t err;
m_src = soxr_create(sampleRate, rateOut, 1, &err, &ioSpec, &qSpec, nullptr);
if (err) {
Log.report(logvisor::Fatal, "unable to create soxr resampler: %s", soxr_strerror(err));
m_resetSampleRate = false;
return;
}
m_sampleRateIn = sampleRate;
m_sampleRateOut = rateOut;
m_sampleRatio = m_sampleRateIn / m_sampleRateOut;
soxr_set_input_fn(m_src, soxr_input_fn_t(SRCCallback), this, 0);
_setPitchRatio(m_pitchRatio, false);
m_resetSampleRate = false;
}
size_t AudioVoiceMono::SRCCallback(AudioVoiceMono* ctx, int16_t** data, size_t frames) {
std::vector<int16_t>& scratchIn = ctx->m_head->m_scratchIn;
if (scratchIn.size() < frames)
scratchIn.resize(frames);
*data = scratchIn.data();
if (ctx->m_silentOut) {
memset(scratchIn.data(), 0, frames * 2);
return frames;
} else
return ctx->m_cb->supplyAudio(*ctx, frames, scratchIn.data());
}
bool AudioVoiceMono::isSilent() const {
if (m_sendMatrices.size()) {
for (auto& mtx : m_sendMatrices)
if (!mtx.second.isSilent())
return false;
return true;
} else {
return DefaultMonoMtx.isSilent();
}
}
template <typename T>
size_t AudioVoiceMono::_pumpAndMix(size_t frames) {
auto& scratchPre = m_head->_getScratchPre<T>();
if (scratchPre.size() < frames)
scratchPre.resize(frames + 2);
auto& scratchPost = m_head->_getScratchPost<T>();
if (scratchPost.size() < frames)
scratchPost.resize(frames + 2);
double dt = frames / m_sampleRateOut;
m_cb->preSupplyAudio(*this, dt);
_midUpdate();
if (isSilent()) {
int16_t* dummy;
SRCCallback(this, &dummy, size_t(std::ceil(frames * m_sampleRatio)));
return 0;
}
size_t oDone = soxr_output(m_src, scratchPre.data(), frames);
if (oDone) {
if (m_sendMatrices.size()) {
for (auto& mtx : m_sendMatrices) {
AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
m_cb->routeAudio(oDone, 1, dt, smx.m_busId, scratchPre.data(), scratchPost.data());
mtx.second.mixMonoSampleData(m_head->clientMixInfo(), scratchPost.data(), smx._getMergeBuf<T>(oDone), oDone);
}
} else {
AudioSubmix& smx = *m_head->m_mainSubmix;
m_cb->routeAudio(oDone, 1, dt, m_head->m_mainSubmix->m_busId, scratchPre.data(), scratchPost.data());
DefaultMonoMtx.mixMonoSampleData(m_head->clientMixInfo(), scratchPost.data(), smx._getMergeBuf<T>(oDone), oDone);
}
}
return oDone;
}
void AudioVoiceMono::resetChannelLevels() {
m_head->m_submixesDirty = true;
m_sendMatrices.clear();
}
void AudioVoiceMono::setMonoChannelLevels(IAudioSubmix* submix, const float coefs[8], bool slew) {
if (!submix)
submix = m_head->m_mainSubmix.get();
auto search = m_sendMatrices.find(submix);
if (search == m_sendMatrices.cend())
search = m_sendMatrices.emplace(submix, AudioMatrixMono{}).first;
search->second.setMatrixCoefficients(coefs, slew ? m_head->m_5msFrames : 0);
}
void AudioVoiceMono::setStereoChannelLevels(IAudioSubmix* submix, const float coefs[8][2], bool slew) {
float newCoefs[8] = {coefs[0][0], coefs[1][0], coefs[2][0], coefs[3][0],
coefs[4][0], coefs[5][0], coefs[6][0], coefs[7][0]};
if (!submix)
submix = m_head->m_mainSubmix.get();
auto search = m_sendMatrices.find(submix);
if (search == m_sendMatrices.cend())
search = m_sendMatrices.emplace(submix, AudioMatrixMono{}).first;
search->second.setMatrixCoefficients(newCoefs, slew ? m_head->m_5msFrames : 0);
}
AudioVoiceStereo::AudioVoiceStereo(BaseAudioVoiceEngine& root, IAudioVoiceCallback* cb, double sampleRate,
bool dynamicRate)
: AudioVoice(root, cb, dynamicRate) {
_resetSampleRate(sampleRate);
}
void AudioVoiceStereo::_resetSampleRate(double sampleRate) {
soxr_delete(m_src);
double rateOut = m_head->mixInfo().m_sampleRate;
soxr_datatype_t formatOut = m_head->mixInfo().m_sampleFormat;
soxr_io_spec_t ioSpec = soxr_io_spec(SOXR_INT16_I, formatOut);
soxr_quality_spec_t qSpec = soxr_quality_spec(SOXR_20_BITQ, m_dynamicRate ? SOXR_VR : 0);
soxr_error_t err;
m_src = soxr_create(sampleRate, rateOut, 2, &err, &ioSpec, &qSpec, nullptr);
if (!m_src) {
Log.report(logvisor::Fatal, "unable to create soxr resampler: %s", soxr_strerror(err));
m_resetSampleRate = false;
return;
}
m_sampleRateIn = sampleRate;
m_sampleRateOut = rateOut;
m_sampleRatio = m_sampleRateIn / m_sampleRateOut;
soxr_set_input_fn(m_src, soxr_input_fn_t(SRCCallback), this, 0);
_setPitchRatio(m_pitchRatio, false);
m_resetSampleRate = false;
}
size_t AudioVoiceStereo::SRCCallback(AudioVoiceStereo* ctx, int16_t** data, size_t frames) {
std::vector<int16_t>& scratchIn = ctx->m_head->m_scratchIn;
size_t samples = frames * 2;
if (scratchIn.size() < samples)
scratchIn.resize(samples);
*data = scratchIn.data();
if (ctx->m_silentOut) {
memset(scratchIn.data(), 0, samples * 2);
return frames;
} else
return ctx->m_cb->supplyAudio(*ctx, frames, scratchIn.data());
}
bool AudioVoiceStereo::isSilent() const {
if (m_sendMatrices.size()) {
for (auto& mtx : m_sendMatrices)
if (!mtx.second.isSilent())
return false;
return true;
} else {
return DefaultStereoMtx.isSilent();
}
}
template <typename T>
size_t AudioVoiceStereo::_pumpAndMix(size_t frames) {
size_t samples = frames * 2;
auto& scratchPre = m_head->_getScratchPre<T>();
if (scratchPre.size() < samples)
scratchPre.resize(samples + 4);
auto& scratchPost = m_head->_getScratchPost<T>();
if (scratchPost.size() < samples)
scratchPost.resize(samples + 4);
double dt = frames / m_sampleRateOut;
m_cb->preSupplyAudio(*this, dt);
_midUpdate();
if (isSilent()) {
int16_t* dummy;
SRCCallback(this, &dummy, size_t(std::ceil(frames * m_sampleRatio)));
return 0;
}
size_t oDone = soxr_output(m_src, scratchPre.data(), frames);
if (oDone) {
if (m_sendMatrices.size()) {
for (auto& mtx : m_sendMatrices) {
AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
m_cb->routeAudio(oDone, 2, dt, smx.m_busId, scratchPre.data(), scratchPost.data());
mtx.second.mixStereoSampleData(m_head->clientMixInfo(), scratchPost.data(), smx._getMergeBuf<T>(oDone), oDone);
}
} else {
AudioSubmix& smx = *m_head->m_mainSubmix;
m_cb->routeAudio(oDone, 2, dt, m_head->m_mainSubmix->m_busId, scratchPre.data(), scratchPost.data());
DefaultStereoMtx.mixStereoSampleData(m_head->clientMixInfo(), scratchPost.data(), smx._getMergeBuf<T>(oDone),
oDone);
}
}
return oDone;
}
void AudioVoiceStereo::resetChannelLevels() {
m_head->m_submixesDirty = true;
m_sendMatrices.clear();
}
void AudioVoiceStereo::setMonoChannelLevels(IAudioSubmix* submix, const float coefs[8], bool slew) {
float newCoefs[8][2] = {{coefs[0], coefs[0]}, {coefs[1], coefs[1]}, {coefs[2], coefs[2]}, {coefs[3], coefs[3]},
{coefs[4], coefs[4]}, {coefs[5], coefs[5]}, {coefs[6], coefs[6]}, {coefs[7], coefs[7]}};
if (!submix)
submix = m_head->m_mainSubmix.get();
auto search = m_sendMatrices.find(submix);
if (search == m_sendMatrices.cend())
search = m_sendMatrices.emplace(submix, AudioMatrixStereo{}).first;
search->second.setMatrixCoefficients(newCoefs, slew ? m_head->m_5msFrames : 0);
}
void AudioVoiceStereo::setStereoChannelLevels(IAudioSubmix* submix, const float coefs[8][2], bool slew) {
if (!submix)
submix = m_head->m_mainSubmix.get();
auto search = m_sendMatrices.find(submix);
if (search == m_sendMatrices.cend())
search = m_sendMatrices.emplace(submix, AudioMatrixStereo{}).first;
search->second.setMatrixCoefficients(coefs, slew ? m_head->m_5msFrames : 0);
}
} // namespace boo
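_setPitchRatio above folds the pitch multiplier into the resampler's in/out ratio: m_sampleRatio = ratio * m_sampleRateIn / m_sampleRateOut. A standalone arithmetic check of that expression, with example numbers only (not boo API):

#include <cstdio>

int main() {
  double sampleRateIn = 32000.0;  // voice's source rate
  double sampleRateOut = 48000.0; // engine mix rate
  double pitchRatio = 1.5;        // play back 1.5x faster
  double ioRatio = pitchRatio * sampleRateIn / sampleRateOut;
  std::printf("io ratio = %.3f\n", ioRatio); // 1.5 * 32000 / 48000 = 1.000
  return 0;
}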


@@ -12,121 +12,124 @@ struct AudioUnitVoiceEngine;
struct VSTVoiceEngine;
struct WAVOutVoiceEngine;
namespace boo {
class BaseAudioVoiceEngine;
struct AudioVoiceEngineMixInfo;
struct IAudioSubmix;
class AudioVoice : public ListNode<AudioVoice, BaseAudioVoiceEngine*, IAudioVoice> {
friend class BaseAudioVoiceEngine;
friend class AudioSubmix;
friend struct WASAPIAudioVoiceEngine;
friend struct ::AudioUnitVoiceEngine;
friend struct ::VSTVoiceEngine;
friend struct ::WAVOutVoiceEngine;
protected:
/* Callback (audio source) */
IAudioVoiceCallback* m_cb;
/* Sample-rate converter */
soxr_t m_src = nullptr;
double m_sampleRateIn;
double m_sampleRateOut;
bool m_dynamicRate;
/* Running bool */
bool m_running = false;
/* Deferred sample-rate reset */
bool m_resetSampleRate = false;
double m_deferredSampleRate;
virtual void _resetSampleRate(double sampleRate) = 0;
/* Deferred pitch ratio set */
bool m_setPitchRatio = false;
double m_pitchRatio = 1.0;
double m_sampleRatio = 1.0;
bool m_slew = false;
void _setPitchRatio(double ratio, bool slew);
/* Mid-pump update */
void _midUpdate();
virtual size_t pumpAndMix16(size_t frames) = 0;
virtual size_t pumpAndMix32(size_t frames) = 0;
virtual size_t pumpAndMixFlt(size_t frames) = 0;
template <typename T>
size_t pumpAndMix(size_t frames);
AudioVoice(BaseAudioVoiceEngine& root, IAudioVoiceCallback* cb, bool dynamicRate);
public:
static AudioVoice*& _getHeadPtr(BaseAudioVoiceEngine* head);
static std::unique_lock<std::recursive_mutex> _getHeadLock(BaseAudioVoiceEngine* head);
std::unique_lock<std::recursive_mutex> destructorLock();
~AudioVoice();
void resetSampleRate(double sampleRate);
void setPitchRatio(double ratio, bool slew);
void start();
void stop();
double getSampleRateIn() const { return m_sampleRateIn; }
double getSampleRateOut() const { return m_sampleRateOut; }
};
template <>
inline size_t AudioVoice::pumpAndMix<int16_t>(size_t frames) {
return pumpAndMix16(frames);
}
template <>
inline size_t AudioVoice::pumpAndMix<int32_t>(size_t frames) {
return pumpAndMix32(frames);
}
template <>
inline size_t AudioVoice::pumpAndMix<float>(size_t frames) {
return pumpAndMixFlt(frames);
}
class AudioVoiceMono : public AudioVoice {
std::unordered_map<IAudioSubmix*, AudioMatrixMono> m_sendMatrices;
bool m_silentOut = false;
void _resetSampleRate(double sampleRate);
static size_t SRCCallback(AudioVoiceMono* ctx, int16_t** data, size_t requestedLen);
bool isSilent() const;
template <typename T>
size_t _pumpAndMix(size_t frames);
size_t pumpAndMix16(size_t frames) { return _pumpAndMix<int16_t>(frames); }
size_t pumpAndMix32(size_t frames) { return _pumpAndMix<int32_t>(frames); }
size_t pumpAndMixFlt(size_t frames) { return _pumpAndMix<float>(frames); }
public:
AudioVoiceMono(BaseAudioVoiceEngine& root, IAudioVoiceCallback* cb, double sampleRate, bool dynamicRate);
void resetChannelLevels();
void setMonoChannelLevels(IAudioSubmix* submix, const float coefs[8], bool slew);
void setStereoChannelLevels(IAudioSubmix* submix, const float coefs[8][2], bool slew);
};
class AudioVoiceStereo : public AudioVoice {
std::unordered_map<IAudioSubmix*, AudioMatrixStereo> m_sendMatrices;
bool m_silentOut = false;
void _resetSampleRate(double sampleRate);
static size_t SRCCallback(AudioVoiceStereo* ctx, int16_t** data, size_t requestedLen);
bool isSilent() const;
template <typename T>
size_t _pumpAndMix(size_t frames);
size_t pumpAndMix16(size_t frames) { return _pumpAndMix<int16_t>(frames); }
size_t pumpAndMix32(size_t frames) { return _pumpAndMix<int32_t>(frames); }
size_t pumpAndMixFlt(size_t frames) { return _pumpAndMix<float>(frames); }
public:
AudioVoiceStereo(BaseAudioVoiceEngine& root, IAudioVoiceCallback* cb, double sampleRate, bool dynamicRate);
void resetChannelLevels();
void setMonoChannelLevels(IAudioSubmix* submix, const float coefs[8], bool slew);
void setStereoChannelLevels(IAudioSubmix* submix, const float coefs[8][2], bool slew);
};
} // namespace boo


@@ -1,156 +1,123 @@
#include "AudioVoiceEngine.hpp"
#include <cassert>
namespace boo {
BaseAudioVoiceEngine::~BaseAudioVoiceEngine() {
m_mainSubmix.reset();
assert(m_voiceHead == nullptr && "Dangling voices detected");
assert(m_submixHead == nullptr && "Dangling submixes detected");
}
template <typename T>
void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, T* dataOut) {
if (dataOut)
memset(dataOut, 0, sizeof(T) * frames * m_mixInfo.m_channelMap.m_channelCount);
if (m_ltRtProcessing) {
size_t sampleCount = m_5msFrames * 5;
if (_getLtRtIn<T>().size() < sampleCount)
_getLtRtIn<T>().resize(sampleCount);
m_mainSubmix->_getRedirect<T>() = _getLtRtIn<T>().data();
} else {
m_mainSubmix->_getRedirect<T>() = dataOut;
}
if (m_submixesDirty) {
m_linearizedSubmixes = m_mainSubmix->_linearizeC3();
m_submixesDirty = false;
}
size_t remFrames = frames;
while (remFrames) {
size_t thisFrames;
if (remFrames < m_5msFrames) {
thisFrames = remFrames;
if (m_engineCallback)
m_engineCallback->on5MsInterval(*this, thisFrames / double(m_5msFrames) * 5.0 / 1000.0);
} else {
thisFrames = m_5msFrames;
if (m_engineCallback)
m_engineCallback->on5MsInterval(*this, 5.0 / 1000.0);
}
    if (m_ltRtProcessing)
      std::fill(_getLtRtIn<T>().begin(), _getLtRtIn<T>().end(), 0.f);
for (auto it = m_linearizedSubmixes.rbegin(); it != m_linearizedSubmixes.rend(); ++it)
(*it)->_zeroFill<T>();
if (m_voiceHead)
for (AudioVoice& vox : *m_voiceHead)
if (vox.m_running)
vox.pumpAndMix<T>(thisFrames);
for (auto it = m_linearizedSubmixes.rbegin(); it != m_linearizedSubmixes.rend(); ++it)
(*it)->_pumpAndMix<T>(thisFrames);
remFrames -= thisFrames;
if (!dataOut)
continue;
if (m_ltRtProcessing) {
m_ltRtProcessing->Process(_getLtRtIn<T>().data(), dataOut, int(thisFrames));
m_mainSubmix->_getRedirect<T>() = _getLtRtIn<T>().data();
}
size_t sampleCount = thisFrames * m_mixInfo.m_channelMap.m_channelCount;
for (size_t i = 0; i < sampleCount; ++i)
dataOut[i] *= m_totalVol;
dataOut += sampleCount;
}
if (m_engineCallback)
m_engineCallback->onPumpCycleComplete(*this);
}
template void BaseAudioVoiceEngine::_pumpAndMixVoices<int16_t>(size_t frames, int16_t* dataOut);
template void BaseAudioVoiceEngine::_pumpAndMixVoices<int32_t>(size_t frames, int32_t* dataOut);
template void BaseAudioVoiceEngine::_pumpAndMixVoices<float>(size_t frames, float* dataOut);
void BaseAudioVoiceEngine::_resetSampleRate() {
if (m_voiceHead)
for (boo::AudioVoice& vox : *m_voiceHead)
vox._resetSampleRate(vox.m_sampleRateIn);
if (m_submixHead)
for (boo::AudioSubmix& smx : *m_submixHead)
smx._resetOutputSampleRate();
}
ObjToken<IAudioVoice> BaseAudioVoiceEngine::allocateNewMonoVoice(double sampleRate, IAudioVoiceCallback* cb,
bool dynamicPitch) {
return {new AudioVoiceMono(*this, cb, sampleRate, dynamicPitch)};
}
ObjToken<IAudioVoice> BaseAudioVoiceEngine::allocateNewStereoVoice(double sampleRate, IAudioVoiceCallback* cb,
bool dynamicPitch) {
return {new AudioVoiceStereo(*this, cb, sampleRate, dynamicPitch)};
}
ObjToken<IAudioSubmix> BaseAudioVoiceEngine::allocateNewSubmix(bool mainOut, IAudioSubmixCallback* cb, int busId) {
return {new AudioSubmix(*this, cb, busId, mainOut)};
}
void BaseAudioVoiceEngine::setCallbackInterface(IAudioVoiceEngineCallback* cb) { m_engineCallback = cb; }
void BaseAudioVoiceEngine::setVolume(float vol) { m_totalVol = vol; }
bool BaseAudioVoiceEngine::enableLtRt(bool enable) {
if (enable && m_mixInfo.m_channelMap.m_channelCount == 2 && m_mixInfo.m_channels == AudioChannelSet::Stereo)
m_ltRtProcessing = std::make_unique<LtRtProcessing>(m_5msFrames, m_mixInfo);
else
m_ltRtProcessing.reset();
return m_ltRtProcessing.operator bool();
}
const AudioVoiceEngineMixInfo& BaseAudioVoiceEngine::mixInfo() const { return m_mixInfo; }
const AudioVoiceEngineMixInfo& BaseAudioVoiceEngine::clientMixInfo() const {
return m_ltRtProcessing ? m_ltRtProcessing->inMixInfo() : m_mixInfo;
}
} // namespace boo
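_pumpAndMixVoices above walks the requested frame count in m_5msFrames-sized slices so the on5MsInterval callbacks fire on a steady grid, with a shorter final slice when the request is not a multiple of the period. A standalone sketch of that chunking, with illustrative numbers only:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t fiveMsFrames = 160; // e.g. 32000 Hz * 0.005 s
  std::size_t remFrames = 500;          // frames requested by the audio backend
  while (remFrames) {
    std::size_t thisFrames = std::min(remFrames, fiveMsFrames);
    std::printf("mix %zu frames\n", thisFrames); // prints 160, 160, 160, 20
    remFrames -= thisFrames;
  }
  return 0;
}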


@@ -8,87 +8,110 @@
#include <functional>
#include <mutex>
namespace boo {
/** Base class for managing mixing and sample-rate-conversion amongst active voices */
class BaseAudioVoiceEngine : public IAudioVoiceEngine {
protected:
friend class AudioVoice;
friend class AudioSubmix;
friend class AudioVoiceMono;
friend class AudioVoiceStereo;
float m_totalVol = 1.f;
AudioVoiceEngineMixInfo m_mixInfo;
std::recursive_mutex m_dataMutex;
AudioVoice* m_voiceHead = nullptr;
AudioSubmix* m_submixHead = nullptr;
size_t m_5msFrames = 0;
IAudioVoiceEngineCallback* m_engineCallback = nullptr;
/* Shared scratch buffers for accumulating audio data for resampling */
std::vector<int16_t> m_scratchIn;
std::vector<int16_t> m_scratch16Pre;
std::vector<int32_t> m_scratch32Pre;
std::vector<float> m_scratchFltPre;
template <typename T>
std::vector<T>& _getScratchPre();
std::vector<int16_t> m_scratch16Post;
std::vector<int32_t> m_scratch32Post;
std::vector<float> m_scratchFltPost;
template <typename T>
std::vector<T>& _getScratchPost();
/* LtRt processing if enabled */
std::unique_ptr<LtRtProcessing> m_ltRtProcessing;
std::vector<int16_t> m_ltRtIn16;
std::vector<int32_t> m_ltRtIn32;
std::vector<float> m_ltRtInFlt;
template <typename T>
std::vector<T>& _getLtRtIn();
std::unique_ptr<AudioSubmix> m_mainSubmix;
std::list<AudioSubmix*> m_linearizedSubmixes;
bool m_submixesDirty = true;
template <typename T>
void _pumpAndMixVoices(size_t frames, T* dataOut);
void _resetSampleRate();
public:
BaseAudioVoiceEngine() : m_mainSubmix(std::make_unique<AudioSubmix>(*this, nullptr, -1, false)) {}
~BaseAudioVoiceEngine();
ObjToken<IAudioVoice> allocateNewMonoVoice(double sampleRate, IAudioVoiceCallback* cb, bool dynamicPitch = false);
ObjToken<IAudioVoice> allocateNewStereoVoice(double sampleRate, IAudioVoiceCallback* cb, bool dynamicPitch = false);
ObjToken<IAudioSubmix> allocateNewSubmix(bool mainOut, IAudioSubmixCallback* cb, int busId);
void setCallbackInterface(IAudioVoiceEngineCallback* cb);
void setVolume(float vol);
bool enableLtRt(bool enable);
const AudioVoiceEngineMixInfo& mixInfo() const;
const AudioVoiceEngineMixInfo& clientMixInfo() const;
AudioChannelSet getAvailableSet() { return clientMixInfo().m_channels; }
void pumpAndMixVoices() {}
size_t get5MsFrames() const { return m_5msFrames; }
};
template <>
inline std::vector<int16_t>& BaseAudioVoiceEngine::_getScratchPre<int16_t>() {
return m_scratch16Pre;
}
template <>
inline std::vector<int32_t>& BaseAudioVoiceEngine::_getScratchPre<int32_t>() {
return m_scratch32Pre;
}
template <>
inline std::vector<float>& BaseAudioVoiceEngine::_getScratchPre<float>() {
return m_scratchFltPre;
}
template <>
inline std::vector<int16_t>& BaseAudioVoiceEngine::_getScratchPost<int16_t>() {
return m_scratch16Post;
}
template <>
inline std::vector<int32_t>& BaseAudioVoiceEngine::_getScratchPost<int32_t>() {
return m_scratch32Post;
}
template <>
inline std::vector<float>& BaseAudioVoiceEngine::_getScratchPost<float>() {
return m_scratchFltPost;
}
template <>
inline std::vector<int16_t>& BaseAudioVoiceEngine::_getLtRtIn<int16_t>() {
return m_ltRtIn16;
}
template <>
inline std::vector<int32_t>& BaseAudioVoiceEngine::_getLtRtIn<int32_t>() {
return m_ltRtIn32;
}
template <>
inline std::vector<float>& BaseAudioVoiceEngine::_getLtRtIn<float>() {
return m_ltRtInFlt;
}
} // namespace boo


@@ -4,19 +4,16 @@
#include "../Common.hpp"
#include "boo/audiodev/IAudioVoice.hpp"
namespace boo {
/** Pertinent information from audio backend about optimal mixed-audio representation */
struct AudioVoiceEngineMixInfo {
double m_sampleRate = 32000.0;
soxr_datatype_t m_sampleFormat = SOXR_FLOAT32_I;
unsigned m_bitsPerSample = 32;
AudioChannelSet m_channels = AudioChannelSet::Stereo;
ChannelMap m_channelMap = {2, {AudioChannel::FrontLeft, AudioChannel::FrontRight}};
size_t m_periodFrames = 160;
};
} // namespace boo


@@ -7,304 +7,259 @@
#include <alsa/asoundlib.h>
#include <signal.h>
namespace boo {
extern logvisor::Module ALSALog;
static inline double TimespecToDouble(struct timespec& ts) { return ts.tv_sec + ts.tv_nsec / 1.0e9; }
struct LinuxMidi : BaseAudioVoiceEngine {
std::unordered_map<std::string, IMIDIPort*> m_openHandles;
void _addOpenHandle(const char* name, IMIDIPort* port) { m_openHandles[name] = port; }
void _removeOpenHandle(IMIDIPort* port) {
for (auto it = m_openHandles.begin(); it != m_openHandles.end();) {
if (it->second == port) {
it = m_openHandles.erase(it);
continue;
}
++it;
}
  }
~LinuxMidi() {
for (auto& p : m_openHandles)
p.second->_disown();
}
std::vector<std::pair<std::string, std::string>> enumerateMIDIInputs() const {
std::vector<std::pair<std::string, std::string>> ret;
int status;
int card = -1; /* use -1 to prime the pump of iterating through card list */
if ((status = snd_card_next(&card)) < 0)
return {};
if (card < 0)
return {};
snd_rawmidi_info_t* info;
snd_rawmidi_info_malloc(&info);
while (card >= 0) {
snd_ctl_t* ctl;
char name[32];
int device = -1;
int status;
sprintf(name, "hw:%d", card);
if ((status = snd_ctl_open(&ctl, name, 0)) < 0)
continue;
do {
status = snd_ctl_rawmidi_next_device(ctl, &device);
if (status < 0)
break;
if (device >= 0) {
sprintf(name + strlen(name), ",%d", device);
auto search = m_openHandles.find(name);
if (search != m_openHandles.cend()) {
ret.push_back(std::make_pair(name, search->second->description()));
continue;
}
snd_rawmidi_t* midi;
if (!snd_rawmidi_open(&midi, nullptr, name, SND_RAWMIDI_NONBLOCK)) {
snd_rawmidi_info(midi, info);
ret.push_back(std::make_pair(name, snd_rawmidi_info_get_name(info)));
snd_rawmidi_close(midi);
}
}
} while (device >= 0);
snd_ctl_close(ctl);
if ((status = snd_card_next(&card)) < 0)
break;
}
snd_rawmidi_info_free(info);
return ret;
}
bool supportsVirtualMIDIIn() const { return true; }
static void MIDIFreeProc(void* midiStatus) { snd_rawmidi_status_free((snd_rawmidi_status_t*)midiStatus); }
static void MIDIReceiveProc(snd_rawmidi_t* midi, const ReceiveFunctor& receiver) {
logvisor::RegisterThreadName("Boo MIDI");
snd_rawmidi_status_t* midiStatus;
snd_rawmidi_status_malloc(&midiStatus);
pthread_cleanup_push(MIDIFreeProc, midiStatus);
uint8_t buf[512];
while (true) {
snd_htimestamp_t ts;
snd_rawmidi_status(midi, midiStatus);
snd_rawmidi_status_get_tstamp(midiStatus, &ts);
int rdBytes = snd_rawmidi_read(midi, buf, 512);
if (rdBytes < 0) {
if (rdBytes != -EINTR) {
ALSALog.report(logvisor::Error, "MIDI connection lost");
break;
}
continue;
}
int oldtype;
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldtype);
receiver(std::vector<uint8_t>(std::cbegin(buf), std::cbegin(buf) + rdBytes), TimespecToDouble(ts));
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldtype);
pthread_testcancel();
}
pthread_cleanup_pop(1);
}
struct MIDIIn : public IMIDIIn {
snd_rawmidi_t* m_midi;
std::thread m_midiThread;
MIDIIn(LinuxMidi* parent, snd_rawmidi_t* midi, bool virt, ReceiveFunctor&& receiver)
: IMIDIIn(parent, virt, std::move(receiver))
, m_midi(midi)
, m_midiThread(std::bind(MIDIReceiveProc, m_midi, m_receiver)) {}
~MIDIIn() {
if (m_parent)
static_cast<LinuxMidi*>(m_parent)->_removeOpenHandle(this);
pthread_cancel(m_midiThread.native_handle());
if (m_midiThread.joinable())
m_midiThread.join();
snd_rawmidi_close(m_midi);
}
std::string description() const {
snd_rawmidi_info_t* info;
snd_rawmidi_info_alloca(&info);
snd_rawmidi_info(m_midi, info);
std::string ret = snd_rawmidi_info_get_name(info);
return ret;
}
};
struct MIDIOut : public IMIDIOut {
snd_rawmidi_t* m_midi;
MIDIOut(LinuxMidi* parent, snd_rawmidi_t* midi, bool virt) : IMIDIOut(parent, virt), m_midi(midi) {}
~MIDIOut() {
if (m_parent)
static_cast<LinuxMidi*>(m_parent)->_removeOpenHandle(this);
snd_rawmidi_close(m_midi);
}
std::string description() const {
snd_rawmidi_info_t* info;
snd_rawmidi_info_alloca(&info);
snd_rawmidi_info(m_midi, info);
std::string ret = snd_rawmidi_info_get_name(info);
return ret;
}
size_t send(const void* buf, size_t len) const { return size_t(std::max(0l, snd_rawmidi_write(m_midi, buf, len))); }
};
struct MIDIInOut : public IMIDIInOut {
snd_rawmidi_t* m_midiIn;
snd_rawmidi_t* m_midiOut;
std::thread m_midiThread;
MIDIInOut(LinuxMidi* parent, snd_rawmidi_t* midiIn, snd_rawmidi_t* midiOut, bool virt, ReceiveFunctor&& receiver)
: IMIDIInOut(parent, virt, std::move(receiver))
, m_midiIn(midiIn)
, m_midiOut(midiOut)
, m_midiThread(std::bind(MIDIReceiveProc, m_midiIn, m_receiver)) {}
~MIDIInOut() {
if (m_parent)
static_cast<LinuxMidi*>(m_parent)->_removeOpenHandle(this);
pthread_cancel(m_midiThread.native_handle());
if (m_midiThread.joinable())
m_midiThread.join();
snd_rawmidi_close(m_midiIn);
snd_rawmidi_close(m_midiOut);
}
std::unique_ptr<IMIDIOut> newVirtualMIDIOut()
{
int status;
snd_rawmidi_t* midi;
status = snd_rawmidi_open(nullptr, &midi, "virtual", 0);
if (status)
return {};
return std::make_unique<MIDIOut>(nullptr, midi, true);
std::string description() const {
snd_rawmidi_info_t* info;
snd_rawmidi_info_alloca(&info);
snd_rawmidi_info(m_midiIn, info);
std::string ret = snd_rawmidi_info_get_name(info);
return ret;
}
std::unique_ptr<IMIDIInOut> newVirtualMIDIInOut(ReceiveFunctor&& receiver)
{
int status;
snd_rawmidi_t* midiIn;
snd_rawmidi_t* midiOut;
status = snd_rawmidi_open(&midiIn, &midiOut, "virtual", 0);
if (status)
return {};
return std::make_unique<MIDIInOut>(nullptr, midiIn, midiOut, true, std::move(receiver));
size_t send(const void* buf, size_t len) const {
return size_t(std::max(0l, snd_rawmidi_write(m_midiOut, buf, len)));
}
};
std::unique_ptr<IMIDIIn> newRealMIDIIn(const char* name, ReceiveFunctor&& receiver)
{
snd_rawmidi_t* midi;
int status = snd_rawmidi_open(&midi, nullptr, name, 0);
if (status)
return {};
auto ret = std::make_unique<MIDIIn>(this, midi, true, std::move(receiver));
_addOpenHandle(name, ret.get());
return ret;
}
std::unique_ptr<IMIDIIn> newVirtualMIDIIn(ReceiveFunctor&& receiver) {
int status;
snd_rawmidi_t* midi;
status = snd_rawmidi_open(&midi, nullptr, "virtual", 0);
if (status)
return {};
return std::make_unique<MIDIIn>(nullptr, midi, true, std::move(receiver));
}
std::unique_ptr<IMIDIOut> newRealMIDIOut(const char* name)
{
snd_rawmidi_t* midi;
int status = snd_rawmidi_open(nullptr, &midi, name, 0);
if (status)
return {};
auto ret = std::make_unique<MIDIOut>(this, midi, true);
_addOpenHandle(name, ret.get());
return ret;
}
std::unique_ptr<IMIDIOut> newVirtualMIDIOut() {
int status;
snd_rawmidi_t* midi;
status = snd_rawmidi_open(nullptr, &midi, "virtual", 0);
if (status)
return {};
return std::make_unique<MIDIOut>(nullptr, midi, true);
}
std::unique_ptr<IMIDIInOut> newRealMIDIInOut(const char* name, ReceiveFunctor&& receiver)
{
snd_rawmidi_t* midiIn;
snd_rawmidi_t* midiOut;
int status = snd_rawmidi_open(&midiIn, &midiOut, name, 0);
if (status)
return {};
auto ret = std::make_unique<MIDIInOut>(this, midiIn, midiOut, true, std::move(receiver));
_addOpenHandle(name, ret.get());
return ret;
}
std::unique_ptr<IMIDIInOut> newVirtualMIDIInOut(ReceiveFunctor&& receiver) {
int status;
snd_rawmidi_t* midiIn;
snd_rawmidi_t* midiOut;
status = snd_rawmidi_open(&midiIn, &midiOut, "virtual", 0);
if (status)
return {};
return std::make_unique<MIDIInOut>(nullptr, midiIn, midiOut, true, std::move(receiver));
}
bool useMIDILock() const {return true;}
std::unique_ptr<IMIDIIn> newRealMIDIIn(const char* name, ReceiveFunctor&& receiver) {
snd_rawmidi_t* midi;
int status = snd_rawmidi_open(&midi, nullptr, name, 0);
if (status)
return {};
auto ret = std::make_unique<MIDIIn>(this, midi, true, std::move(receiver));
_addOpenHandle(name, ret.get());
return ret;
}
std::unique_ptr<IMIDIOut> newRealMIDIOut(const char* name) {
snd_rawmidi_t* midi;
int status = snd_rawmidi_open(nullptr, &midi, name, 0);
if (status)
return {};
auto ret = std::make_unique<MIDIOut>(this, midi, true);
_addOpenHandle(name, ret.get());
return ret;
}
std::unique_ptr<IMIDIInOut> newRealMIDIInOut(const char* name, ReceiveFunctor&& receiver) {
snd_rawmidi_t* midiIn;
snd_rawmidi_t* midiOut;
int status = snd_rawmidi_open(&midiIn, &midiOut, name, 0);
if (status)
return {};
auto ret = std::make_unique<MIDIInOut>(this, midiIn, midiOut, true, std::move(receiver));
_addOpenHandle(name, ret.get());
return ret;
}
bool useMIDILock() const { return true; }
};
}
} // namespace boo
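// Illustrative sketch (not part of this commit): the ALSA rawmidi receive
// pattern used by the MIDI thread loop above, written as a hypothetical
// standalone program linked against -lasound. The "virtual" device name and
// the 512-byte read buffer mirror the engine code; everything else is assumed.
#include <alsa/asoundlib.h>
#include <cerrno>
#include <cstdint>
#include <cstdio>

int main() {
  snd_rawmidi_t* in = nullptr;
  // "virtual" creates a sequencer-backed port that other ALSA clients can target.
  if (snd_rawmidi_open(&in, nullptr, "virtual", 0))
    return 1;
  uint8_t buf[512];
  for (;;) {
    ssize_t n = snd_rawmidi_read(in, buf, sizeof(buf));
    if (n < 0) {
      if (n == -EINTR)
        continue;  // interrupted read: retry, as the engine does
      break;       // any other error is treated as a lost connection
    }
    std::printf("received %zd MIDI bytes\n", n);
  }
  snd_rawmidi_close(in);
  return 0;
}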


@@ -5,28 +5,23 @@
#undef min
#undef max
namespace boo
{
namespace boo {
template <typename T>
inline T ClampFull(float in)
{
if(std::is_floating_point<T>())
{
return std::min<T>(std::max<T>(in, -1.f), 1.f);
}
else
{
constexpr T MAX = std::numeric_limits<T>::max();
constexpr T MIN = std::numeric_limits<T>::min();
inline T ClampFull(float in) {
if (std::is_floating_point<T>()) {
return std::min<T>(std::max<T>(in, -1.f), 1.f);
} else {
constexpr T MAX = std::numeric_limits<T>::max();
constexpr T MIN = std::numeric_limits<T>::min();
if (in < MIN)
return MIN;
else if (in > MAX)
return MAX;
else
return in;
}
if (in < MIN)
return MIN;
else if (in > MAX)
return MAX;
else
return in;
}
}
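// Illustrative sketch (assumption, not from this codebase) of the two clamping
// regimes ClampFull above distinguishes: floating-point samples saturate to
// [-1, 1], integer samples saturate to the type's full range (shown for int16_t).
#include <algorithm>
#include <cstdint>

inline float clampFloatSample(float in) { return std::min(1.f, std::max(-1.f, in)); }
inline int16_t clampInt16Sample(float in) { return int16_t(std::min(32767.f, std::max(-32768.f, in))); }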
#if INTEL_IPP
@@ -38,148 +33,130 @@ inline T ClampFull(float in)
#if USE_LPF
static constexpr int FirTaps = 27;
FIRFilter12k::FIRFilter12k(int windowFrames, double sampleRate)
{
Ipp64f* taps = ippsMalloc_64f(FirTaps);
Ipp32f* taps32 = ippsMalloc_32f(FirTaps);
int sizeSpec, sizeBuf;
FIRFilter12k::FIRFilter12k(int windowFrames, double sampleRate) {
Ipp64f* taps = ippsMalloc_64f(FirTaps);
Ipp32f* taps32 = ippsMalloc_32f(FirTaps);
int sizeSpec, sizeBuf;
ippsFIRGenGetBufferSize(FirTaps, &sizeBuf);
m_firBuffer = ippsMalloc_8u(sizeBuf);
ippsFIRGenLowpass_64f(12000.0 / sampleRate, taps, FirTaps, ippWinBartlett, ippTrue, m_firBuffer);
ippsConvert_64f32f(taps, taps32, FirTaps);
ippsFree(taps);
ippsFree(m_firBuffer);
ippsFIRGenGetBufferSize(FirTaps, &sizeBuf);
m_firBuffer = ippsMalloc_8u(sizeBuf);
ippsFIRGenLowpass_64f(12000.0 / sampleRate, taps, FirTaps, ippWinBartlett, ippTrue, m_firBuffer);
ippsConvert_64f32f(taps, taps32, FirTaps);
ippsFree(taps);
ippsFree(m_firBuffer);
m_dlySrc = ippsMalloc_32f(FirTaps);
m_dlySrc = ippsMalloc_32f(FirTaps);
ippsFIRSRGetSize(FirTaps, ipp32f, &sizeSpec, &sizeBuf);
m_firSpec = (IppsFIRSpec_32f*)ippsMalloc_8u(sizeSpec);
m_firBuffer = ippsMalloc_8u(sizeBuf);
ippsFIRSRInit_32f(taps32, FirTaps, ippAlgDirect, m_firSpec);
ippsFree(taps32);
ippsFIRSRGetSize(FirTaps, ipp32f, &sizeSpec, &sizeBuf);
m_firSpec = (IppsFIRSpec_32f*)ippsMalloc_8u(sizeSpec);
m_firBuffer = ippsMalloc_8u(sizeBuf);
ippsFIRSRInit_32f(taps32, FirTaps, ippAlgDirect, m_firSpec);
ippsFree(taps32);
m_inBuf = ippsMalloc_32f(windowFrames);
m_inBuf = ippsMalloc_32f(windowFrames);
}
FIRFilter12k::~FIRFilter12k()
{
ippsFree(m_firSpec);
ippsFree(m_firBuffer);
ippsFree(m_dlySrc);
ippsFree(m_inBuf);
FIRFilter12k::~FIRFilter12k() {
ippsFree(m_firSpec);
ippsFree(m_firBuffer);
ippsFree(m_dlySrc);
ippsFree(m_inBuf);
}
void FIRFilter12k::Process(Ipp32f* buf, int windowFrames)
{
ippsZero_32f(m_dlySrc, FirTaps);
ippsMove_32f(buf, m_inBuf, windowFrames);
ippsFIRSR_32f(m_inBuf, buf, windowFrames, m_firSpec, m_dlySrc, nullptr, m_firBuffer);
void FIRFilter12k::Process(Ipp32f* buf, int windowFrames) {
ippsZero_32f(m_dlySrc, FirTaps);
ippsMove_32f(buf, m_inBuf, windowFrames);
ippsFIRSR_32f(m_inBuf, buf, windowFrames, m_firSpec, m_dlySrc, nullptr, m_firBuffer);
}
#endif
WindowedHilbert::WindowedHilbert(int windowFrames, double sampleRate) :
WindowedHilbert::WindowedHilbert(int windowFrames, double sampleRate)
:
#if USE_LPF
m_fir(windowFrames, sampleRate),
m_fir(windowFrames, sampleRate)
,
#endif
m_windowFrames(windowFrames),
m_halfFrames(windowFrames / 2),
m_inputBuf(ippsMalloc_32f(m_windowFrames * 2 + m_halfFrames)),
m_outputBuf(ippsMalloc_32fc(m_windowFrames * 4)),
m_hammingTable(ippsMalloc_32f(m_halfFrames))
{
ippsZero_32f(m_inputBuf, m_windowFrames * 2 + m_halfFrames);
ippsZero_32fc(m_outputBuf, m_windowFrames * 4);
m_output[0] = m_outputBuf;
m_output[1] = m_output[0] + m_windowFrames;
m_output[2] = m_output[1] + m_windowFrames;
m_output[3] = m_output[2] + m_windowFrames;
int sizeSpec, sizeBuf;
ippsHilbertGetSize_32f32fc(m_windowFrames, ippAlgHintFast, &sizeSpec, &sizeBuf);
m_spec = (IppsHilbertSpec*)ippMalloc(sizeSpec);
m_buffer = (Ipp8u*)ippMalloc(sizeBuf);
ippsHilbertInit_32f32fc(m_windowFrames, ippAlgHintFast, m_spec, m_buffer);
m_windowFrames(windowFrames)
, m_halfFrames(windowFrames / 2)
, m_inputBuf(ippsMalloc_32f(m_windowFrames * 2 + m_halfFrames))
, m_outputBuf(ippsMalloc_32fc(m_windowFrames * 4))
, m_hammingTable(ippsMalloc_32f(m_halfFrames)) {
ippsZero_32f(m_inputBuf, m_windowFrames * 2 + m_halfFrames);
ippsZero_32fc(m_outputBuf, m_windowFrames * 4);
m_output[0] = m_outputBuf;
m_output[1] = m_output[0] + m_windowFrames;
m_output[2] = m_output[1] + m_windowFrames;
m_output[3] = m_output[2] + m_windowFrames;
int sizeSpec, sizeBuf;
ippsHilbertGetSize_32f32fc(m_windowFrames, ippAlgHintFast, &sizeSpec, &sizeBuf);
m_spec = (IppsHilbertSpec*)ippMalloc(sizeSpec);
m_buffer = (Ipp8u*)ippMalloc(sizeBuf);
ippsHilbertInit_32f32fc(m_windowFrames, ippAlgHintFast, m_spec, m_buffer);
for (int i=0 ; i<m_halfFrames ; ++i)
m_hammingTable[i] = Ipp32f(std::cos(M_PI * (i / double(m_halfFrames) + 1.0)) * 0.5 + 0.5);
for (int i = 0; i < m_halfFrames; ++i)
m_hammingTable[i] = Ipp32f(std::cos(M_PI * (i / double(m_halfFrames) + 1.0)) * 0.5 + 0.5);
}
WindowedHilbert::~WindowedHilbert()
{
ippFree(m_spec);
ippFree(m_buffer);
ippsFree(m_inputBuf);
ippsFree(m_outputBuf);
ippsFree(m_hammingTable);
WindowedHilbert::~WindowedHilbert() {
ippFree(m_spec);
ippFree(m_buffer);
ippsFree(m_inputBuf);
ippsFree(m_outputBuf);
ippsFree(m_hammingTable);
}
void WindowedHilbert::_AddWindow()
{
void WindowedHilbert::_AddWindow() {
#if USE_LPF
Ipp32f* inBufBase = &m_inputBuf[m_windowFrames * m_bufIdx + m_halfFrames];
m_fir.Process(inBufBase, m_windowFrames);
Ipp32f* inBufBase = &m_inputBuf[m_windowFrames * m_bufIdx + m_halfFrames];
m_fir.Process(inBufBase, m_windowFrames);
#endif
if (m_bufIdx)
{
/* Mirror last half of samples to start of input buffer */
Ipp32f* bufBase = &m_inputBuf[m_windowFrames * 2];
ippsCopy_32f(bufBase, m_inputBuf, m_halfFrames);
ippsHilbert_32f32fc(&m_inputBuf[m_windowFrames],
m_output[2], m_spec, m_buffer);
ippsHilbert_32f32fc(&m_inputBuf[m_windowFrames + m_halfFrames],
m_output[3], m_spec, m_buffer);
}
else
{
ippsHilbert_32f32fc(&m_inputBuf[0],
m_output[0], m_spec, m_buffer);
ippsHilbert_32f32fc(&m_inputBuf[m_halfFrames],
m_output[1], m_spec, m_buffer);
}
m_bufIdx ^= 1;
if (m_bufIdx) {
/* Mirror last half of samples to start of input buffer */
Ipp32f* bufBase = &m_inputBuf[m_windowFrames * 2];
ippsCopy_32f(bufBase, m_inputBuf, m_halfFrames);
ippsHilbert_32f32fc(&m_inputBuf[m_windowFrames], m_output[2], m_spec, m_buffer);
ippsHilbert_32f32fc(&m_inputBuf[m_windowFrames + m_halfFrames], m_output[3], m_spec, m_buffer);
} else {
ippsHilbert_32f32fc(&m_inputBuf[0], m_output[0], m_spec, m_buffer);
ippsHilbert_32f32fc(&m_inputBuf[m_halfFrames], m_output[1], m_spec, m_buffer);
}
m_bufIdx ^= 1;
}
void WindowedHilbert::AddWindow(const float* input, int stride)
{
Ipp32f* bufBase = &m_inputBuf[m_windowFrames * m_bufIdx + m_halfFrames];
for (int i=0 ; i<m_windowFrames ; ++i)
bufBase[i] = input[i * stride];
_AddWindow();
void WindowedHilbert::AddWindow(const float* input, int stride) {
Ipp32f* bufBase = &m_inputBuf[m_windowFrames * m_bufIdx + m_halfFrames];
for (int i = 0; i < m_windowFrames; ++i)
bufBase[i] = input[i * stride];
_AddWindow();
}
void WindowedHilbert::AddWindow(const int32_t* input, int stride)
{
Ipp32f* bufBase = &m_inputBuf[m_windowFrames * m_bufIdx + m_halfFrames];
for (int i=0 ; i<m_windowFrames ; ++i)
bufBase[i] = input[i * stride] / (float(INT32_MAX) + 1.f);
_AddWindow();
void WindowedHilbert::AddWindow(const int32_t* input, int stride) {
Ipp32f* bufBase = &m_inputBuf[m_windowFrames * m_bufIdx + m_halfFrames];
for (int i = 0; i < m_windowFrames; ++i)
bufBase[i] = input[i * stride] / (float(INT32_MAX) + 1.f);
_AddWindow();
}
void WindowedHilbert::AddWindow(const int16_t* input, int stride)
{
Ipp32f* bufBase = &m_inputBuf[m_windowFrames * m_bufIdx + m_halfFrames];
for (int i=0 ; i<m_windowFrames ; ++i)
bufBase[i] = input[i * stride] / (float(INT16_MAX) + 1.f);
_AddWindow();
void WindowedHilbert::AddWindow(const int16_t* input, int stride) {
Ipp32f* bufBase = &m_inputBuf[m_windowFrames * m_bufIdx + m_halfFrames];
for (int i = 0; i < m_windowFrames; ++i)
bufBase[i] = input[i * stride] / (float(INT16_MAX) + 1.f);
_AddWindow();
}
template <typename T>
void WindowedHilbert::Output(T* output, float lCoef, float rCoef) const
{
int first, middle, last;
if (m_bufIdx)
{
first = 3;
middle = 0;
last = 1;
}
else
{
first = 1;
middle = 2;
last = 3;
}
void WindowedHilbert::Output(T* output, float lCoef, float rCoef) const {
int first, middle, last;
if (m_bufIdx) {
first = 3;
middle = 0;
last = 1;
} else {
first = 1;
middle = 2;
last = 3;
}
#if 0
for (int i=0 ; i<m_windowFrames ; ++i)
@@ -191,27 +168,23 @@ void WindowedHilbert::Output(T* output, float lCoef, float rCoef) const
return;
#endif
int i, t;
for (i=0, t=0 ; i<m_halfFrames ; ++i, ++t)
{
float tmp = m_output[first][m_halfFrames + i].im * (1.f - m_hammingTable[t]) +
m_output[middle][i].im * m_hammingTable[t];
output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
}
for (; i<m_windowFrames-m_halfFrames ; ++i)
{
float tmp = m_output[middle][i].im;
output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
}
for (t=0 ; i<m_windowFrames ; ++i, ++t)
{
float tmp = m_output[middle][i].im * (1.f - m_hammingTable[t]) +
m_output[last][t].im * m_hammingTable[t];
output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
}
int i, t;
for (i = 0, t = 0; i < m_halfFrames; ++i, ++t) {
float tmp =
m_output[first][m_halfFrames + i].im * (1.f - m_hammingTable[t]) + m_output[middle][i].im * m_hammingTable[t];
output[i * 2] = ClampFull<T>(output[i * 2] + tmp * lCoef);
output[i * 2 + 1] = ClampFull<T>(output[i * 2 + 1] + tmp * rCoef);
}
for (; i < m_windowFrames - m_halfFrames; ++i) {
float tmp = m_output[middle][i].im;
output[i * 2] = ClampFull<T>(output[i * 2] + tmp * lCoef);
output[i * 2 + 1] = ClampFull<T>(output[i * 2 + 1] + tmp * rCoef);
}
for (t = 0; i < m_windowFrames; ++i, ++t) {
float tmp = m_output[middle][i].im * (1.f - m_hammingTable[t]) + m_output[last][t].im * m_hammingTable[t];
output[i * 2] = ClampFull<T>(output[i * 2] + tmp * lCoef);
output[i * 2 + 1] = ClampFull<T>(output[i * 2 + 1] + tmp * rCoef);
}
}
template void WindowedHilbert::Output<int16_t>(int16_t* output, float lCoef, float rCoef) const;
@@ -220,53 +193,71 @@ template void WindowedHilbert::Output<float>(float* output, float lCoef, float r
#endif
template <> int16_t* LtRtProcessing::_getInBuf<int16_t>() { return m_16Buffer.get(); }
template <> int32_t* LtRtProcessing::_getInBuf<int32_t>() { return m_32Buffer.get(); }
template <> float* LtRtProcessing::_getInBuf<float>() { return m_fltBuffer.get(); }
template <>
int16_t* LtRtProcessing::_getInBuf<int16_t>() {
return m_16Buffer.get();
}
template <>
int32_t* LtRtProcessing::_getInBuf<int32_t>() {
return m_32Buffer.get();
}
template <>
float* LtRtProcessing::_getInBuf<float>() {
return m_fltBuffer.get();
}
template <> int16_t* LtRtProcessing::_getOutBuf<int16_t>() { return m_16Buffer.get() + m_outputOffset; }
template <> int32_t* LtRtProcessing::_getOutBuf<int32_t>() { return m_32Buffer.get() + m_outputOffset; }
template <> float* LtRtProcessing::_getOutBuf<float>() { return m_fltBuffer.get() + m_outputOffset; }
template <>
int16_t* LtRtProcessing::_getOutBuf<int16_t>() {
return m_16Buffer.get() + m_outputOffset;
}
template <>
int32_t* LtRtProcessing::_getOutBuf<int32_t>() {
return m_32Buffer.get() + m_outputOffset;
}
template <>
float* LtRtProcessing::_getOutBuf<float>() {
return m_fltBuffer.get() + m_outputOffset;
}
LtRtProcessing::LtRtProcessing(int _5msFrames, const AudioVoiceEngineMixInfo& mixInfo)
: m_inMixInfo(mixInfo), m_windowFrames(_5msFrames * 4), m_halfFrames(m_windowFrames / 2),
m_outputOffset(m_windowFrames * 5 * 2)
: m_inMixInfo(mixInfo)
, m_windowFrames(_5msFrames * 4)
, m_halfFrames(m_windowFrames / 2)
, m_outputOffset(m_windowFrames * 5 * 2)
#if INTEL_IPP
, m_hilbertSL(m_windowFrames, mixInfo.m_sampleRate),
m_hilbertSR(m_windowFrames, mixInfo.m_sampleRate)
, m_hilbertSL(m_windowFrames, mixInfo.m_sampleRate)
, m_hilbertSR(m_windowFrames, mixInfo.m_sampleRate)
#endif
{
m_inMixInfo.m_channels = AudioChannelSet::Surround51;
m_inMixInfo.m_channelMap.m_channelCount = 5;
m_inMixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
m_inMixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
m_inMixInfo.m_channelMap.m_channels[2] = AudioChannel::FrontCenter;
m_inMixInfo.m_channelMap.m_channels[3] = AudioChannel::RearLeft;
m_inMixInfo.m_channelMap.m_channels[4] = AudioChannel::RearRight;
m_inMixInfo.m_channels = AudioChannelSet::Surround51;
m_inMixInfo.m_channelMap.m_channelCount = 5;
m_inMixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
m_inMixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
m_inMixInfo.m_channelMap.m_channels[2] = AudioChannel::FrontCenter;
m_inMixInfo.m_channelMap.m_channels[3] = AudioChannel::RearLeft;
m_inMixInfo.m_channelMap.m_channels[4] = AudioChannel::RearRight;
int samples = m_windowFrames * (5 * 2 + 2 * 2);
switch (mixInfo.m_sampleFormat)
{
case SOXR_INT16_I:
m_16Buffer.reset(new int16_t[samples]);
memset(m_16Buffer.get(), 0, sizeof(int16_t) * samples);
break;
case SOXR_INT32_I:
m_32Buffer.reset(new int32_t[samples]);
memset(m_32Buffer.get(), 0, sizeof(int32_t) * samples);
break;
case SOXR_FLOAT32_I:
m_fltBuffer.reset(new float[samples]);
memset(m_fltBuffer.get(), 0, sizeof(float) * samples);
break;
default:
break;
}
int samples = m_windowFrames * (5 * 2 + 2 * 2);
switch (mixInfo.m_sampleFormat) {
case SOXR_INT16_I:
m_16Buffer.reset(new int16_t[samples]);
memset(m_16Buffer.get(), 0, sizeof(int16_t) * samples);
break;
case SOXR_INT32_I:
m_32Buffer.reset(new int32_t[samples]);
memset(m_32Buffer.get(), 0, sizeof(int32_t) * samples);
break;
case SOXR_FLOAT32_I:
m_fltBuffer.reset(new float[samples]);
memset(m_fltBuffer.get(), 0, sizeof(float) * samples);
break;
default:
break;
}
}
template <typename T>
void LtRtProcessing::Process(const T* input, T* output, int frameCount)
{
void LtRtProcessing::Process(const T* input, T* output, int frameCount) {
#if 0
for (int i=0 ; i<frameCount ; ++i)
{
@@ -276,90 +267,81 @@ void LtRtProcessing::Process(const T* input, T* output, int frameCount)
return;
#endif
int outFramesRem = frameCount;
T* inBuf = _getInBuf<T>();
T* outBuf = _getOutBuf<T>();
int tail = std::min(m_windowFrames * 2, m_bufferTail + frameCount);
int samples = (tail - m_bufferTail) * 5;
memmove(&inBuf[m_bufferTail * 5], input, samples * sizeof(T));
//printf("input %d to %d\n", tail - m_bufferTail, m_bufferTail);
input += samples;
frameCount -= tail - m_bufferTail;
int outFramesRem = frameCount;
T* inBuf = _getInBuf<T>();
T* outBuf = _getOutBuf<T>();
int tail = std::min(m_windowFrames * 2, m_bufferTail + frameCount);
int samples = (tail - m_bufferTail) * 5;
memmove(&inBuf[m_bufferTail * 5], input, samples * sizeof(T));
// printf("input %d to %d\n", tail - m_bufferTail, m_bufferTail);
input += samples;
frameCount -= tail - m_bufferTail;
int head = std::min(m_windowFrames * 2, m_bufferHead + outFramesRem);
samples = (head - m_bufferHead) * 2;
memmove(output, outBuf + m_bufferHead * 2, samples * sizeof(T));
//printf("output %d from %d\n", head - m_bufferHead, m_bufferHead);
output += samples;
outFramesRem -= head - m_bufferHead;
int head = std::min(m_windowFrames * 2, m_bufferHead + outFramesRem);
samples = (head - m_bufferHead) * 2;
memmove(output, outBuf + m_bufferHead * 2, samples * sizeof(T));
// printf("output %d from %d\n", head - m_bufferHead, m_bufferHead);
output += samples;
outFramesRem -= head - m_bufferHead;
int bufIdx = m_bufferTail / m_windowFrames;
if (tail / m_windowFrames > bufIdx)
{
T* in = &inBuf[bufIdx * m_windowFrames * 5];
T* out = &outBuf[bufIdx * m_windowFrames * 2];
int bufIdx = m_bufferTail / m_windowFrames;
if (tail / m_windowFrames > bufIdx) {
T* in = &inBuf[bufIdx * m_windowFrames * 5];
T* out = &outBuf[bufIdx * m_windowFrames * 2];
#if INTEL_IPP
m_hilbertSL.AddWindow(in + 3, 5);
m_hilbertSR.AddWindow(in + 4, 5);
m_hilbertSL.AddWindow(in + 3, 5);
m_hilbertSR.AddWindow(in + 4, 5);
#endif
// x(:,1) + sqrt(.5)*x(:,3) + sqrt(19/25)*x(:,4) + sqrt(6/25)*x(:,5)
// x(:,2) + sqrt(.5)*x(:,3) - sqrt(6/25)*x(:,4) - sqrt(19/25)*x(:,5)
if (bufIdx)
{
int delayI = -m_halfFrames;
for (int i=0 ; i<m_windowFrames ; ++i, ++delayI)
{
out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
//printf("in %d out %d\n", bufIdx * m_5msFrames + delayI, bufIdx * m_5msFrames + i);
}
}
else
{
int delayI = m_windowFrames * 2 - m_halfFrames;
int i;
for (i=0 ; i<m_halfFrames ; ++i, ++delayI)
{
out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
//printf("in %d out %d\n", bufIdx * m_5msFrames + delayI, bufIdx * m_5msFrames + i);
}
delayI = 0;
for (; i<m_windowFrames ; ++i, ++delayI)
{
out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
//printf("in %d out %d\n", bufIdx * m_5msFrames + delayI, bufIdx * m_5msFrames + i);
}
}
// x(:,1) + sqrt(.5)*x(:,3) + sqrt(19/25)*x(:,4) + sqrt(6/25)*x(:,5)
// x(:,2) + sqrt(.5)*x(:,3) - sqrt(6/25)*x(:,4) - sqrt(19/25)*x(:,5)
if (bufIdx) {
int delayI = -m_halfFrames;
for (int i = 0; i < m_windowFrames; ++i, ++delayI) {
out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
// printf("in %d out %d\n", bufIdx * m_5msFrames + delayI, bufIdx * m_5msFrames + i);
}
} else {
int delayI = m_windowFrames * 2 - m_halfFrames;
int i;
for (i = 0; i < m_halfFrames; ++i, ++delayI) {
out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
// printf("in %d out %d\n", bufIdx * m_5msFrames + delayI, bufIdx * m_5msFrames + i);
}
delayI = 0;
for (; i < m_windowFrames; ++i, ++delayI) {
out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
// printf("in %d out %d\n", bufIdx * m_5msFrames + delayI, bufIdx * m_5msFrames + i);
}
}
#if INTEL_IPP
m_hilbertSL.Output(out, 0.8717798f, 0.4898979f);
m_hilbertSR.Output(out, -0.4898979f, -0.8717798f);
m_hilbertSL.Output(out, 0.8717798f, 0.4898979f);
m_hilbertSR.Output(out, -0.4898979f, -0.8717798f);
#endif
}
m_bufferTail = (tail == m_windowFrames * 2) ? 0 : tail;
m_bufferHead = (head == m_windowFrames * 2) ? 0 : head;
}
m_bufferTail = (tail == m_windowFrames * 2) ? 0 : tail;
m_bufferHead = (head == m_windowFrames * 2) ? 0 : head;
if (frameCount)
{
samples = frameCount * 5;
memmove(inBuf, input, samples * sizeof(T));
//printf("input %d to %d\n", frameCount, 0);
m_bufferTail = frameCount;
}
if (frameCount) {
samples = frameCount * 5;
memmove(inBuf, input, samples * sizeof(T));
// printf("input %d to %d\n", frameCount, 0);
m_bufferTail = frameCount;
}
if (outFramesRem)
{
samples = outFramesRem * 2;
memmove(output, outBuf, samples * sizeof(T));
//printf("output %d from %d\n", outFramesRem, 0);
m_bufferHead = outFramesRem;
}
if (outFramesRem) {
samples = outFramesRem * 2;
memmove(output, outBuf, samples * sizeof(T));
// printf("output %d from %d\n", outFramesRem, 0);
m_bufferHead = outFramesRem;
}
}
template void LtRtProcessing::Process<int16_t>(const int16_t* input, int16_t* output, int frameCount);
template void LtRtProcessing::Process<int32_t>(const int32_t* input, int32_t* output, int frameCount);
template void LtRtProcessing::Process<float>(const float* input, float* output, int frameCount);
}
} // namespace boo
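// Illustrative sketch (not part of this commit) of the Lt/Rt downmix matrix the
// MATLAB-style comments in LtRtProcessing::Process above describe, without the
// Hilbert 90-degree shift applied to the surrounds in the INTEL_IPP path.
// Assumed interleaved channel order: FL, FR, FC, RL, RR.
inline void ltRtDownmixFrame(const float in[5], float& lt, float& rt) {
  const float c = 0.7071068f;  // sqrt(1/2), front-center weight
  const float a = 0.8717798f;  // sqrt(19/25), same coefficient passed to m_hilbertSL/SR
  const float b = 0.4898979f;  // sqrt(6/25)
  lt = in[0] + c * in[2] + a * in[3] + b * in[4];
  rt = in[1] + c * in[2] - b * in[3] - a * in[4];
}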


@@ -9,73 +9,73 @@
#include "ipp.h"
#endif
namespace boo
{
namespace boo {
#if INTEL_IPP
#define USE_LPF 0
#if USE_LPF
class FIRFilter12k
{
IppsFIRSpec_32f* m_firSpec;
Ipp8u* m_firBuffer;
Ipp32f* m_dlySrc;
Ipp32f* m_inBuf;
class FIRFilter12k {
IppsFIRSpec_32f* m_firSpec;
Ipp8u* m_firBuffer;
Ipp32f* m_dlySrc;
Ipp32f* m_inBuf;
public:
explicit FIRFilter12k(int windowFrames, double sampleRate);
~FIRFilter12k();
void Process(Ipp32f* buf, int windowFrames);
explicit FIRFilter12k(int windowFrames, double sampleRate);
~FIRFilter12k();
void Process(Ipp32f* buf, int windowFrames);
};
#endif
class WindowedHilbert
{
class WindowedHilbert {
#if USE_LPF
FIRFilter12k m_fir;
FIRFilter12k m_fir;
#endif
IppsHilbertSpec* m_spec;
Ipp8u* m_buffer;
int m_windowFrames, m_halfFrames;
int m_bufIdx = 0;
Ipp32f* m_inputBuf;
Ipp32fc* m_outputBuf;
Ipp32fc* m_output[4];
Ipp32f* m_hammingTable;
void _AddWindow();
IppsHilbertSpec* m_spec;
Ipp8u* m_buffer;
int m_windowFrames, m_halfFrames;
int m_bufIdx = 0;
Ipp32f* m_inputBuf;
Ipp32fc* m_outputBuf;
Ipp32fc* m_output[4];
Ipp32f* m_hammingTable;
void _AddWindow();
public:
explicit WindowedHilbert(int windowFrames, double sampleRate);
~WindowedHilbert();
void AddWindow(const float* input, int stride);
void AddWindow(const int32_t* input, int stride);
void AddWindow(const int16_t* input, int stride);
template <typename T>
void Output(T* output, float lCoef, float rCoef) const;
explicit WindowedHilbert(int windowFrames, double sampleRate);
~WindowedHilbert();
void AddWindow(const float* input, int stride);
void AddWindow(const int32_t* input, int stride);
void AddWindow(const int16_t* input, int stride);
template <typename T>
void Output(T* output, float lCoef, float rCoef) const;
};
#endif
class LtRtProcessing
{
AudioVoiceEngineMixInfo m_inMixInfo;
int m_windowFrames;
int m_halfFrames;
int m_outputOffset;
int m_bufferTail = 0;
int m_bufferHead = 0;
std::unique_ptr<int16_t[]> m_16Buffer;
std::unique_ptr<int32_t[]> m_32Buffer;
std::unique_ptr<float[]> m_fltBuffer;
class LtRtProcessing {
AudioVoiceEngineMixInfo m_inMixInfo;
int m_windowFrames;
int m_halfFrames;
int m_outputOffset;
int m_bufferTail = 0;
int m_bufferHead = 0;
std::unique_ptr<int16_t[]> m_16Buffer;
std::unique_ptr<int32_t[]> m_32Buffer;
std::unique_ptr<float[]> m_fltBuffer;
#if INTEL_IPP
WindowedHilbert m_hilbertSL, m_hilbertSR;
WindowedHilbert m_hilbertSL, m_hilbertSR;
#endif
template <typename T> T* _getInBuf();
template <typename T> T* _getOutBuf();
template <typename T>
T* _getInBuf();
template <typename T>
T* _getOutBuf();
public:
LtRtProcessing(int _5msFrames, const AudioVoiceEngineMixInfo& mixInfo);
template <typename T>
void Process(const T* input, T* output, int frameCount);
const AudioVoiceEngineMixInfo& inMixInfo() const { return m_inMixInfo; }
LtRtProcessing(int _5msFrames, const AudioVoiceEngineMixInfo& mixInfo);
template <typename T>
void Process(const T* input, T* output, int frameCount);
const AudioVoiceEngineMixInfo& inMixInfo() const { return m_inMixInfo; }
};
}
} // namespace boo
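// Illustrative sketch (assumption, not from this codebase): the raised-cosine
// half-window ramp that WindowedHilbert, declared above, builds into
// m_hammingTable and uses to crossfade consecutive analysis windows.
#include <cmath>
#include <vector>

inline std::vector<float> makeCrossfadeRamp(int halfFrames) {
  std::vector<float> ramp(halfFrames);
  for (int i = 0; i < halfFrames; ++i)
    ramp[i] = float(std::cos(M_PI * (i / double(halfFrames) + 1.0)) * 0.5 + 0.5);
  return ramp;  // ramp[0] == 0 and rises toward 1 across the half window
}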


@@ -1,12 +1,11 @@
#include "MIDICommon.hpp"
#include "boo/audiodev/IMIDIPort.hpp"
namespace boo
{
namespace boo {
IMIDIPort::~IMIDIPort() {}
IMIDIIn::~IMIDIIn() {}
IMIDIOut::~IMIDIOut() {}
IMIDIInOut::~IMIDIInOut() {}
}
} // namespace boo


@@ -1,30 +1,27 @@
#pragma once
namespace boo
{
namespace boo {
enum class Status
{
NoteOff = 0x80,
NoteOn = 0x90,
NotePressure = 0xA0,
ControlChange = 0xB0,
ProgramChange = 0xC0,
ChannelPressure = 0xD0,
PitchBend = 0xE0,
SysEx = 0xF0,
TimecodeQuarterFrame = 0xF1,
SongPositionPointer = 0xF2,
SongSelect = 0xF3,
TuneRequest = 0xF6,
SysExTerm = 0xF7,
TimingClock = 0xF8,
Start = 0xFA,
Continue = 0xFB,
Stop = 0xFC,
ActiveSensing = 0xFE,
Reset = 0xFF,
enum class Status {
NoteOff = 0x80,
NoteOn = 0x90,
NotePressure = 0xA0,
ControlChange = 0xB0,
ProgramChange = 0xC0,
ChannelPressure = 0xD0,
PitchBend = 0xE0,
SysEx = 0xF0,
TimecodeQuarterFrame = 0xF1,
SongPositionPointer = 0xF2,
SongSelect = 0xF3,
TuneRequest = 0xF6,
SysExTerm = 0xF7,
TimingClock = 0xF8,
Start = 0xFA,
Continue = 0xFB,
Stop = 0xFC,
ActiveSensing = 0xFE,
Reset = 0xFF,
};
}
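// Illustrative sketch (not part of this commit) of how a MIDI status byte
// decomposes against the enum above: the high nibble selects the voice
// message, the low nibble the channel; system messages 0xF0-0xFF use the
// whole byte.
#include <cstdint>

constexpr uint8_t statusKind(uint8_t status) { return status & 0xF0; }     // e.g. 0x90 == NoteOn
constexpr uint8_t statusChannel(uint8_t status) { return status & 0x0F; }  // 0-15

static_assert(statusKind(0x93) == 0x90, "NoteOn on channel 3");
static_assert(statusChannel(0x93) == 3, "channel nibble");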


@@ -3,207 +3,183 @@
#include <memory>
#include <algorithm>
namespace boo
{
namespace boo {
static inline uint8_t clamp7(uint8_t val) {return std::max(0, std::min(127, int(val)));}
static inline uint8_t clamp7(uint8_t val) { return std::max(0, std::min(127, int(val))); }
bool MIDIDecoder::_readContinuedValue(std::vector<uint8_t>::const_iterator& it,
std::vector<uint8_t>::const_iterator end,
uint32_t& valOut)
{
std::vector<uint8_t>::const_iterator end, uint32_t& valOut) {
uint8_t a = *it++;
valOut = a & 0x7f;
if (a & 0x80) {
if (it == end)
return false;
valOut <<= 7;
a = *it++;
valOut |= a & 0x7f;
if (a & 0x80) {
if (it == end)
return false;
valOut <<= 7;
a = *it++;
valOut |= a & 0x7f;
}
}
return true;
}
std::vector<uint8_t>::const_iterator MIDIDecoder::receiveBytes(std::vector<uint8_t>::const_iterator begin,
std::vector<uint8_t>::const_iterator end) {
std::vector<uint8_t>::const_iterator it = begin;
while (it != end) {
uint8_t a = *it++;
valOut = a & 0x7f;
uint8_t b;
if (a & 0x80)
{
m_status = a;
else
it--;
if (m_status == 0xff) {
/* Meta events (ignored for now) */
if (it == end)
return begin;
a = *it++;
uint32_t length;
_readContinuedValue(it, end, length);
it += length;
} else {
uint8_t chan = m_status & 0xf;
switch (Status(m_status & 0xf0)) {
case Status::NoteOff: {
if (it == end)
return false;
valOut <<= 7;
return begin;
a = *it++;
valOut |= a & 0x7f;
if (a & 0x80)
{
if (it == end)
return false;
valOut <<= 7;
a = *it++;
valOut |= a & 0x7f;
if (it == end)
return begin;
b = *it++;
m_out.noteOff(chan, clamp7(a), clamp7(b));
break;
}
case Status::NoteOn: {
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.noteOn(chan, clamp7(a), clamp7(b));
break;
}
case Status::NotePressure: {
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.notePressure(chan, clamp7(a), clamp7(b));
break;
}
case Status::ControlChange: {
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.controlChange(chan, clamp7(a), clamp7(b));
break;
}
case Status::ProgramChange: {
if (it == end)
return begin;
a = *it++;
m_out.programChange(chan, clamp7(a));
break;
}
case Status::ChannelPressure: {
if (it == end)
return begin;
a = *it++;
m_out.channelPressure(chan, clamp7(a));
break;
}
case Status::PitchBend: {
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.pitchBend(chan, clamp7(b) * 128 + clamp7(a));
break;
}
case Status::SysEx: {
switch (Status(m_status & 0xff)) {
case Status::SysEx: {
uint32_t len;
if (!_readContinuedValue(it, end, len) || end - it < len)
return begin;
m_out.sysex(&*it, len);
break;
}
}
return true;
}
std::vector<uint8_t>::const_iterator
MIDIDecoder::receiveBytes(std::vector<uint8_t>::const_iterator begin,
std::vector<uint8_t>::const_iterator end)
{
std::vector<uint8_t>::const_iterator it = begin;
while (it != end)
{
uint8_t a = *it++;
uint8_t b;
if (a & 0x80)
m_status = a;
else
it--;
if (m_status == 0xff)
{
/* Meta events (ignored for now) */
if (it == end)
return begin;
a = *it++;
uint32_t length;
_readContinuedValue(it, end, length);
it += length;
} else
{
uint8_t chan = m_status & 0xf;
switch (Status(m_status & 0xf0))
{
case Status::NoteOff:
{
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.noteOff(chan, clamp7(a), clamp7(b));
break;
}
case Status::NoteOn:
{
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.noteOn(chan, clamp7(a), clamp7(b));
break;
}
case Status::NotePressure:
{
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.notePressure(chan, clamp7(a), clamp7(b));
break;
}
case Status::ControlChange:
{
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.controlChange(chan, clamp7(a), clamp7(b));
break;
}
case Status::ProgramChange:
{
if (it == end)
return begin;
a = *it++;
m_out.programChange(chan, clamp7(a));
break;
}
case Status::ChannelPressure:
{
if (it == end)
return begin;
a = *it++;
m_out.channelPressure(chan, clamp7(a));
break;
}
case Status::PitchBend:
{
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.pitchBend(chan, clamp7(b) * 128 + clamp7(a));
break;
}
case Status::SysEx:
{
switch (Status(m_status & 0xff))
{
case Status::SysEx:
{
uint32_t len;
if (!_readContinuedValue(it, end, len) || end - it < len)
return begin;
m_out.sysex(&*it, len);
break;
}
case Status::TimecodeQuarterFrame:
{
if (it == end)
return begin;
a = *it++;
m_out.timeCodeQuarterFrame(a >> 4 & 0x7, a & 0xf);
break;
}
case Status::SongPositionPointer:
{
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.songPositionPointer(clamp7(b) * 128 + clamp7(a));
break;
}
case Status::SongSelect:
{
if (it == end)
return begin;
a = *it++;
m_out.songSelect(clamp7(a));
break;
}
case Status::TuneRequest:
m_out.tuneRequest();
break;
case Status::Start:
m_out.startSeq();
break;
case Status::Continue:
m_out.continueSeq();
break;
case Status::Stop:
m_out.stopSeq();
break;
case Status::Reset:
m_out.reset();
break;
case Status::SysExTerm:
case Status::TimingClock:
case Status::ActiveSensing:
default:
break;
}
break;
}
default:
break;
}
case Status::TimecodeQuarterFrame: {
if (it == end)
return begin;
a = *it++;
m_out.timeCodeQuarterFrame(a >> 4 & 0x7, a & 0xf);
break;
}
case Status::SongPositionPointer: {
if (it == end)
return begin;
a = *it++;
if (it == end)
return begin;
b = *it++;
m_out.songPositionPointer(clamp7(b) * 128 + clamp7(a));
break;
}
case Status::SongSelect: {
if (it == end)
return begin;
a = *it++;
m_out.songSelect(clamp7(a));
break;
}
case Status::TuneRequest:
m_out.tuneRequest();
break;
case Status::Start:
m_out.startSeq();
break;
case Status::Continue:
m_out.continueSeq();
break;
case Status::Stop:
m_out.stopSeq();
break;
case Status::Reset:
m_out.reset();
break;
case Status::SysExTerm:
case Status::TimingClock:
case Status::ActiveSensing:
default:
break;
}
break;
}
default:
break;
}
}
return it;
}
return it;
}
}
} // namespace boo
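// Illustrative sketch (assumption, not from this codebase) of the running-status
// rule MIDIDecoder::receiveBytes relies on above: a byte with the high bit set
// becomes the new status, while a plain data byte is re-interpreted under the
// most recently seen status.
#include <cstdint>

struct RunningStatusTracker {
  uint8_t status = 0;
  // Returns the status the incoming byte should be parsed under.
  uint8_t classify(uint8_t byte) {
    if (byte & 0x80)
      status = byte;  // explicit status byte replaces the running status
    return status;    // data bytes inherit the last status seen
  }
};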


@@ -1,228 +1,182 @@
#include "boo/audiodev/MIDIEncoder.hpp"
#include "MIDICommon.hpp"
namespace boo
{
namespace boo {
template <class Sender>
void MIDIEncoder<Sender>::_sendMessage(const uint8_t* data, size_t len)
{
if (data[0] == m_status)
m_sender.send(data + 1, len - 1);
else
{
if (data[0] & 0x80)
m_status = data[0];
m_sender.send(data, len);
}
}
template <class Sender>
void MIDIEncoder<Sender>::_sendContinuedValue(uint32_t val)
{
uint8_t send[3] = {};
uint8_t* ptr = nullptr;
if (val >= 0x4000)
{
ptr = &send[0];
send[0] = 0x80 | ((val / 0x4000) & 0x7f);
send[1] = 0x80;
val &= 0x3fff;
}
if (val >= 0x80)
{
if (!ptr)
ptr = &send[1];
send[1] = 0x80 | ((val / 0x80) & 0x7f);
}
if (!ptr)
ptr = &send[2];
send[2] = val & 0x7f;
m_sender.send(ptr, 3 - (ptr - send));
}
template <class Sender>
void MIDIEncoder<Sender>::noteOff(uint8_t chan, uint8_t key, uint8_t velocity)
{
uint8_t cmd[3] = {uint8_t(int(Status::NoteOff) | (chan & 0xf)),
uint8_t(key & 0x7f), uint8_t(velocity & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::noteOn(uint8_t chan, uint8_t key, uint8_t velocity)
{
uint8_t cmd[3] = {uint8_t(int(Status::NoteOn) | (chan & 0xf)),
uint8_t(key & 0x7f), uint8_t(velocity & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::notePressure(uint8_t chan, uint8_t key, uint8_t pressure)
{
uint8_t cmd[3] = {uint8_t(int(Status::NotePressure) | (chan & 0xf)),
uint8_t(key & 0x7f), uint8_t(pressure & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::controlChange(uint8_t chan, uint8_t control, uint8_t value)
{
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)),
uint8_t(control & 0x7f), uint8_t(value & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::programChange(uint8_t chan, uint8_t program)
{
uint8_t cmd[2] = {uint8_t(int(Status::ProgramChange) | (chan & 0xf)),
uint8_t(program & 0x7f)};
_sendMessage(cmd, 2);
}
template <class Sender>
void MIDIEncoder<Sender>::channelPressure(uint8_t chan, uint8_t pressure)
{
uint8_t cmd[2] = {uint8_t(int(Status::ChannelPressure) | (chan & 0xf)),
uint8_t(pressure & 0x7f)};
_sendMessage(cmd, 2);
}
template <class Sender>
void MIDIEncoder<Sender>::pitchBend(uint8_t chan, int16_t pitch)
{
uint8_t cmd[3] = {uint8_t(int(Status::PitchBend) | (chan & 0xf)),
uint8_t((pitch % 128) & 0x7f), uint8_t((pitch / 128) & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::allSoundOff(uint8_t chan)
{
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)),
120, 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::resetAllControllers(uint8_t chan)
{
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)),
121, 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::localControl(uint8_t chan, bool on)
{
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)),
122, uint8_t(on ? 127 : 0)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::allNotesOff(uint8_t chan)
{
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)),
123, 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::omniMode(uint8_t chan, bool on)
{
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)),
uint8_t(on ? 125 : 124), 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::polyMode(uint8_t chan, bool on)
{
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)),
uint8_t(on ? 127 : 126), 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::sysex(const void* data, size_t len)
{
uint8_t cmd = uint8_t(Status::SysEx);
_sendMessage(&cmd, 1);
_sendContinuedValue(len);
void MIDIEncoder<Sender>::_sendMessage(const uint8_t* data, size_t len) {
if (data[0] == m_status)
m_sender.send(data + 1, len - 1);
else {
if (data[0] & 0x80)
m_status = data[0];
m_sender.send(data, len);
cmd = uint8_t(Status::SysExTerm);
_sendMessage(&cmd, 1);
}
}
template <class Sender>
void MIDIEncoder<Sender>::timeCodeQuarterFrame(uint8_t message, uint8_t value)
{
uint8_t cmd[2] = {uint8_t(int(Status::TimecodeQuarterFrame)),
uint8_t((message & 0x7 << 4) | (value & 0xf))};
_sendMessage(cmd, 2);
void MIDIEncoder<Sender>::_sendContinuedValue(uint32_t val) {
uint8_t send[3] = {};
uint8_t* ptr = nullptr;
if (val >= 0x4000) {
ptr = &send[0];
send[0] = 0x80 | ((val / 0x4000) & 0x7f);
send[1] = 0x80;
val &= 0x3fff;
}
if (val >= 0x80) {
if (!ptr)
ptr = &send[1];
send[1] = 0x80 | ((val / 0x80) & 0x7f);
}
if (!ptr)
ptr = &send[2];
send[2] = val & 0x7f;
m_sender.send(ptr, 3 - (ptr - send));
}
template <class Sender>
void MIDIEncoder<Sender>::songPositionPointer(uint16_t pointer)
{
uint8_t cmd[3] = {uint8_t(int(Status::SongPositionPointer)),
uint8_t((pointer % 128) & 0x7f), uint8_t((pointer / 128) & 0x7f)};
_sendMessage(cmd, 3);
void MIDIEncoder<Sender>::noteOff(uint8_t chan, uint8_t key, uint8_t velocity) {
uint8_t cmd[3] = {uint8_t(int(Status::NoteOff) | (chan & 0xf)), uint8_t(key & 0x7f), uint8_t(velocity & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::songSelect(uint8_t song)
{
uint8_t cmd[2] = {uint8_t(int(Status::TimecodeQuarterFrame)),
uint8_t(song & 0x7f)};
_sendMessage(cmd, 2);
void MIDIEncoder<Sender>::noteOn(uint8_t chan, uint8_t key, uint8_t velocity) {
uint8_t cmd[3] = {uint8_t(int(Status::NoteOn) | (chan & 0xf)), uint8_t(key & 0x7f), uint8_t(velocity & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::tuneRequest()
{
uint8_t cmd = uint8_t(Status::TuneRequest);
_sendMessage(&cmd, 1);
}
template <class Sender>
void MIDIEncoder<Sender>::startSeq()
{
uint8_t cmd = uint8_t(Status::Start);
_sendMessage(&cmd, 1);
void MIDIEncoder<Sender>::notePressure(uint8_t chan, uint8_t key, uint8_t pressure) {
uint8_t cmd[3] = {uint8_t(int(Status::NotePressure) | (chan & 0xf)), uint8_t(key & 0x7f), uint8_t(pressure & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::continueSeq()
{
uint8_t cmd = uint8_t(Status::Continue);
_sendMessage(&cmd, 1);
void MIDIEncoder<Sender>::controlChange(uint8_t chan, uint8_t control, uint8_t value) {
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)), uint8_t(control & 0x7f), uint8_t(value & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::stopSeq()
{
uint8_t cmd = uint8_t(Status::Stop);
_sendMessage(&cmd, 1);
void MIDIEncoder<Sender>::programChange(uint8_t chan, uint8_t program) {
uint8_t cmd[2] = {uint8_t(int(Status::ProgramChange) | (chan & 0xf)), uint8_t(program & 0x7f)};
_sendMessage(cmd, 2);
}
template <class Sender>
void MIDIEncoder<Sender>::channelPressure(uint8_t chan, uint8_t pressure) {
uint8_t cmd[2] = {uint8_t(int(Status::ChannelPressure) | (chan & 0xf)), uint8_t(pressure & 0x7f)};
_sendMessage(cmd, 2);
}
template <class Sender>
void MIDIEncoder<Sender>::reset()
{
uint8_t cmd = uint8_t(Status::Reset);
_sendMessage(&cmd, 1);
void MIDIEncoder<Sender>::pitchBend(uint8_t chan, int16_t pitch) {
uint8_t cmd[3] = {uint8_t(int(Status::PitchBend) | (chan & 0xf)), uint8_t((pitch % 128) & 0x7f),
uint8_t((pitch / 128) & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::allSoundOff(uint8_t chan) {
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)), 120, 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::resetAllControllers(uint8_t chan) {
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)), 121, 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::localControl(uint8_t chan, bool on) {
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)), 122, uint8_t(on ? 127 : 0)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::allNotesOff(uint8_t chan) {
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)), 123, 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::omniMode(uint8_t chan, bool on) {
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)), uint8_t(on ? 125 : 124), 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::polyMode(uint8_t chan, bool on) {
uint8_t cmd[3] = {uint8_t(int(Status::ControlChange) | (chan & 0xf)), uint8_t(on ? 127 : 126), 0};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::sysex(const void* data, size_t len) {
uint8_t cmd = uint8_t(Status::SysEx);
_sendMessage(&cmd, 1);
_sendContinuedValue(len);
m_sender.send(data, len);
cmd = uint8_t(Status::SysExTerm);
_sendMessage(&cmd, 1);
}
template <class Sender>
void MIDIEncoder<Sender>::timeCodeQuarterFrame(uint8_t message, uint8_t value) {
  uint8_t cmd[2] = {uint8_t(int(Status::TimecodeQuarterFrame)), uint8_t(((message & 0x7) << 4) | (value & 0xf))};
_sendMessage(cmd, 2);
}
template <class Sender>
void MIDIEncoder<Sender>::songPositionPointer(uint16_t pointer) {
uint8_t cmd[3] = {uint8_t(int(Status::SongPositionPointer)), uint8_t((pointer % 128) & 0x7f),
uint8_t((pointer / 128) & 0x7f)};
_sendMessage(cmd, 3);
}
template <class Sender>
void MIDIEncoder<Sender>::songSelect(uint8_t song) {
  uint8_t cmd[2] = {uint8_t(int(Status::SongSelect)), uint8_t(song & 0x7f)};
_sendMessage(cmd, 2);
}
template <class Sender>
void MIDIEncoder<Sender>::tuneRequest() {
uint8_t cmd = uint8_t(Status::TuneRequest);
_sendMessage(&cmd, 1);
}
template <class Sender>
void MIDIEncoder<Sender>::startSeq() {
uint8_t cmd = uint8_t(Status::Start);
_sendMessage(&cmd, 1);
}
template <class Sender>
void MIDIEncoder<Sender>::continueSeq() {
uint8_t cmd = uint8_t(Status::Continue);
_sendMessage(&cmd, 1);
}
template <class Sender>
void MIDIEncoder<Sender>::stopSeq() {
uint8_t cmd = uint8_t(Status::Stop);
_sendMessage(&cmd, 1);
}
template <class Sender>
void MIDIEncoder<Sender>::reset() {
uint8_t cmd = uint8_t(Status::Reset);
_sendMessage(&cmd, 1);
}
template class MIDIEncoder<IMIDIOut>;
template class MIDIEncoder<IMIDIInOut>;
}
} // namespace boo
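// Illustrative sketch (not part of this commit) of the up-to-three-byte
// continuation encoding _sendContinuedValue above emits for sysex lengths:
// 7 payload bits per byte, most significant group first, high bit set on
// every byte except the last.
#include <cstddef>
#include <cstdint>

inline size_t encodeContinuedValue(uint32_t val, uint8_t out[3]) {
  size_t n = 0;
  if (val >= 0x4000)
    out[n++] = 0x80 | ((val >> 14) & 0x7f);
  if (val >= 0x80)
    out[n++] = 0x80 | ((val >> 7) & 0x7f);
  out[n++] = val & 0x7f;
  return n;  // e.g. 0x80 -> {0x81, 0x00}, 0x4000 -> {0x81, 0x80, 0x00}
}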


@@ -6,420 +6,357 @@
#include <pulse/pulseaudio.h>
#include <unistd.h>
namespace boo
{
namespace boo {
static logvisor::Module Log("boo::PulseAudio");
logvisor::Module ALSALog("boo::ALSA");
static const uint64_t StereoChans = (1 << PA_CHANNEL_POSITION_FRONT_LEFT) |
(1 << PA_CHANNEL_POSITION_FRONT_RIGHT);
static const uint64_t StereoChans = (1 << PA_CHANNEL_POSITION_FRONT_LEFT) | (1 << PA_CHANNEL_POSITION_FRONT_RIGHT);
static const uint64_t QuadChans = (1 << PA_CHANNEL_POSITION_FRONT_LEFT) |
(1 << PA_CHANNEL_POSITION_FRONT_RIGHT) |
(1 << PA_CHANNEL_POSITION_REAR_LEFT) |
(1 << PA_CHANNEL_POSITION_REAR_RIGHT);
static const uint64_t QuadChans = (1 << PA_CHANNEL_POSITION_FRONT_LEFT) | (1 << PA_CHANNEL_POSITION_FRONT_RIGHT) |
(1 << PA_CHANNEL_POSITION_REAR_LEFT) | (1 << PA_CHANNEL_POSITION_REAR_RIGHT);
static const uint64_t S51Chans = (1 << PA_CHANNEL_POSITION_FRONT_LEFT) |
(1 << PA_CHANNEL_POSITION_FRONT_RIGHT) |
(1 << PA_CHANNEL_POSITION_REAR_LEFT) |
(1 << PA_CHANNEL_POSITION_REAR_RIGHT) |
(1 << PA_CHANNEL_POSITION_FRONT_CENTER) |
(1 << PA_CHANNEL_POSITION_LFE);
static const uint64_t S51Chans = (1 << PA_CHANNEL_POSITION_FRONT_LEFT) | (1 << PA_CHANNEL_POSITION_FRONT_RIGHT) |
(1 << PA_CHANNEL_POSITION_REAR_LEFT) | (1 << PA_CHANNEL_POSITION_REAR_RIGHT) |
(1 << PA_CHANNEL_POSITION_FRONT_CENTER) | (1 << PA_CHANNEL_POSITION_LFE);
static const uint64_t S71Chans = (1 << PA_CHANNEL_POSITION_FRONT_LEFT) |
(1 << PA_CHANNEL_POSITION_FRONT_RIGHT) |
(1 << PA_CHANNEL_POSITION_REAR_LEFT) |
(1 << PA_CHANNEL_POSITION_REAR_RIGHT) |
(1 << PA_CHANNEL_POSITION_FRONT_CENTER) |
(1 << PA_CHANNEL_POSITION_LFE) |
(1 << PA_CHANNEL_POSITION_SIDE_LEFT) |
(1 << PA_CHANNEL_POSITION_SIDE_RIGHT);
static const uint64_t S71Chans = (1 << PA_CHANNEL_POSITION_FRONT_LEFT) | (1 << PA_CHANNEL_POSITION_FRONT_RIGHT) |
(1 << PA_CHANNEL_POSITION_REAR_LEFT) | (1 << PA_CHANNEL_POSITION_REAR_RIGHT) |
(1 << PA_CHANNEL_POSITION_FRONT_CENTER) | (1 << PA_CHANNEL_POSITION_LFE) |
(1 << PA_CHANNEL_POSITION_SIDE_LEFT) | (1 << PA_CHANNEL_POSITION_SIDE_RIGHT);
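// Illustrative sketch (assumption, not from this codebase): how the channel
// bit-masks above are tested. Each PulseAudio channel position sets one bit,
// and a sink layout qualifies for a given AudioChannelSet when every bit of
// that set's mask is present, exactly as _parseAudioChannelSet does below.
#include <cstdint>

constexpr bool hasAllChannels(uint64_t presentBits, uint64_t requiredMask) {
  return (presentBits & requiredMask) == requiredMask;
}
// e.g. hasAllChannels(chBits, StereoChans) mirrors the Stereo case in the
// test-set loop further down.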
struct PulseAudioVoiceEngine : LinuxMidi
{
pa_mainloop* m_mainloop = nullptr;
pa_context* m_ctx = nullptr;
pa_stream* m_stream = nullptr;
std::string m_sinkName;
bool m_handleMove = false;
pa_sample_spec m_sampleSpec = {};
pa_channel_map m_chanMap = {};
struct PulseAudioVoiceEngine : LinuxMidi {
pa_mainloop* m_mainloop = nullptr;
pa_context* m_ctx = nullptr;
pa_stream* m_stream = nullptr;
std::string m_sinkName;
bool m_handleMove = false;
pa_sample_spec m_sampleSpec = {};
pa_channel_map m_chanMap = {};
int _paWaitReady()
{
int retval = 0;
while (pa_context_get_state(m_ctx) < PA_CONTEXT_READY)
pa_mainloop_iterate(m_mainloop, 1, &retval);
return retval;
int _paWaitReady() {
int retval = 0;
while (pa_context_get_state(m_ctx) < PA_CONTEXT_READY)
pa_mainloop_iterate(m_mainloop, 1, &retval);
return retval;
}
int _paStreamWaitReady() {
int retval = 0;
while (pa_stream_get_state(m_stream) < PA_STREAM_READY)
pa_mainloop_iterate(m_mainloop, 1, &retval);
return retval;
}
int _paIterate(pa_operation* op) const {
int retval = 0;
while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
pa_mainloop_iterate(m_mainloop, 1, &retval);
return retval;
}
bool _setupSink() {
if (m_stream) {
pa_stream_disconnect(m_stream);
pa_stream_unref(m_stream);
m_stream = nullptr;
}
int _paStreamWaitReady()
{
int retval = 0;
while (pa_stream_get_state(m_stream) < PA_STREAM_READY)
pa_mainloop_iterate(m_mainloop, 1, &retval);
return retval;
pa_operation* op;
m_sampleSpec.format = PA_SAMPLE_INVALID;
op = pa_context_get_sink_info_by_name(m_ctx, m_sinkName.c_str(), pa_sink_info_cb_t(_getSinkInfoReply), this);
_paIterate(op);
pa_operation_unref(op);
if (m_sampleSpec.format == PA_SAMPLE_INVALID) {
Log.report(logvisor::Error, "Unable to setup audio stream");
goto err;
}
int _paIterate(pa_operation* op) const
{
int retval = 0;
while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
pa_mainloop_iterate(m_mainloop, 1, &retval);
return retval;
m_5msFrames = m_sampleSpec.rate * 5 / 1000;
m_mixInfo.m_sampleRate = m_sampleSpec.rate;
m_mixInfo.m_sampleFormat = SOXR_FLOAT32;
m_mixInfo.m_bitsPerSample = 32;
m_mixInfo.m_periodFrames = m_5msFrames;
if (!(m_stream = pa_stream_new(m_ctx, "master", &m_sampleSpec, &m_chanMap))) {
Log.report(logvisor::Error, "Unable to pa_stream_new(): %s", pa_strerror(pa_context_errno(m_ctx)));
goto err;
}
bool _setupSink()
{
if (m_stream)
{
pa_stream_disconnect(m_stream);
pa_stream_unref(m_stream);
m_stream = nullptr;
}
pa_buffer_attr bufAttr;
bufAttr.minreq = uint32_t(m_5msFrames * m_sampleSpec.channels * sizeof(float));
bufAttr.maxlength = bufAttr.minreq * 24;
bufAttr.tlength = bufAttr.maxlength;
bufAttr.prebuf = UINT32_MAX;
bufAttr.fragsize = UINT32_MAX;
pa_operation* op;
m_sampleSpec.format = PA_SAMPLE_INVALID;
op = pa_context_get_sink_info_by_name(m_ctx, m_sinkName.c_str(), pa_sink_info_cb_t(_getSinkInfoReply), this);
_paIterate(op);
pa_operation_unref(op);
if (m_sampleSpec.format == PA_SAMPLE_INVALID)
{
Log.report(logvisor::Error, "Unable to setup audio stream");
goto err;
}
m_5msFrames = m_sampleSpec.rate * 5 / 1000;
m_mixInfo.m_sampleRate = m_sampleSpec.rate;
m_mixInfo.m_sampleFormat = SOXR_FLOAT32;
m_mixInfo.m_bitsPerSample = 32;
m_mixInfo.m_periodFrames = m_5msFrames;
if (!(m_stream = pa_stream_new(m_ctx, "master", &m_sampleSpec, &m_chanMap)))
{
Log.report(logvisor::Error, "Unable to pa_stream_new(): %s", pa_strerror(pa_context_errno(m_ctx)));
goto err;
}
pa_buffer_attr bufAttr;
bufAttr.minreq = uint32_t(m_5msFrames * m_sampleSpec.channels * sizeof(float));
bufAttr.maxlength = bufAttr.minreq * 24;
bufAttr.tlength = bufAttr.maxlength;
bufAttr.prebuf = UINT32_MAX;
bufAttr.fragsize = UINT32_MAX;
if (pa_stream_connect_playback(m_stream, m_sinkName.c_str(), &bufAttr,
pa_stream_flags_t(PA_STREAM_START_UNMUTED | PA_STREAM_EARLY_REQUESTS),
nullptr, nullptr))
{
Log.report(logvisor::Error, "Unable to pa_stream_connect_playback()");
goto err;
}
pa_stream_set_moved_callback(m_stream, pa_stream_notify_cb_t(_streamMoved), this);
_paStreamWaitReady();
_resetSampleRate();
return true;
err:
if (m_stream)
{
pa_stream_disconnect(m_stream);
pa_stream_unref(m_stream);
m_stream = nullptr;
}
return false;
if (pa_stream_connect_playback(m_stream, m_sinkName.c_str(), &bufAttr,
pa_stream_flags_t(PA_STREAM_START_UNMUTED | PA_STREAM_EARLY_REQUESTS), nullptr,
nullptr)) {
Log.report(logvisor::Error, "Unable to pa_stream_connect_playback()");
goto err;
}
PulseAudioVoiceEngine()
{
if (!(m_mainloop = pa_mainloop_new()))
{
Log.report(logvisor::Error, "Unable to pa_mainloop_new()");
return;
}
pa_stream_set_moved_callback(m_stream, pa_stream_notify_cb_t(_streamMoved), this);
pa_mainloop_api* mlApi = pa_mainloop_get_api(m_mainloop);
pa_proplist* propList = pa_proplist_new();
pa_proplist_sets(propList, PA_PROP_APPLICATION_ICON_NAME, APP->getUniqueName().data());
char pidStr[16];
snprintf(pidStr, 16, "%d", int(getpid()));
pa_proplist_sets(propList, PA_PROP_APPLICATION_PROCESS_ID, pidStr);
if (!(m_ctx = pa_context_new_with_proplist(mlApi, APP->getFriendlyName().data(), propList)))
{
Log.report(logvisor::Error, "Unable to pa_context_new_with_proplist()");
pa_mainloop_free(m_mainloop);
m_mainloop = nullptr;
return;
}
_paStreamWaitReady();
pa_operation* op;
_resetSampleRate();
return true;
err:
if (m_stream) {
pa_stream_disconnect(m_stream);
pa_stream_unref(m_stream);
m_stream = nullptr;
}
return false;
}
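// Illustrative sketch (not part of this commit) of the buffer sizing math in
// _setupSink above, assuming a hypothetical 48 kHz float32 stereo sink: the
// request granularity is one 5 ms period and the total buffer holds 24 of
// them (roughly 120 ms of audio).
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t rate = 48000, channels = 2;
  const uint32_t framesPer5ms = rate * 5 / 1000;                    // 240 frames
  const uint32_t minreq = framesPer5ms * channels * sizeof(float);  // bytes per 5 ms period
  const uint32_t maxlength = minreq * 24;                           // total ring buffer size
  std::printf("minreq=%u bytes, maxlength=%u bytes\n", minreq, maxlength);
  return 0;
}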
if (pa_context_connect(m_ctx, nullptr, PA_CONTEXT_NOFLAGS, nullptr))
{
Log.report(logvisor::Error, "Unable to pa_context_connect()");
goto err;
}
_paWaitReady();
op = pa_context_get_server_info(m_ctx, pa_server_info_cb_t(_getServerInfoReply), this);
_paIterate(op);
pa_operation_unref(op);
if (!_setupSink())
goto err;
return;
err:
pa_context_disconnect(m_ctx);
pa_context_unref(m_ctx);
m_ctx = nullptr;
pa_mainloop_free(m_mainloop);
m_mainloop = nullptr;
PulseAudioVoiceEngine() {
if (!(m_mainloop = pa_mainloop_new())) {
Log.report(logvisor::Error, "Unable to pa_mainloop_new()");
return;
}
~PulseAudioVoiceEngine()
{
if (m_stream)
{
pa_stream_disconnect(m_stream);
pa_stream_unref(m_stream);
pa_mainloop_api* mlApi = pa_mainloop_get_api(m_mainloop);
pa_proplist* propList = pa_proplist_new();
pa_proplist_sets(propList, PA_PROP_APPLICATION_ICON_NAME, APP->getUniqueName().data());
char pidStr[16];
snprintf(pidStr, 16, "%d", int(getpid()));
pa_proplist_sets(propList, PA_PROP_APPLICATION_PROCESS_ID, pidStr);
if (!(m_ctx = pa_context_new_with_proplist(mlApi, APP->getFriendlyName().data(), propList))) {
Log.report(logvisor::Error, "Unable to pa_context_new_with_proplist()");
pa_mainloop_free(m_mainloop);
m_mainloop = nullptr;
return;
}
pa_operation* op;
if (pa_context_connect(m_ctx, nullptr, PA_CONTEXT_NOFLAGS, nullptr)) {
Log.report(logvisor::Error, "Unable to pa_context_connect()");
goto err;
}
_paWaitReady();
op = pa_context_get_server_info(m_ctx, pa_server_info_cb_t(_getServerInfoReply), this);
_paIterate(op);
pa_operation_unref(op);
if (!_setupSink())
goto err;
return;
err:
pa_context_disconnect(m_ctx);
pa_context_unref(m_ctx);
m_ctx = nullptr;
pa_mainloop_free(m_mainloop);
m_mainloop = nullptr;
}
~PulseAudioVoiceEngine() {
if (m_stream) {
pa_stream_disconnect(m_stream);
pa_stream_unref(m_stream);
}
if (m_ctx) {
pa_context_disconnect(m_ctx);
pa_context_unref(m_ctx);
}
if (m_mainloop) {
pa_mainloop_free(m_mainloop);
}
}
static void _streamMoved(pa_stream* p, PulseAudioVoiceEngine* userdata) {
userdata->m_sinkName = pa_stream_get_device_name(p);
userdata->m_handleMove = true;
}
static void _getServerInfoReply(pa_context* c, const pa_server_info* i, PulseAudioVoiceEngine* userdata) {
userdata->m_sinkName = i->default_sink_name;
}
void _parseAudioChannelSet(const pa_channel_map* chm) {
m_chanMap = *chm;
ChannelMap& chmapOut = m_mixInfo.m_channelMap;
m_mixInfo.m_channels = AudioChannelSet::Unknown;
uint64_t chBits = 0;
chmapOut.m_channelCount = chm->channels;
for (unsigned c = 0; c < chm->channels; ++c) {
chBits |= 1 << chm->map[c];
switch (chm->map[c]) {
case PA_CHANNEL_POSITION_FRONT_LEFT:
chmapOut.m_channels[c] = AudioChannel::FrontLeft;
break;
case PA_CHANNEL_POSITION_FRONT_RIGHT:
chmapOut.m_channels[c] = AudioChannel::FrontRight;
break;
case PA_CHANNEL_POSITION_REAR_LEFT:
chmapOut.m_channels[c] = AudioChannel::RearLeft;
break;
case PA_CHANNEL_POSITION_REAR_RIGHT:
chmapOut.m_channels[c] = AudioChannel::RearRight;
break;
case PA_CHANNEL_POSITION_FRONT_CENTER:
chmapOut.m_channels[c] = AudioChannel::FrontCenter;
break;
case PA_CHANNEL_POSITION_LFE:
chmapOut.m_channels[c] = AudioChannel::LFE;
break;
case PA_CHANNEL_POSITION_SIDE_LEFT:
chmapOut.m_channels[c] = AudioChannel::SideLeft;
break;
case PA_CHANNEL_POSITION_SIDE_RIGHT:
chmapOut.m_channels[c] = AudioChannel::SideRight;
break;
default:
chmapOut.m_channels[c] = AudioChannel::Unknown;
break;
}
}
static const std::array<AudioChannelSet, 4> testSets = {
{AudioChannelSet::Surround71, AudioChannelSet::Surround51, AudioChannelSet::Quad, AudioChannelSet::Stereo}};
for (AudioChannelSet set : testSets) {
switch (set) {
case AudioChannelSet::Stereo: {
if ((chBits & StereoChans) == StereoChans) {
m_mixInfo.m_channels = AudioChannelSet::Stereo;
return;
}
if (m_ctx)
{
pa_context_disconnect(m_ctx);
pa_context_unref(m_ctx);
break;
}
case AudioChannelSet::Quad: {
if ((chBits & QuadChans) == QuadChans) {
m_mixInfo.m_channels = AudioChannelSet::Quad;
return;
}
if (m_mainloop)
{
pa_mainloop_free(m_mainloop);
break;
}
case AudioChannelSet::Surround51: {
if ((chBits & S51Chans) == S51Chans) {
m_mixInfo.m_channels = AudioChannelSet::Surround51;
return;
}
}
        break;
      }
case AudioChannelSet::Surround71: {
if ((chBits & S71Chans) == S71Chans) {
m_mixInfo.m_channels = AudioChannelSet::Surround71;
return;
}
break;
}
default:
break;
}
}
}
static void _getSinkInfoReply(pa_context* c, const pa_sink_info* i, int eol, PulseAudioVoiceEngine* userdata) {
if (!i)
return;
userdata->m_sampleSpec.format = PA_SAMPLE_FLOAT32;
userdata->m_sampleSpec.rate = i->sample_spec.rate;
userdata->m_sampleSpec.channels = i->sample_spec.channels;
userdata->_parseAudioChannelSet(&i->channel_map);
}
mutable std::vector<std::pair<std::string, std::string>> m_sinks;
static void _getSinkInfoListReply(pa_context* c, const pa_sink_info* i, int eol, PulseAudioVoiceEngine* userdata) {
if (i)
userdata->m_sinks.push_back(std::make_pair(i->name, i->description));
}
std::vector<std::pair<std::string, std::string>> enumerateAudioOutputs() const {
pa_operation* op = pa_context_get_sink_info_list(m_ctx, pa_sink_info_cb_t(_getSinkInfoListReply), (void*)this);
_paIterate(op);
pa_operation_unref(op);
std::vector<std::pair<std::string, std::string>> ret;
ret.swap(m_sinks);
return ret;
}
std::string getCurrentAudioOutput() const { return m_sinkName; }
bool m_sinkOk = false;
static void _checkAudioSinkReply(pa_context* c, const pa_sink_info* i, int eol, PulseAudioVoiceEngine* userdata) {
if (i)
userdata->m_sinkOk = true;
}
bool setCurrentAudioOutput(const char* name) {
m_sinkOk = false;
pa_operation* op;
op = pa_context_get_sink_info_by_name(m_ctx, name, pa_sink_info_cb_t(_checkAudioSinkReply), this);
_paIterate(op);
pa_operation_unref(op);
if (m_sinkOk) {
m_sinkName = name;
return _setupSink();
}
return false;
}
void _doIterate() {
int retval;
pa_mainloop_iterate(m_mainloop, 1, &retval);
if (m_handleMove) {
m_handleMove = false;
_setupSink();
}
}
void pumpAndMixVoices() {
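    /* With no stream connected, mix into a null sink at failsafe defaults;
     * otherwise write as many whole mixer periods as PulseAudio will accept,
     * then run a single mainloop iteration. */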
if (!m_stream) {
/* Dummy pump mode - use failsafe defaults for 1/60sec of samples */
m_mixInfo.m_sampleRate = 32000.0;
m_mixInfo.m_sampleFormat = SOXR_FLOAT32_I;
m_mixInfo.m_bitsPerSample = 32;
m_5msFrames = 32000 / 60;
m_mixInfo.m_periodFrames = m_5msFrames;
m_mixInfo.m_channels = AudioChannelSet::Stereo;
m_mixInfo.m_channelMap.m_channelCount = 2;
m_mixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
m_mixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
_pumpAndMixVoices(m_5msFrames, (float*)nullptr);
return;
}
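    /* Convert the writable byte count reported by PulseAudio into whole mixer
     * periods; if not even one period fits, just service the mainloop. */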
size_t writableSz = pa_stream_writable_size(m_stream);
size_t frameSz = m_mixInfo.m_channelMap.m_channelCount * sizeof(float);
size_t writableFrames = writableSz / frameSz;
size_t writablePeriods = writableFrames / m_mixInfo.m_periodFrames;
if (!writablePeriods) {
_doIterate();
return;
}
void* data;
size_t periodSz = m_mixInfo.m_periodFrames * frameSz;
size_t nbytes = writablePeriods * periodSz;
if (pa_stream_begin_write(m_stream, &data, &nbytes)) {
pa_stream_state_t st = pa_stream_get_state(m_stream);
Log.report(logvisor::Error, "Unable to pa_stream_begin_write(): %s %d", pa_strerror(pa_context_errno(m_ctx)), st);
_doIterate();
return;
}
writablePeriods = nbytes / periodSz;
size_t periodSamples = m_mixInfo.m_periodFrames * m_mixInfo.m_channelMap.m_channelCount;
_pumpAndMixVoices(m_mixInfo.m_periodFrames * writablePeriods, reinterpret_cast<float*>(data));
if (pa_stream_write(m_stream, data, nbytes, nullptr, 0, PA_SEEK_RELATIVE))
Log.report(logvisor::Error, "Unable to pa_stream_write()");
_doIterate();
}
};
std::unique_ptr<IAudioVoiceEngine> NewAudioVoiceEngine() { return std::make_unique<PulseAudioVoiceEngine>(); }
} // namespace boo
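A minimal, hypothetical caller for the engine above, included only as orientation for reviewers. It assumes the public boo::IAudioVoiceEngine interface exposes enumerateAudioOutputs() and pumpAndMixVoices() as seen in this file, and that "boo/audiodev/IAudioVoiceEngine.hpp" is the header to include; this is a sketch, not part of the commit.

#include "boo/audiodev/IAudioVoiceEngine.hpp"
#include <cstdio>
#include <memory>

int main() {
  std::unique_ptr<boo::IAudioVoiceEngine> engine = boo::NewAudioVoiceEngine();
  if (!engine)
    return 1;

  /* List the PulseAudio sinks the engine found (name, description). */
  for (const auto& sink : engine->enumerateAudioOutputs())
    std::printf("%s: %s\n", sink.first.c_str(), sink.second.c_str());

  /* Drive the mixer; each call fills however many whole periods the stream
   * will currently accept and services the PulseAudio mainloop once.
   * A real client would run this on its audio thread instead of a fixed loop. */
  for (int i = 0; i < 1000; ++i)
    engine->pumpAndMixVoices();
  return 0;
}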

File diff suppressed because it is too large

View File

@@ -2,305 +2,247 @@
#include "logvisor/logvisor.hpp"
#include "boo/audiodev/IAudioVoiceEngine.hpp"
namespace boo {
static logvisor::Module Log("boo::WAVOut");
struct WAVOutVoiceEngine : BaseAudioVoiceEngine {
std::vector<float> m_interleavedBuf;
AudioChannelSet _getAvailableSet() { return AudioChannelSet::Stereo; }
std::string getCurrentAudioOutput() const { return "wavout"; }
bool setCurrentAudioOutput(const char* name) { return false; }
std::vector<std::pair<std::string, std::string>> enumerateAudioOutputs() const { return {{"wavout", "WAVOut"}}; }
std::vector<std::pair<std::string, std::string>> enumerateMIDIInputs() const { return {}; }
bool supportsVirtualMIDIIn() const { return false; }
ReceiveFunctor* m_midiReceiver = nullptr;
struct MIDIIn : public IMIDIIn {
MIDIIn(WAVOutVoiceEngine* parent, bool virt, ReceiveFunctor&& receiver)
: IMIDIIn(parent, virt, std::move(receiver)) {}
std::string description() const { return "WAVOut MIDI"; }
};
std::unique_ptr<IMIDIIn> newVirtualMIDIIn(ReceiveFunctor&& receiver) {
std::unique_ptr<IMIDIIn> ret = std::make_unique<MIDIIn>(nullptr, true, std::move(receiver));
m_midiReceiver = &ret->m_receiver;
return ret;
}
std::unique_ptr<IMIDIOut> newVirtualMIDIOut() { return {}; }
std::unique_ptr<IMIDIInOut> newVirtualMIDIInOut(ReceiveFunctor&& receiver) { return {}; }
std::unique_ptr<IMIDIIn> newRealMIDIIn(const char* name, ReceiveFunctor&& receiver) { return {}; }
std::unique_ptr<IMIDIOut> newRealMIDIOut(const char* name) { return {}; }
std::unique_ptr<IMIDIInOut> newRealMIDIInOut(const char* name, ReceiveFunctor&& receiver) { return {}; }
bool useMIDILock() const { return false; }
FILE* m_fp = nullptr;
size_t m_bytesWritten = 0;
void prepareWAV(double sampleRate, int numChans) {
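    /* Write a RIFF header for 32-bit float PCM: a plain 16-byte fmt chunk for
     * stereo output, or a WAVE_FORMAT_EXTENSIBLE fmt chunk (speaker mask plus
     * the IEEE-float sub-format GUID) for quad/5.1/7.1.  The chunk and data
     * size fields are written as zero here and patched later by finishWav(). */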
uint32_t speakerMask = 0;
switch (numChans) {
default:
case 2:
numChans = 2;
m_mixInfo.m_channels = AudioChannelSet::Stereo;
m_mixInfo.m_channelMap.m_channelCount = 2;
m_mixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
m_mixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
speakerMask = 0x00000001 | 0x00000002;
break;
case 4:
numChans = 4;
m_mixInfo.m_channels = AudioChannelSet::Quad;
m_mixInfo.m_channelMap.m_channelCount = 4;
m_mixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
m_mixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
m_mixInfo.m_channelMap.m_channels[2] = AudioChannel::RearLeft;
m_mixInfo.m_channelMap.m_channels[3] = AudioChannel::RearRight;
speakerMask = 0x00000001 | 0x00000002 | 0x00000010 | 0x00000020;
break;
case 6:
numChans = 6;
m_mixInfo.m_channels = AudioChannelSet::Surround51;
m_mixInfo.m_channelMap.m_channelCount = 6;
m_mixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
m_mixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
m_mixInfo.m_channelMap.m_channels[2] = AudioChannel::FrontCenter;
m_mixInfo.m_channelMap.m_channels[3] = AudioChannel::LFE;
m_mixInfo.m_channelMap.m_channels[4] = AudioChannel::RearLeft;
m_mixInfo.m_channelMap.m_channels[5] = AudioChannel::RearRight;
speakerMask = 0x00000001 | 0x00000002 | 0x00000004 | 0x00000008 | 0x00000010 | 0x00000020;
break;
case 8:
numChans = 8;
m_mixInfo.m_channels = AudioChannelSet::Surround71;
m_mixInfo.m_channelMap.m_channelCount = 8;
m_mixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
m_mixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
m_mixInfo.m_channelMap.m_channels[2] = AudioChannel::FrontCenter;
m_mixInfo.m_channelMap.m_channels[3] = AudioChannel::LFE;
m_mixInfo.m_channelMap.m_channels[4] = AudioChannel::RearLeft;
m_mixInfo.m_channelMap.m_channels[5] = AudioChannel::RearRight;
m_mixInfo.m_channelMap.m_channels[6] = AudioChannel::SideLeft;
m_mixInfo.m_channelMap.m_channels[7] = AudioChannel::SideRight;
speakerMask =
0x00000001 | 0x00000002 | 0x00000004 | 0x00000008 | 0x00000010 | 0x00000020 | 0x00000200 | 0x00000400;
break;
}
if (numChans == 2) {
fwrite("RIFF", 1, 4, m_fp);
uint32_t dataSize = 0;
uint32_t chunkSize = 36 + dataSize;
fwrite(&chunkSize, 1, 4, m_fp);
fwrite("WAVE", 1, 4, m_fp);
fwrite("fmt ", 1, 4, m_fp);
uint32_t sixteen = 16;
fwrite(&sixteen, 1, 4, m_fp);
uint16_t audioFmt = 3;
fwrite(&audioFmt, 1, 2, m_fp);
uint16_t chCount = numChans;
fwrite(&chCount, 1, 2, m_fp);
uint32_t sampRate = sampleRate;
fwrite(&sampRate, 1, 4, m_fp);
uint16_t blockAlign = 4 * numChans;
uint32_t byteRate = sampRate * blockAlign;
fwrite(&byteRate, 1, 4, m_fp);
fwrite(&blockAlign, 1, 2, m_fp);
uint16_t bps = 32;
fwrite(&bps, 1, 2, m_fp);
fwrite("data", 1, 4, m_fp);
fwrite(&dataSize, 1, 4, m_fp);
} else {
fwrite("RIFF", 1, 4, m_fp);
uint32_t dataSize = 0;
uint32_t chunkSize = 60 + dataSize;
fwrite(&chunkSize, 1, 4, m_fp);
fwrite("WAVE", 1, 4, m_fp);
fwrite("fmt ", 1, 4, m_fp);
uint32_t forty = 40;
fwrite(&forty, 1, 4, m_fp);
uint16_t audioFmt = 0xFFFE;
fwrite(&audioFmt, 1, 2, m_fp);
uint16_t chCount = numChans;
fwrite(&chCount, 1, 2, m_fp);
uint32_t sampRate = sampleRate;
fwrite(&sampRate, 1, 4, m_fp);
uint16_t blockAlign = 4 * numChans;
uint32_t byteRate = sampRate * blockAlign;
fwrite(&byteRate, 1, 4, m_fp);
fwrite(&blockAlign, 1, 2, m_fp);
uint16_t bps = 32;
fwrite(&bps, 1, 2, m_fp);
uint16_t extSize = 22;
fwrite(&extSize, 1, 2, m_fp);
fwrite(&bps, 1, 2, m_fp);
fwrite(&speakerMask, 1, 4, m_fp);
fwrite("\x03\x00\x00\x00\x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71", 1, 16, m_fp);
fwrite("data", 1, 4, m_fp);
fwrite(&dataSize, 1, 4, m_fp);
}
m_mixInfo.m_periodFrames = 512;
m_mixInfo.m_sampleRate = sampleRate;
m_mixInfo.m_sampleFormat = SOXR_FLOAT32_I;
m_mixInfo.m_bitsPerSample = 32;
_buildAudioRenderClient();
}
WAVOutVoiceEngine(const char* path, double sampleRate, int numChans) {
m_fp = fopen(path, "wb");
if (!m_fp)
return;
prepareWAV(sampleRate, numChans);
}
#if _WIN32
WAVOutVoiceEngine(const wchar_t* path, double sampleRate, int numChans) {
m_fp = _wfopen(path, L"wb");
if (!m_fp)
return;
prepareWAV(sampleRate, numChans);
}
#endif
  void finishWav() {
    // The destructor calls finishWav() unconditionally, so bail out if the
    // constructor's fopen() failed and there is no file to patch.
    if (!m_fp)
      return;
    uint32_t dataSize = m_bytesWritten;
if (m_mixInfo.m_channelMap.m_channelCount == 2) {
fseek(m_fp, 4, SEEK_SET);
uint32_t chunkSize = 36 + dataSize;
fwrite(&chunkSize, 1, 4, m_fp);
fseek(m_fp, 40, SEEK_SET);
fwrite(&dataSize, 1, 4, m_fp);
} else {
fseek(m_fp, 4, SEEK_SET);
uint32_t chunkSize = 60 + dataSize;
fwrite(&chunkSize, 1, 4, m_fp);
fseek(m_fp, 64, SEEK_SET);
fwrite(&dataSize, 1, 4, m_fp);
}
fclose(m_fp);
}
~WAVOutVoiceEngine() { finishWav(); }
void _buildAudioRenderClient() {
m_5msFrames = m_mixInfo.m_sampleRate * 5 / 1000;
m_interleavedBuf.resize(m_mixInfo.m_channelMap.m_channelCount * m_5msFrames);
}
void _rebuildAudioRenderClient(double sampleRate, size_t periodFrames) {
m_mixInfo.m_periodFrames = periodFrames;
m_mixInfo.m_sampleRate = sampleRate;
_buildAudioRenderClient();
_resetSampleRate();
}
void pumpAndMixVoices() {
size_t frameSz = 4 * m_mixInfo.m_channelMap.m_channelCount;
_pumpAndMixVoices(m_5msFrames, m_interleavedBuf.data());
fwrite(m_interleavedBuf.data(), 1, m_5msFrames * frameSz, m_fp);
m_bytesWritten += m_5msFrames * frameSz;
}
};
std::unique_ptr<IAudioVoiceEngine> NewWAVAudioVoiceEngine(const char* path, double sampleRate, int numChans) {
std::unique_ptr<IAudioVoiceEngine> ret = std::make_unique<WAVOutVoiceEngine>(path, sampleRate, numChans);
if (!static_cast<WAVOutVoiceEngine&>(*ret).m_fp)
return {};
return ret;
}
#if _WIN32
std::unique_ptr<IAudioVoiceEngine> NewWAVAudioVoiceEngine(const wchar_t* path, double sampleRate, int numChans) {
std::unique_ptr<IAudioVoiceEngine> ret = std::make_unique<WAVOutVoiceEngine>(path, sampleRate, numChans);
if (!static_cast<WAVOutVoiceEngine&>(*ret).m_fp)
return {};
return ret;
}
#endif
} // namespace boo
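For comparison, a minimal offline-render sketch against the WAV backend. It assumes NewWAVAudioVoiceEngine is declared in "boo/audiodev/IAudioVoiceEngine.hpp" and that, as in pumpAndMixVoices() above, each pump emits one 5 ms block; a hypothetical example, not part of the commit.

#include "boo/audiodev/IAudioVoiceEngine.hpp"
#include <memory>

int main() {
  /* Render ten seconds of silence (no voices attached) to a stereo
   * 48 kHz float32 WAV file. */
  std::unique_ptr<boo::IAudioVoiceEngine> engine = boo::NewWAVAudioVoiceEngine("out.wav", 48000.0, 2);
  if (!engine)
    return 1; /* fopen() failed */

  /* Each pump writes one 5 ms block, so 200 pumps per second of audio. */
  for (int i = 0; i < 10 * 200; ++i)
    engine->pumpAndMixVoices();

  /* The engine's destructor patches the RIFF/data chunk sizes and closes the file. */
  return 0;
}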