diff --git a/CMakeLists.txt b/CMakeLists.txt
index c6a2a15..703dc7d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -14,6 +14,14 @@ endif()
 
 add_subdirectory(xxhash)
 
+set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}")
+find_package(IPP)
+if (IPP_FOUND)
+  add_definitions(-DINTEL_IPP=1)
+  include_directories(${IPP_INCLUDE_DIRS})
+  list(APPEND _BOO_SYS_LIBS ${IPP_LIBRARIES})
+endif ()
+
 set(WITH_LSR_BINDINGS OFF)
 set(BUILD_TESTS OFF)
 set(BUILD_SHARED_LIBS OFF)
@@ -222,6 +230,7 @@ add_library(boo
   lib/inputdev/DeviceFinder.cpp include/boo/inputdev/DeviceFinder.hpp
   lib/inputdev/HIDParser.cpp include/boo/inputdev/HIDParser.hpp
   lib/inputdev/IHIDDevice.hpp
+  lib/audiodev/Common.hpp
   lib/audiodev/WAVOut.cpp
   lib/audiodev/AudioMatrix.hpp
   #lib/audiodev/AudioMatrix.cpp
@@ -232,6 +241,8 @@ add_library(boo
   lib/audiodev/AudioVoice.cpp
   lib/audiodev/AudioSubmix.hpp
   lib/audiodev/AudioSubmix.cpp
+  lib/audiodev/LtRtProcessing.hpp
+  lib/audiodev/LtRtProcessing.cpp
   lib/audiodev/MIDIEncoder.cpp
   lib/audiodev/MIDIDecoder.cpp
   lib/audiodev/MIDICommon.hpp
diff --git a/FindIPP.cmake b/FindIPP.cmake
new file mode 100644
index 0000000..dd6a19c
--- /dev/null
+++ b/FindIPP.cmake
@@ -0,0 +1,81 @@
+# - Find Intel IPP
+# Find the IPP libraries
+# Options:
+#
+#   IPP_STATIC: true if using static linking
+#   IPP_MULTI_THREADED: true if using multi-threaded static linking
+#
+# This module defines the following variables:
+#
+#   IPP_FOUND       : True if IPP_INCLUDE_DIR are found
+#   IPP_INCLUDE_DIR : where to find ipp.h, etc.
+#   IPP_INCLUDE_DIRS: set when IPP_INCLUDE_DIR found
+#   IPP_LIBRARIES   : the library to link against.
+
+set(IPP_STATIC ON)
+
+include(FindPackageHandleStandardArgs)
+
+set(IPP_ROOT /opt/intel/ipp CACHE PATH "Folder contains IPP")
+
+# Find header file dir
+find_path(IPP_INCLUDE_DIR ipp.h
+          PATHS ${IPP_ROOT}/include)
+
+# Find libraries
+
+# Handle suffix
+set(_IPP_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
+
+if(WIN32)
+  set(CMAKE_FIND_LIBRARY_SUFFIXES .lib)
+else()
+  if(IPP_STATIC)
+    set(CMAKE_FIND_LIBRARY_SUFFIXES .a)
+  else()
+    set(CMAKE_FIND_LIBRARY_SUFFIXES .so)
+  endif()
+endif()
+
+if(IPP_STATIC)
+  if(IPP_MULTI_THREADED)
+    set(IPP_LIBNAME_SUFFIX _t)
+  else()
+    set(IPP_LIBNAME_SUFFIX _l)
+  endif()
+else()
+  set(IPP_LIBNAME_SUFFIX "")
+endif()
+
+set(IPP_LIBNAME_SUFFIX "")
+
+macro(find_ipp_library IPP_COMPONENT)
+  string(TOLOWER ${IPP_COMPONENT} IPP_COMPONENT_LOWER)
+
+  find_library(IPP_LIB_${IPP_COMPONENT} ipp${IPP_COMPONENT_LOWER}${IPP_LIBNAME_SUFFIX}
+               PATHS ${IPP_ROOT}/lib/ia32/ ${IPP_ROOT}/lib)
+endmacro()
+
+# IPP components
+# Core
+find_ipp_library(CORE)
+# Signal Processing
+find_ipp_library(S)
+# Vector Math
+find_ipp_library(VM)
+
+set(IPP_LIBRARY
+    ${IPP_LIB_CORE}
+    ${IPP_LIB_S}
+    ${IPP_LIB_VM})
+
+set(CMAKE_FIND_LIBRARY_SUFFIXES ${_IPP_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
+
+find_package_handle_standard_args(IPP DEFAULT_MSG
+                                  IPP_INCLUDE_DIR IPP_LIBRARY)
+
+if (IPP_FOUND)
+  set(IPP_INCLUDE_DIRS ${IPP_INCLUDE_DIR})
+  set(IPP_LIBRARIES ${IPP_LIBRARY})
+endif()
+
diff --git a/include/boo/audiodev/IAudioVoiceEngine.hpp b/include/boo/audiodev/IAudioVoiceEngine.hpp
index a4ac8f7..3b7acf1 100644
--- a/include/boo/audiodev/IAudioVoiceEngine.hpp
+++ b/include/boo/audiodev/IAudioVoiceEngine.hpp
@@ -60,6 +60,9 @@ struct IAudioVoiceEngine
     /** Set total volume of engine */
     virtual void setVolume(float vol)=0;
 
+    /** Enable or disable Lt/Rt surround encoding. If successful, getAvailableSet() will return Surround51 */
+    virtual bool enableLtRt(bool enable)=0;
+
     /** Get list of MIDI devices found on system */
     virtual std::vector<std::pair<std::string, std::string>> enumerateMIDIDevices() const=0;
 
diff --git a/lib/audiodev/AudioSubmix.cpp b/lib/audiodev/AudioSubmix.cpp
index 6d90ab4..bf8143a 100644
--- a/lib/audiodev/AudioSubmix.cpp
+++ b/lib/audiodev/AudioSubmix.cpp
@@ -95,7 +95,7 @@ int16_t* AudioSubmix::_getMergeBuf16(size_t frames)
     if (m_redirect16)
         return m_redirect16;
 
-    size_t sampleCount = frames * m_root.m_mixInfo.m_channelMap.m_channelCount;
+    size_t sampleCount = frames * m_root.clientMixInfo().m_channelMap.m_channelCount;
     if (m_scratch16.size() < sampleCount)
         m_scratch16.resize(sampleCount);
 
@@ -107,7 +107,7 @@ int32_t* AudioSubmix::_getMergeBuf32(size_t frames)
     if (m_redirect32)
         return m_redirect32;
 
-    size_t sampleCount = frames * m_root.m_mixInfo.m_channelMap.m_channelCount;
+    size_t sampleCount = frames * m_root.clientMixInfo().m_channelMap.m_channelCount;
     if (m_scratch32.size() < sampleCount)
         m_scratch32.resize(sampleCount);
 
@@ -119,7 +119,7 @@ float* AudioSubmix::_getMergeBufFlt(size_t frames)
     if (m_redirectFlt)
         return m_redirectFlt;
 
-    size_t sampleCount = frames * m_root.m_mixInfo.m_channelMap.m_channelCount;
+    size_t sampleCount = frames * m_root.clientMixInfo().m_channelMap.m_channelCount;
     if (m_scratchFlt.size() < sampleCount)
         m_scratchFlt.resize(sampleCount);
 
@@ -128,13 +128,13 @@ float* AudioSubmix::_getMergeBufFlt(size_t frames)
 
 size_t AudioSubmix::_pumpAndMix16(size_t frames)
 {
-    ChannelMap& chMap = m_root.m_mixInfo.m_channelMap;
+    const ChannelMap& chMap = m_root.clientMixInfo().m_channelMap;
     size_t chanCount = chMap.m_channelCount;
 
     if (m_redirect16)
     {
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_redirect16, frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_redirect16, frames, chMap, m_root.clientMixInfo().m_sampleRate);
         m_redirect16 += chanCount * frames;
     }
     else
@@ -143,7 +143,7 @@ size_t AudioSubmix::_pumpAndMix16(size_t frames)
         if (m_scratch16.size() < sampleCount)
             m_scratch16.resize(sampleCount);
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_scratch16.data(), frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_scratch16.data(), frames, chMap, m_root.clientMixInfo().m_sampleRate);
 
         size_t curSlewFrame = m_slewFrames;
         for (auto& smx : m_sendGains)
@@ -188,13 +188,13 @@ size_t AudioSubmix::_pumpAndMix16(size_t frames)
 
 size_t AudioSubmix::_pumpAndMix32(size_t frames)
 {
-    ChannelMap& chMap = m_root.m_mixInfo.m_channelMap;
+    const ChannelMap& chMap = m_root.clientMixInfo().m_channelMap;
     size_t chanCount = chMap.m_channelCount;
 
     if (m_redirect32)
     {
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_redirect32, frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_redirect32, frames, chMap, m_root.clientMixInfo().m_sampleRate);
         m_redirect32 += chanCount * frames;
     }
     else
@@ -203,7 +203,7 @@ size_t AudioSubmix::_pumpAndMix32(size_t frames)
         if (m_scratch32.size() < sampleCount)
             m_scratch32.resize(sampleCount);
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_scratch32.data(), frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_scratch32.data(), frames, chMap, m_root.clientMixInfo().m_sampleRate);
 
         size_t curSlewFrame = m_slewFrames;
         for (auto& smx : m_sendGains)
@@ -248,13 +248,13 @@ size_t AudioSubmix::_pumpAndMix32(size_t frames)
 
 size_t AudioSubmix::_pumpAndMixFlt(size_t frames)
 {
-    ChannelMap& chMap = m_root.m_mixInfo.m_channelMap;
+    const ChannelMap& chMap = m_root.clientMixInfo().m_channelMap;
     size_t chanCount = chMap.m_channelCount;
 
     if (m_redirectFlt)
     {
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_redirectFlt, frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_redirectFlt, frames, chMap, m_root.clientMixInfo().m_sampleRate);
         m_redirectFlt += chanCount * frames;
     }
     else
@@ -263,7 +263,7 @@ size_t AudioSubmix::_pumpAndMixFlt(size_t frames)
         if (m_scratchFlt.size() < sampleCount)
             m_scratchFlt.resize(sampleCount);
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_scratchFlt.data(), frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_scratchFlt.data(), frames, chMap, m_root.clientMixInfo().m_sampleRate);
 
         size_t curSlewFrame = m_slewFrames;
         for (auto& smx : m_sendGains)
diff --git a/lib/audiodev/AudioVoice.cpp b/lib/audiodev/AudioVoice.cpp
index d0126f5..de7a5c4 100644
--- a/lib/audiodev/AudioVoice.cpp
+++ b/lib/audiodev/AudioVoice.cpp
@@ -146,14 +146,14 @@
             {
                 AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
                 m_cb->routeAudio(oDone, 1, dt, smx.m_busId, scratch16Pre.data(), scratch16Post.data());
-                mtx.second.mixMonoSampleData(m_root.m_mixInfo, scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
+                mtx.second.mixMonoSampleData(m_root.clientMixInfo(), scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
             }
         }
         else
         {
             AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
             m_cb->routeAudio(oDone, 1, dt, m_root.m_mainSubmix.m_busId, scratch16Pre.data(), scratch16Post.data());
-            DefaultMonoMtx.mixMonoSampleData(m_root.m_mixInfo, scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
+            DefaultMonoMtx.mixMonoSampleData(m_root.clientMixInfo(), scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
         }
     }
 
@@ -183,14 +183,14 @@
             {
                 AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
                 m_cb->routeAudio(oDone, 1, dt, smx.m_busId, scratch32Pre.data(), scratch32Post.data());
-                mtx.second.mixMonoSampleData(m_root.m_mixInfo, scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
+                mtx.second.mixMonoSampleData(m_root.clientMixInfo(), scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
             }
         }
         else
         {
             AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
             m_cb->routeAudio(oDone, 1, dt, m_root.m_mainSubmix.m_busId, scratch32Pre.data(), scratch32Post.data());
-            DefaultMonoMtx.mixMonoSampleData(m_root.m_mixInfo, scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
+            DefaultMonoMtx.mixMonoSampleData(m_root.clientMixInfo(), scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
         }
     }
 
@@ -220,14 +220,14 @@
             {
                 AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
                 m_cb->routeAudio(oDone, 1, dt, smx.m_busId, scratchFltPre.data(), scratchFltPost.data());
-                mtx.second.mixMonoSampleData(m_root.m_mixInfo, scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
+                mtx.second.mixMonoSampleData(m_root.clientMixInfo(), scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
             }
         }
         else
         {
             AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
             m_cb->routeAudio(oDone, 1, dt, m_root.m_mainSubmix.m_busId, scratchFltPre.data(), scratchFltPost.data());
-            DefaultMonoMtx.mixMonoSampleData(m_root.m_mixInfo, scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
+            DefaultMonoMtx.mixMonoSampleData(m_root.clientMixInfo(), scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
         }
     }
 
@@ -349,14 +349,14 @@
             {
                 AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
                 m_cb->routeAudio(oDone, 2, dt, smx.m_busId, scratch16Pre.data(), scratch16Post.data());
-                mtx.second.mixStereoSampleData(m_root.m_mixInfo, scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
+                mtx.second.mixStereoSampleData(m_root.clientMixInfo(), scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
             }
         }
         else
        {
             AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
             m_cb->routeAudio(oDone, 2, dt, m_root.m_mainSubmix.m_busId, scratch16Pre.data(), scratch16Post.data());
-            DefaultStereoMtx.mixStereoSampleData(m_root.m_mixInfo, scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
+            DefaultStereoMtx.mixStereoSampleData(m_root.clientMixInfo(), scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
         }
     }
 
@@ -388,14 +388,14 @@
             {
                 AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
                 m_cb->routeAudio(oDone, 2, dt, smx.m_busId, scratch32Pre.data(), scratch32Post.data());
-                mtx.second.mixStereoSampleData(m_root.m_mixInfo, scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
+                mtx.second.mixStereoSampleData(m_root.clientMixInfo(), scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
             }
         }
         else
         {
             AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
             m_cb->routeAudio(oDone, 2, dt, m_root.m_mainSubmix.m_busId, scratch32Pre.data(), scratch32Post.data());
-            DefaultStereoMtx.mixStereoSampleData(m_root.m_mixInfo, scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
+            DefaultStereoMtx.mixStereoSampleData(m_root.clientMixInfo(), scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
         }
     }
 
@@ -427,14 +427,14 @@
             {
                 AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
                 m_cb->routeAudio(oDone, 2, dt, smx.m_busId, scratchFltPre.data(), scratchFltPost.data());
-                mtx.second.mixStereoSampleData(m_root.m_mixInfo, scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
+                mtx.second.mixStereoSampleData(m_root.clientMixInfo(), scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
             }
         }
         else
         {
             AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
             m_cb->routeAudio(oDone, 2, dt, m_root.m_mainSubmix.m_busId, scratchFltPre.data(), scratchFltPost.data());
-            DefaultStereoMtx.mixStereoSampleData(m_root.m_mixInfo, scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
+            DefaultStereoMtx.mixStereoSampleData(m_root.clientMixInfo(), scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
         }
     }
 
diff --git a/lib/audiodev/AudioVoiceEngine.cpp b/lib/audiodev/AudioVoiceEngine.cpp
index 15948cc..f47e5e2 100644
--- a/lib/audiodev/AudioVoiceEngine.cpp
+++ b/lib/audiodev/AudioVoiceEngine.cpp
@@ -1,5 +1,5 @@
 #include "AudioVoiceEngine.hpp"
-#include
+#include "LtRtProcessing.hpp"
 
 namespace boo
 {
@@ -15,7 +15,17 @@ BaseAudioVoiceEngine::~BaseAudioVoiceEngine()
 void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, int16_t* dataOut)
 {
     memset(dataOut, 0, sizeof(int16_t) * frames * m_mixInfo.m_channelMap.m_channelCount);
-    m_mainSubmix.m_redirect16 = dataOut;
+    if (m_ltRtProcessing)
+    {
+        size_t sampleCount = m_5msFrames * 5;
+        if (m_ltRtIn16.size() < sampleCount)
+            m_ltRtIn16.resize(sampleCount);
+        m_mainSubmix.m_redirect16 = m_ltRtIn16.data();
+    }
+    else
+    {
+        m_mainSubmix.m_redirect16 = dataOut;
+    }
 
     if (m_submixesDirty)
     {
@@ -50,6 +60,12 @@
         for (auto it = m_linearizedSubmixes.rbegin() ; it != m_linearizedSubmixes.rend() ; ++it)
             (*it)->_pumpAndMix16(thisFrames);
 
+        if (m_ltRtProcessing)
+        {
+            m_ltRtProcessing->Process(m_ltRtIn16.data(), dataOut, int(thisFrames));
+            m_mainSubmix.m_redirect16 = m_ltRtIn16.data();
+        }
+
         size_t sampleCount = thisFrames * m_mixInfo.m_channelMap.m_channelCount;
         for (size_t i=0 ; i
             (*it)->_pumpAndMix32(thisFrames);
 
+        if (m_ltRtProcessing)
+        {
+            m_ltRtProcessing->Process(m_ltRtIn32.data(), dataOut, int(thisFrames));
+            m_mainSubmix.m_redirect32 = m_ltRtIn32.data();
+        }
+
         size_t sampleCount = thisFrames * m_mixInfo.m_channelMap.m_channelCount;
         for (size_t i=0 ; i
             (*it)->_pumpAndMixFlt(thisFrames);
 
+        if (m_ltRtProcessing)
+        {
+            m_ltRtProcessing->Process(m_ltRtInFlt.data(), dataOut, int(thisFrames));
+            m_mainSubmix.m_redirectFlt = m_ltRtInFlt.data();
+        }
+
         size_t sampleCount = thisFrames * m_mixInfo.m_channelMap.m_channelCount;
         for (size_t i=0 ; i
+        m_ltRtProcessing = std::make_unique<LtRtProcessing>(m_5msFrames, m_mixInfo);
+    else
+        m_ltRtProcessing.reset();
+    return m_ltRtProcessing.operator bool();
+}
+
 const AudioVoiceEngineMixInfo& BaseAudioVoiceEngine::mixInfo() const
 {
     return m_mixInfo;
 }
 
+const AudioVoiceEngineMixInfo& BaseAudioVoiceEngine::clientMixInfo() const
+{
+    return m_ltRtProcessing ? m_ltRtProcessing->inMixInfo() : m_mixInfo;
+}
+
 }
diff --git a/lib/audiodev/AudioVoiceEngine.hpp b/lib/audiodev/AudioVoiceEngine.hpp
index e68d0c0..bed47d6 100644
--- a/lib/audiodev/AudioVoiceEngine.hpp
+++ b/lib/audiodev/AudioVoiceEngine.hpp
@@ -2,6 +2,8 @@
 #define BOO_AUDIOVOICEENGINE_HPP
 
 #include "boo/audiodev/IAudioVoiceEngine.hpp"
+#include "LtRtProcessing.hpp"
+#include "Common.hpp"
 #include "AudioVoice.hpp"
 #include "AudioSubmix.hpp"
 #include
@@ -9,17 +11,6 @@
 namespace boo
 {
 
-/** Pertinent information from audio backend about optimal mixed-audio representation */
-struct AudioVoiceEngineMixInfo
-{
-    double m_sampleRate;
-    soxr_datatype_t m_sampleFormat;
-    unsigned m_bitsPerSample;
-    AudioChannelSet m_channels;
-    ChannelMap m_channelMap;
-    size_t m_periodFrames;
-};
-
 /** Base class for managing mixing and sample-rate-conversion amongst active voices */
 class BaseAudioVoiceEngine : public IAudioVoiceEngine
 {
@@ -44,6 +35,12 @@ protected:
     std::vector<int32_t> m_scratch32Post;
     std::vector<float> m_scratchFltPost;
 
+    /* LtRt processing if enabled */
+    std::unique_ptr<LtRtProcessing> m_ltRtProcessing;
+    std::vector<int16_t> m_ltRtIn16;
+    std::vector<int32_t> m_ltRtIn32;
+    std::vector<float> m_ltRtInFlt;
+
     AudioSubmix m_mainSubmix;
     std::list<AudioSubmix*> m_linearizedSubmixes;
     bool m_submixesDirty = true;
@@ -71,8 +68,10 @@ public:
     void setCallbackInterface(IAudioVoiceEngineCallback* cb);
 
     void setVolume(float vol);
+    bool enableLtRt(bool enable);
     const AudioVoiceEngineMixInfo& mixInfo() const;
-    AudioChannelSet getAvailableSet() {return m_mixInfo.m_channels;}
+    const AudioVoiceEngineMixInfo& clientMixInfo() const;
+    AudioChannelSet getAvailableSet() {return clientMixInfo().m_channels;}
     void pumpAndMixVoices() {}
     size_t get5MsFrames() const {return m_5msFrames;}
 };
diff --git a/lib/audiodev/Common.hpp b/lib/audiodev/Common.hpp
new file mode 100644
index 0000000..6ac6386
--- /dev/null
+++ b/lib/audiodev/Common.hpp
@@ -0,0 +1,22 @@
+#ifndef BOO_AUDIOCOMMON_HPP
+#define BOO_AUDIOCOMMON_HPP
+
+#include
+
+namespace boo
+{
+
+/** Pertinent information from audio backend about optimal mixed-audio representation */
+struct AudioVoiceEngineMixInfo
+{
+    double m_sampleRate;
+    soxr_datatype_t m_sampleFormat;
+    unsigned m_bitsPerSample;
+    AudioChannelSet m_channels;
+    ChannelMap m_channelMap;
+    size_t m_periodFrames;
+};
+
+}
+
+#endif // BOO_AUDIOCOMMON_HPP
diff --git a/lib/audiodev/LtRtProcessing.cpp b/lib/audiodev/LtRtProcessing.cpp
new file mode 100644
index 0000000..1c4d302
--- /dev/null
+++ b/lib/audiodev/LtRtProcessing.cpp
@@ -0,0 +1,270 @@
+#include "LtRtProcessing.hpp"
+#include
+
+namespace boo
+{
+
+template <typename T>
+inline T ClampFull(float in)
+{
+    if(std::is_floating_point<T>())
+    {
+        return std::min(std::max(in, -1.f), 1.f);
+    }
+    else
+    {
+        constexpr T MAX = std::numeric_limits<T>::max();
+        constexpr T MIN = std::numeric_limits<T>::min();
+
+        if (in < MIN)
+            return MIN;
+        else if (in > MAX)
+            return MAX;
+        else
+            return in;
+    }
+}
+
+#if INTEL_IPP
+
+WindowedHilbert::WindowedHilbert(int windowSamples)
+: m_windowSamples(windowSamples), m_halfSamples(windowSamples / 2),
+  m_inputBuf(new Ipp32f[m_windowSamples * 2 + m_halfSamples]),
+  m_outputBuf(new Ipp32fc[m_windowSamples * 4]),
+  m_hammingTable(new Ipp32f[m_halfSamples])
+{
+    memset(m_inputBuf.get(), 0, sizeof(Ipp32fc) * m_windowSamples * 2 + m_halfSamples);
+    memset(m_outputBuf.get(), 0, sizeof(Ipp32fc) * m_windowSamples * 4);
+    m_output[0] = m_outputBuf.get();
+    m_output[1] = m_output[0] + m_windowSamples;
+    m_output[2] = m_output[1] + m_windowSamples;
+    m_output[3] = m_output[2] + m_windowSamples;
+    int sizeSpec, sizeBuf;
+    ippsHilbertGetSize_32f32fc(m_windowSamples, ippAlgHintNone, &sizeSpec, &sizeBuf);
+    m_spec = (IppsHilbertSpec*)ippMalloc(sizeSpec);
+    m_buffer = (Ipp8u*)ippMalloc(sizeBuf);
+    ippsHilbertInit_32f32fc(m_windowSamples, ippAlgHintNone, m_spec, m_buffer);
+
+    for (int i=0 ; i
+template <typename T>
+void WindowedHilbert::Output(T* output, float lCoef, float rCoef) const
+{
+    int first, middle, last;
+    if (m_bufIdx)
+    {
+        first = 3;
+        middle = 0;
+        last = 1;
+    }
+    else
+    {
+        first = 1;
+        middle = 2;
+        last = 3;
+    }
+
+    int i, t;
+    for (i=0, t=0 ; i
+        output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
+        output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
+    }
+    for (; i
+        output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
+        output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
+    }
+    for (t=0 ; i
+        output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
+        output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
+    }
+}
+
+template void WindowedHilbert::Output(int16_t* output, float lCoef, float rCoef) const;
+template void WindowedHilbert::Output(int32_t* output, float lCoef, float rCoef) const;
+template void WindowedHilbert::Output(float* output, float lCoef, float rCoef) const;
+
+#endif
+
+template <> int16_t* LtRtProcessing::_getInBuf<int16_t>() { return m_16Buffer.get(); }
+template <> int32_t* LtRtProcessing::_getInBuf<int32_t>() { return m_32Buffer.get(); }
+template <> float* LtRtProcessing::_getInBuf<float>() { return m_fltBuffer.get(); }
+
+template <> int16_t* LtRtProcessing::_getOutBuf<int16_t>() { return m_16Buffer.get() + m_outputOffset; }
+template <> int32_t* LtRtProcessing::_getOutBuf<int32_t>() { return m_32Buffer.get() + m_outputOffset; }
+template <> float* LtRtProcessing::_getOutBuf<float>() { return m_fltBuffer.get() + m_outputOffset; }
+
+LtRtProcessing::LtRtProcessing(int _5msFrames, const AudioVoiceEngineMixInfo& mixInfo)
+: m_inMixInfo(mixInfo), m_5msFrames(_5msFrames), m_5msFramesHalf(_5msFrames / 2),
+  m_outputOffset(m_5msFrames * 5 * 2), m_hilbertSL(_5msFrames), m_hilbertSR(_5msFrames)
+{
+    m_inMixInfo.m_channels = AudioChannelSet::Surround51;
+    m_inMixInfo.m_channelMap.m_channelCount = 5;
+    m_inMixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
+    m_inMixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
+    m_inMixInfo.m_channelMap.m_channels[2] = AudioChannel::FrontCenter;
+    m_inMixInfo.m_channelMap.m_channels[3] = AudioChannel::RearLeft;
+    m_inMixInfo.m_channelMap.m_channels[4] = AudioChannel::RearRight;
+
+    int samples = m_5msFrames * (5 * 2 + 2 * 2);
+    switch (mixInfo.m_sampleFormat)
+    {
+    case SOXR_INT16_I:
+        m_16Buffer.reset(new int16_t[samples]);
+        memset(m_16Buffer.get(), 0, sizeof(int16_t) * samples);
+        break;
+    case SOXR_INT32_I:
+        m_32Buffer.reset(new int32_t[samples]);
+        memset(m_32Buffer.get(), 0, sizeof(int32_t) * samples);
+        break;
+    case SOXR_FLOAT32_I:
+        m_fltBuffer.reset(new float[samples]);
+        memset(m_fltBuffer.get(), 0, sizeof(float) * samples);
+        break;
+    default:
+        break;
+    }
+}
+
+template <typename T>
+void LtRtProcessing::Process(const T* input, T* output, int frameCount)
+{
+    int outFramesRem = frameCount;
+    T* inBuf = _getInBuf<T>();
+    T* outBuf = _getOutBuf<T>();
+    int tail = std::min(m_5msFrames * 2, m_bufferTail + frameCount);
+    int samples = (tail - m_bufferTail) * 5;
+    memmove(&inBuf[m_bufferTail * 5], input, samples * sizeof(float));
+    input += samples;
+    frameCount -= tail - m_bufferTail;
+
+    int bufIdx = m_bufferTail / m_5msFrames;
+    if (tail / m_5msFrames > bufIdx)
+    {
+        T* in = &inBuf[bufIdx * m_5msFrames * 5];
+        T* out = &outBuf[bufIdx * m_5msFrames * 2];
+        m_hilbertSL.AddWindow(in + 3, 5);
+        m_hilbertSR.AddWindow(in + 4, 5);
+
+        // x(:,1) + sqrt(.5)*x(:,3) + sqrt(19/25)*x(:,4) + sqrt(6/25)*x(:,5)
+        // x(:,2) + sqrt(.5)*x(:,3) - sqrt(6/25)*x(:,4) - sqrt(19/25)*x(:,5)
+        if (bufIdx)
+        {
+            int delayI = -m_5msFramesHalf;
+            for (int i=0 ; i
+                out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
+                out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
+            }
+        }
+        else
+        {
+            int delayI = m_5msFrames * 2 - m_5msFramesHalf;
+            int i;
+            for (i=0 ; i
+                out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
+                out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
+            }
+            delayI = 0;
+            for (; i
+                out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
+                out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
+            }
+        }
+#if INTEL_IPP
+        m_hilbertSL.Output(out, 0.8717798f, 0.4898979f);
+        m_hilbertSR.Output(out, -0.4898979f, -0.8717798f);
+#endif
+    }
+    m_bufferTail = tail;
+
+    if (frameCount)
+    {
+        samples = frameCount * 5;
+        memmove(inBuf, input, samples * sizeof(float));
+        m_bufferTail = frameCount;
+    }
+
+    int head = std::min(m_5msFrames * 2, m_bufferHead + outFramesRem);
+    samples = (head - m_bufferHead) * 2;
+    memmove(output, outBuf + m_bufferHead * 2, samples * sizeof(float));
+    output += samples;
+    outFramesRem -= head - m_bufferHead;
+    m_bufferHead = head;
+    if (outFramesRem)
+    {
+        samples = outFramesRem * 2;
+        memmove(output, outBuf, samples * sizeof(float));
+        m_bufferHead = outFramesRem;
+    }
+}
+
+template void LtRtProcessing::Process(const int16_t* input, int16_t* output, int frameCount);
+template void LtRtProcessing::Process(const int32_t* input, int32_t* output, int frameCount);
+template void LtRtProcessing::Process(const float* input, float* output, int frameCount);
+
+}
diff --git a/lib/audiodev/LtRtProcessing.hpp b/lib/audiodev/LtRtProcessing.hpp
new file mode 100644
index 0000000..7f08fa2
--- /dev/null
+++ b/lib/audiodev/LtRtProcessing.hpp
@@ -0,0 +1,65 @@
+#ifndef BOO_LTRTPROCESSING_HPP
+#define BOO_LTRTPROCESSING_HPP
+
+#include "boo/System.hpp"
+#include "boo/audiodev/IAudioVoice.hpp"
+#include "Common.hpp"
+#include
+
+#if INTEL_IPP
+#include "ipp.h"
+#endif
+
+namespace boo
+{
+
+#if INTEL_IPP
+class WindowedHilbert
+{
+    IppsHilbertSpec* m_spec;
+    Ipp8u* m_buffer;
+    int m_windowSamples, m_halfSamples;
+    int m_bufIdx = 0;
+    int m_bufferTail = 0;
+    std::unique_ptr<Ipp32f[]> m_inputBuf;
+    std::unique_ptr<Ipp32fc[]> m_outputBuf;
+    Ipp32fc* m_output[4];
+    std::unique_ptr<Ipp32f[]> m_hammingTable;
+    void _AddWindow();
+public:
+    explicit WindowedHilbert(int windowSamples);
+    ~WindowedHilbert();
+    void AddWindow(const float* input, int stride);
+    void AddWindow(const int32_t* input, int stride);
+    void AddWindow(const int16_t* input, int stride);
+    template <typename T>
+    void Output(T* output, float lCoef, float rCoef) const;
+};
+#endif
+
+class LtRtProcessing
+{
+    AudioVoiceEngineMixInfo m_inMixInfo;
+    int m_5msFrames;
+    int m_5msFramesHalf;
+    int m_outputOffset;
+    int m_bufferTail = 0;
+    int m_bufferHead = 0;
+    std::unique_ptr<int16_t[]> m_16Buffer;
+    std::unique_ptr<int32_t[]> m_32Buffer;
+    std::unique_ptr<float[]> m_fltBuffer;
+#if INTEL_IPP
+    WindowedHilbert m_hilbertSL, m_hilbertSR;
+#endif
+    template <typename T> T* _getInBuf();
+    template <typename T> T* _getOutBuf();
+public:
+    LtRtProcessing(int _5msFrames, const AudioVoiceEngineMixInfo& mixInfo);
+    template <typename T>
+    void Process(const T* input, T* output, int frameCount);
+    const AudioVoiceEngineMixInfo& inMixInfo() const { return m_inMixInfo; }
+};
+
+}
+
+#endif // BOO_LTRTPROCESSING_HPP
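
The only client-facing change in this patch is the new `IAudioVoiceEngine::enableLtRt()` entry point; the rest is internal plumbing behind `LtRtProcessing`. Below is a minimal usage sketch. It is hedged: only `enableLtRt`, `getAvailableSet`, and `AudioChannelSet::Surround51` come from this diff, while obtaining the engine instance (e.g. from a platform factory) and the surrounding application code are assumptions for illustration.

```cpp
#include "boo/audiodev/IAudioVoiceEngine.hpp"

// Sketch only: how a client might opt into Lt/Rt matrix-surround encoding.
// How `engine` is created is outside the scope of this patch.
static void EnableMatrixSurround(boo::IAudioVoiceEngine& engine)
{
    // Request the Lt/Rt stage; returns false if it could not be constructed.
    if (!engine.enableLtRt(true))
        return; // engine keeps its native channel layout

    // With Lt/Rt active, clients mix as if a 5.1 layout were present
    // (FL, FR, FC, RL, RR); the engine folds it down to two channels.
    if (engine.getAvailableSet() == boo::AudioChannelSet::Surround51)
    {
        // route 5.1 voice/submix audio as usual
    }
}
```

The magic constants in `LtRtProcessing.cpp` line up with the downmix matrix quoted in its comment: sqrt(.5) ≈ 0.7071068 scales the center channel into both outputs, while sqrt(19/25) ≈ 0.8717798 and sqrt(6/25) ≈ 0.4898979 weight the Hilbert-shifted surround channels that `WindowedHilbert` produces.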