mirror of https://github.com/AxioDL/boo.git
Experimental support for LtRt surround matrixing
parent 71b8893dde
commit 0b35c584f6
CMakeLists.txt
@@ -14,6 +14,14 @@ endif()
 
 add_subdirectory(xxhash)
 
+set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}")
+find_package(IPP)
+if (IPP_FOUND)
+  add_definitions(-DINTEL_IPP=1)
+  include_directories(${IPP_INCLUDE_DIRS})
+  list(APPEND _BOO_SYS_LIBS ${IPP_LIBRARIES})
+endif ()
+
 set(WITH_LSR_BINDINGS OFF)
 set(BUILD_TESTS OFF)
 set(BUILD_SHARED_LIBS OFF)
@@ -222,6 +230,7 @@ add_library(boo
     lib/inputdev/DeviceFinder.cpp include/boo/inputdev/DeviceFinder.hpp
     lib/inputdev/HIDParser.cpp include/boo/inputdev/HIDParser.hpp
     lib/inputdev/IHIDDevice.hpp
+    lib/audiodev/Common.hpp
     lib/audiodev/WAVOut.cpp
     lib/audiodev/AudioMatrix.hpp
     #lib/audiodev/AudioMatrix.cpp
@@ -232,6 +241,8 @@ add_library(boo
     lib/audiodev/AudioVoice.cpp
     lib/audiodev/AudioSubmix.hpp
     lib/audiodev/AudioSubmix.cpp
+    lib/audiodev/LtRtProcessing.hpp
+    lib/audiodev/LtRtProcessing.cpp
     lib/audiodev/MIDIEncoder.cpp
     lib/audiodev/MIDIDecoder.cpp
     lib/audiodev/MIDICommon.hpp
FindIPP.cmake (new file)
@@ -0,0 +1,81 @@
+# - Find Intel IPP
+# Find the IPP libraries
+# Options:
+#
+# IPP_STATIC: true if using static linking
+# IPP_MULTI_THREADED: true if using multi-threaded static linking
+#
+# This module defines the following variables:
+#
+# IPP_FOUND : True if IPP_INCLUDE_DIR are found
+# IPP_INCLUDE_DIR : where to find ipp.h, etc.
+# IPP_INCLUDE_DIRS: set when IPP_INCLUDE_DIR found
+# IPP_LIBRARIES : the library to link against.
+
+set(IPP_STATIC ON)
+
+include(FindPackageHandleStandardArgs)
+
+set(IPP_ROOT /opt/intel/ipp CACHE PATH "Folder contains IPP")
+
+# Find header file dir
+find_path(IPP_INCLUDE_DIR ipp.h
+          PATHS ${IPP_ROOT}/include)
+
+# Find libraries
+
+# Handle suffix
+set(_IPP_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
+
+if(WIN32)
+  set(CMAKE_FIND_LIBRARY_SUFFIXES .lib)
+else()
+  if(IPP_STATIC)
+    set(CMAKE_FIND_LIBRARY_SUFFIXES .a)
+  else()
+    set(CMAKE_FIND_LIBRARY_SUFFIXES .so)
+  endif()
+endif()
+
+if(IPP_STATIC)
+  if(IPP_MULTI_THREADED)
+    set(IPP_LIBNAME_SUFFIX _t)
+  else()
+    set(IPP_LIBNAME_SUFFIX _l)
+  endif()
+else()
+  set(IPP_LIBNAME_SUFFIX "")
+endif()
+
+set(IPP_LIBNAME_SUFFIX "")
+
+macro(find_ipp_library IPP_COMPONENT)
+  string(TOLOWER ${IPP_COMPONENT} IPP_COMPONENT_LOWER)
+
+  find_library(IPP_LIB_${IPP_COMPONENT} ipp${IPP_COMPONENT_LOWER}${IPP_LIBNAME_SUFFIX}
+               PATHS ${IPP_ROOT}/lib/ia32/ ${IPP_ROOT}/lib)
+endmacro()
+
+# IPP components
+# Core
+find_ipp_library(CORE)
+# Signal Processing
+find_ipp_library(S)
+# Vector Math
+find_ipp_library(VM)
+
+set(IPP_LIBRARY
+    ${IPP_LIB_CORE}
+    ${IPP_LIB_S}
+    ${IPP_LIB_VM})
+
+set(CMAKE_FIND_LIBRARY_SUFFIXES ${_IPP_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
+
+find_package_handle_standard_args(IPP DEFAULT_MSG
+                                  IPP_INCLUDE_DIR IPP_LIBRARY)
+
+if (IPP_FOUND)
+  set(IPP_INCLUDE_DIRS ${IPP_INCLUDE_DIR})
+  set(IPP_LIBRARIES ${IPP_LIBRARY})
+endif()
include/boo/audiodev/IAudioVoiceEngine.hpp
@@ -60,6 +60,9 @@ struct IAudioVoiceEngine
     /** Set total volume of engine */
     virtual void setVolume(float vol)=0;
 
+    /** Enable or disable Lt/Rt surround encoding. If successful, getAvailableSet() will return Surround51 */
+    virtual bool enableLtRt(bool enable)=0;
+
     /** Get list of MIDI devices found on system */
     virtual std::vector<std::pair<std::string, std::string>> enumerateMIDIDevices() const=0;
 
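For orientation, a minimal usage sketch of the new interface method. The wrapper function and the way the engine instance is obtained are assumptions made for illustration; the behaviour noted in the comments follows the doc comment above and the BaseAudioVoiceEngine implementation later in this commit.

#include "boo/audiodev/IAudioVoiceEngine.hpp"

// Hypothetical call site; how the IAudioVoiceEngine instance is created is
// outside the scope of this commit.
void requestLtRtSurround(boo::IAudioVoiceEngine& engine)
{
    // enableLtRt() only succeeds when the backend opened a plain stereo output
    // (see BaseAudioVoiceEngine::enableLtRt further down in this commit).
    if (engine.enableLtRt(true))
    {
        // The engine now reports a 5.1 client layout, so voices and submixes
        // can pan across five channels while the device keeps receiving a
        // two-channel, surround-matrixed signal:
        // engine.getAvailableSet() == boo::AudioChannelSet::Surround51
    }
}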
lib/audiodev/AudioSubmix.cpp
@@ -95,7 +95,7 @@ int16_t* AudioSubmix::_getMergeBuf16(size_t frames)
     if (m_redirect16)
         return m_redirect16;
 
-    size_t sampleCount = frames * m_root.m_mixInfo.m_channelMap.m_channelCount;
+    size_t sampleCount = frames * m_root.clientMixInfo().m_channelMap.m_channelCount;
     if (m_scratch16.size() < sampleCount)
         m_scratch16.resize(sampleCount);
 
@@ -107,7 +107,7 @@ int32_t* AudioSubmix::_getMergeBuf32(size_t frames)
     if (m_redirect32)
         return m_redirect32;
 
-    size_t sampleCount = frames * m_root.m_mixInfo.m_channelMap.m_channelCount;
+    size_t sampleCount = frames * m_root.clientMixInfo().m_channelMap.m_channelCount;
     if (m_scratch32.size() < sampleCount)
         m_scratch32.resize(sampleCount);
 
@@ -119,7 +119,7 @@ float* AudioSubmix::_getMergeBufFlt(size_t frames)
     if (m_redirectFlt)
         return m_redirectFlt;
 
-    size_t sampleCount = frames * m_root.m_mixInfo.m_channelMap.m_channelCount;
+    size_t sampleCount = frames * m_root.clientMixInfo().m_channelMap.m_channelCount;
     if (m_scratchFlt.size() < sampleCount)
         m_scratchFlt.resize(sampleCount);
 
@@ -128,13 +128,13 @@ float* AudioSubmix::_getMergeBufFlt(size_t frames)
 
 size_t AudioSubmix::_pumpAndMix16(size_t frames)
 {
-    ChannelMap& chMap = m_root.m_mixInfo.m_channelMap;
+    const ChannelMap& chMap = m_root.clientMixInfo().m_channelMap;
     size_t chanCount = chMap.m_channelCount;
 
     if (m_redirect16)
     {
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_redirect16, frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_redirect16, frames, chMap, m_root.clientMixInfo().m_sampleRate);
         m_redirect16 += chanCount * frames;
     }
     else
@@ -143,7 +143,7 @@ size_t AudioSubmix::_pumpAndMix16(size_t frames)
         if (m_scratch16.size() < sampleCount)
             m_scratch16.resize(sampleCount);
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_scratch16.data(), frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_scratch16.data(), frames, chMap, m_root.clientMixInfo().m_sampleRate);
 
         size_t curSlewFrame = m_slewFrames;
         for (auto& smx : m_sendGains)
@@ -188,13 +188,13 @@ size_t AudioSubmix::_pumpAndMix16(size_t frames)
 
 size_t AudioSubmix::_pumpAndMix32(size_t frames)
 {
-    ChannelMap& chMap = m_root.m_mixInfo.m_channelMap;
+    const ChannelMap& chMap = m_root.clientMixInfo().m_channelMap;
     size_t chanCount = chMap.m_channelCount;
 
     if (m_redirect32)
     {
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_redirect32, frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_redirect32, frames, chMap, m_root.clientMixInfo().m_sampleRate);
         m_redirect32 += chanCount * frames;
     }
     else
@@ -203,7 +203,7 @@ size_t AudioSubmix::_pumpAndMix32(size_t frames)
         if (m_scratch32.size() < sampleCount)
             m_scratch32.resize(sampleCount);
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_scratch32.data(), frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_scratch32.data(), frames, chMap, m_root.clientMixInfo().m_sampleRate);
 
         size_t curSlewFrame = m_slewFrames;
         for (auto& smx : m_sendGains)
@@ -248,13 +248,13 @@ size_t AudioSubmix::_pumpAndMix32(size_t frames)
 
 size_t AudioSubmix::_pumpAndMixFlt(size_t frames)
 {
-    ChannelMap& chMap = m_root.m_mixInfo.m_channelMap;
+    const ChannelMap& chMap = m_root.clientMixInfo().m_channelMap;
     size_t chanCount = chMap.m_channelCount;
 
     if (m_redirectFlt)
     {
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_redirectFlt, frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_redirectFlt, frames, chMap, m_root.clientMixInfo().m_sampleRate);
         m_redirectFlt += chanCount * frames;
     }
     else
@@ -263,7 +263,7 @@ size_t AudioSubmix::_pumpAndMixFlt(size_t frames)
         if (m_scratchFlt.size() < sampleCount)
             m_scratchFlt.resize(sampleCount);
         if (m_cb && m_cb->canApplyEffect())
-            m_cb->applyEffect(m_scratchFlt.data(), frames, chMap, m_root.m_mixInfo.m_sampleRate);
+            m_cb->applyEffect(m_scratchFlt.data(), frames, chMap, m_root.clientMixInfo().m_sampleRate);
 
         size_t curSlewFrame = m_slewFrames;
         for (auto& smx : m_sendGains)
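Every call site above swaps the engine's device-facing m_mixInfo for the new clientMixInfo() accessor, so submixes size and address their merge buffers in the layout that voices actually mix into. For reference while reading this file, the accessor is added later in this commit (AudioVoiceEngine.cpp):

// From AudioVoiceEngine.cpp in this same commit: the "client" layout is the
// 5.1 layout synthesized by LtRtProcessing while Lt/Rt encoding is enabled,
// and simply the backend's own mix info otherwise.
const AudioVoiceEngineMixInfo& BaseAudioVoiceEngine::clientMixInfo() const
{
    return m_ltRtProcessing ? m_ltRtProcessing->inMixInfo() : m_mixInfo;
}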
lib/audiodev/AudioVoice.cpp
@@ -146,14 +146,14 @@ size_t AudioVoiceMono::pumpAndMix16(size_t frames)
         {
             AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
             m_cb->routeAudio(oDone, 1, dt, smx.m_busId, scratch16Pre.data(), scratch16Post.data());
-            mtx.second.mixMonoSampleData(m_root.m_mixInfo, scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
+            mtx.second.mixMonoSampleData(m_root.clientMixInfo(), scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
         }
     }
     else
     {
         AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
         m_cb->routeAudio(oDone, 1, dt, m_root.m_mainSubmix.m_busId, scratch16Pre.data(), scratch16Post.data());
-        DefaultMonoMtx.mixMonoSampleData(m_root.m_mixInfo, scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
+        DefaultMonoMtx.mixMonoSampleData(m_root.clientMixInfo(), scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
     }
 }
 
@@ -183,14 +183,14 @@ size_t AudioVoiceMono::pumpAndMix32(size_t frames)
         {
             AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
             m_cb->routeAudio(oDone, 1, dt, smx.m_busId, scratch32Pre.data(), scratch32Post.data());
-            mtx.second.mixMonoSampleData(m_root.m_mixInfo, scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
+            mtx.second.mixMonoSampleData(m_root.clientMixInfo(), scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
         }
     }
     else
     {
         AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
         m_cb->routeAudio(oDone, 1, dt, m_root.m_mainSubmix.m_busId, scratch32Pre.data(), scratch32Post.data());
-        DefaultMonoMtx.mixMonoSampleData(m_root.m_mixInfo, scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
+        DefaultMonoMtx.mixMonoSampleData(m_root.clientMixInfo(), scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
     }
 }
 
@@ -220,14 +220,14 @@ size_t AudioVoiceMono::pumpAndMixFlt(size_t frames)
         {
             AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
             m_cb->routeAudio(oDone, 1, dt, smx.m_busId, scratchFltPre.data(), scratchFltPost.data());
-            mtx.second.mixMonoSampleData(m_root.m_mixInfo, scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
+            mtx.second.mixMonoSampleData(m_root.clientMixInfo(), scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
         }
     }
     else
     {
         AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
         m_cb->routeAudio(oDone, 1, dt, m_root.m_mainSubmix.m_busId, scratchFltPre.data(), scratchFltPost.data());
-        DefaultMonoMtx.mixMonoSampleData(m_root.m_mixInfo, scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
+        DefaultMonoMtx.mixMonoSampleData(m_root.clientMixInfo(), scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
     }
 }
 
@@ -349,14 +349,14 @@ size_t AudioVoiceStereo::pumpAndMix16(size_t frames)
         {
             AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
             m_cb->routeAudio(oDone, 2, dt, smx.m_busId, scratch16Pre.data(), scratch16Post.data());
-            mtx.second.mixStereoSampleData(m_root.m_mixInfo, scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
+            mtx.second.mixStereoSampleData(m_root.clientMixInfo(), scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
         }
     }
     else
     {
         AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
         m_cb->routeAudio(oDone, 2, dt, m_root.m_mainSubmix.m_busId, scratch16Pre.data(), scratch16Post.data());
-        DefaultStereoMtx.mixStereoSampleData(m_root.m_mixInfo, scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
+        DefaultStereoMtx.mixStereoSampleData(m_root.clientMixInfo(), scratch16Post.data(), smx._getMergeBuf16(oDone), oDone);
     }
 }
 
@@ -388,14 +388,14 @@ size_t AudioVoiceStereo::pumpAndMix32(size_t frames)
         {
             AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
             m_cb->routeAudio(oDone, 2, dt, smx.m_busId, scratch32Pre.data(), scratch32Post.data());
-            mtx.second.mixStereoSampleData(m_root.m_mixInfo, scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
+            mtx.second.mixStereoSampleData(m_root.clientMixInfo(), scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
         }
     }
     else
     {
         AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
         m_cb->routeAudio(oDone, 2, dt, m_root.m_mainSubmix.m_busId, scratch32Pre.data(), scratch32Post.data());
-        DefaultStereoMtx.mixStereoSampleData(m_root.m_mixInfo, scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
+        DefaultStereoMtx.mixStereoSampleData(m_root.clientMixInfo(), scratch32Post.data(), smx._getMergeBuf32(oDone), oDone);
     }
 }
 
@@ -427,14 +427,14 @@ size_t AudioVoiceStereo::pumpAndMixFlt(size_t frames)
         {
             AudioSubmix& smx = *reinterpret_cast<AudioSubmix*>(mtx.first);
             m_cb->routeAudio(oDone, 2, dt, smx.m_busId, scratchFltPre.data(), scratchFltPost.data());
-            mtx.second.mixStereoSampleData(m_root.m_mixInfo, scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
+            mtx.second.mixStereoSampleData(m_root.clientMixInfo(), scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
         }
     }
     else
     {
         AudioSubmix& smx = reinterpret_cast<AudioSubmix&>(m_root.m_mainSubmix);
         m_cb->routeAudio(oDone, 2, dt, m_root.m_mainSubmix.m_busId, scratchFltPre.data(), scratchFltPost.data());
-        DefaultStereoMtx.mixStereoSampleData(m_root.m_mixInfo, scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
+        DefaultStereoMtx.mixStereoSampleData(m_root.clientMixInfo(), scratchFltPost.data(), smx._getMergeBufFlt(oDone), oDone);
     }
 }
 
lib/audiodev/AudioVoiceEngine.cpp
@@ -1,5 +1,5 @@
 #include "AudioVoiceEngine.hpp"
-#include <string.h>
+#include "LtRtProcessing.hpp"
 
 namespace boo
 {
@@ -15,7 +15,17 @@ BaseAudioVoiceEngine::~BaseAudioVoiceEngine()
 void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, int16_t* dataOut)
 {
     memset(dataOut, 0, sizeof(int16_t) * frames * m_mixInfo.m_channelMap.m_channelCount);
-    m_mainSubmix.m_redirect16 = dataOut;
+    if (m_ltRtProcessing)
+    {
+        size_t sampleCount = m_5msFrames * 5;
+        if (m_ltRtIn16.size() < sampleCount)
+            m_ltRtIn16.resize(sampleCount);
+        m_mainSubmix.m_redirect16 = m_ltRtIn16.data();
+    }
+    else
+    {
+        m_mainSubmix.m_redirect16 = dataOut;
+    }
 
     if (m_submixesDirty)
     {
@@ -50,6 +60,12 @@ void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, int16_t* dataOut)
         for (auto it = m_linearizedSubmixes.rbegin() ; it != m_linearizedSubmixes.rend() ; ++it)
             (*it)->_pumpAndMix16(thisFrames);
 
+        if (m_ltRtProcessing)
+        {
+            m_ltRtProcessing->Process(m_ltRtIn16.data(), dataOut, int(thisFrames));
+            m_mainSubmix.m_redirect16 = m_ltRtIn16.data();
+        }
+
         size_t sampleCount = thisFrames * m_mixInfo.m_channelMap.m_channelCount;
         for (size_t i=0 ; i<sampleCount ; ++i)
             dataOut[i] *= m_totalVol;
@@ -65,7 +81,17 @@ void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, int16_t* dataOut)
 void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, int32_t* dataOut)
 {
     memset(dataOut, 0, sizeof(int32_t) * frames * m_mixInfo.m_channelMap.m_channelCount);
-    m_mainSubmix.m_redirect32 = dataOut;
+    if (m_ltRtProcessing)
+    {
+        size_t sampleCount = m_5msFrames * 5;
+        if (m_ltRtIn32.size() < sampleCount)
+            m_ltRtIn32.resize(sampleCount);
+        m_mainSubmix.m_redirect32 = m_ltRtIn32.data();
+    }
+    else
+    {
+        m_mainSubmix.m_redirect32 = dataOut;
+    }
 
     if (m_submixesDirty)
     {
@@ -100,6 +126,12 @@ void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, int32_t* dataOut)
         for (auto it = m_linearizedSubmixes.rbegin() ; it != m_linearizedSubmixes.rend() ; ++it)
             (*it)->_pumpAndMix32(thisFrames);
 
+        if (m_ltRtProcessing)
+        {
+            m_ltRtProcessing->Process(m_ltRtIn32.data(), dataOut, int(thisFrames));
+            m_mainSubmix.m_redirect32 = m_ltRtIn32.data();
+        }
+
         size_t sampleCount = thisFrames * m_mixInfo.m_channelMap.m_channelCount;
         for (size_t i=0 ; i<sampleCount ; ++i)
             dataOut[i] *= m_totalVol;
@@ -115,7 +147,17 @@ void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, int32_t* dataOut)
 void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, float* dataOut)
 {
     memset(dataOut, 0, sizeof(float) * frames * m_mixInfo.m_channelMap.m_channelCount);
-    m_mainSubmix.m_redirectFlt = dataOut;
+    if (m_ltRtProcessing)
+    {
+        size_t sampleCount = m_5msFrames * 5;
+        if (m_ltRtInFlt.size() < sampleCount)
+            m_ltRtInFlt.resize(sampleCount);
+        m_mainSubmix.m_redirectFlt = m_ltRtInFlt.data();
+    }
+    else
+    {
+        m_mainSubmix.m_redirectFlt = dataOut;
+    }
 
     if (m_submixesDirty)
     {
@@ -150,6 +192,12 @@ void BaseAudioVoiceEngine::_pumpAndMixVoices(size_t frames, float* dataOut)
         for (auto it = m_linearizedSubmixes.rbegin() ; it != m_linearizedSubmixes.rend() ; ++it)
             (*it)->_pumpAndMixFlt(thisFrames);
 
+        if (m_ltRtProcessing)
+        {
+            m_ltRtProcessing->Process(m_ltRtInFlt.data(), dataOut, int(thisFrames));
+            m_mainSubmix.m_redirectFlt = m_ltRtInFlt.data();
+        }
+
         size_t sampleCount = thisFrames * m_mixInfo.m_channelMap.m_channelCount;
         for (size_t i=0 ; i<sampleCount ; ++i)
             dataOut[i] *= m_totalVol;
@@ -216,9 +264,24 @@ void BaseAudioVoiceEngine::setVolume(float vol)
     m_totalVol = vol;
 }
 
+bool BaseAudioVoiceEngine::enableLtRt(bool enable)
+{
+    if (enable && m_mixInfo.m_channelMap.m_channelCount == 2 &&
+        m_mixInfo.m_channels == AudioChannelSet::Stereo)
+        m_ltRtProcessing = std::make_unique<LtRtProcessing>(m_5msFrames, m_mixInfo);
+    else
+        m_ltRtProcessing.reset();
+    return m_ltRtProcessing.operator bool();
+}
+
 const AudioVoiceEngineMixInfo& BaseAudioVoiceEngine::mixInfo() const
 {
     return m_mixInfo;
 }
 
+const AudioVoiceEngineMixInfo& BaseAudioVoiceEngine::clientMixInfo() const
+{
+    return m_ltRtProcessing ? m_ltRtProcessing->inMixInfo() : m_mixInfo;
+}
+
 }
lib/audiodev/AudioVoiceEngine.hpp
@@ -2,6 +2,8 @@
 #define BOO_AUDIOVOICEENGINE_HPP
 
 #include "boo/audiodev/IAudioVoiceEngine.hpp"
+#include "LtRtProcessing.hpp"
+#include "Common.hpp"
 #include "AudioVoice.hpp"
 #include "AudioSubmix.hpp"
 #include <functional>
@@ -9,17 +11,6 @@
 namespace boo
 {
 
-/** Pertinent information from audio backend about optimal mixed-audio representation */
-struct AudioVoiceEngineMixInfo
-{
-    double m_sampleRate;
-    soxr_datatype_t m_sampleFormat;
-    unsigned m_bitsPerSample;
-    AudioChannelSet m_channels;
-    ChannelMap m_channelMap;
-    size_t m_periodFrames;
-};
-
 /** Base class for managing mixing and sample-rate-conversion amongst active voices */
 class BaseAudioVoiceEngine : public IAudioVoiceEngine
 {
@@ -44,6 +35,12 @@ protected:
     std::vector<int32_t> m_scratch32Post;
     std::vector<float> m_scratchFltPost;
 
+    /* LtRt processing if enabled */
+    std::unique_ptr<LtRtProcessing> m_ltRtProcessing;
+    std::vector<int16_t> m_ltRtIn16;
+    std::vector<int32_t> m_ltRtIn32;
+    std::vector<float> m_ltRtInFlt;
+
     AudioSubmix m_mainSubmix;
     std::list<AudioSubmix*> m_linearizedSubmixes;
     bool m_submixesDirty = true;
@@ -71,8 +68,10 @@ public:
     void setCallbackInterface(IAudioVoiceEngineCallback* cb);
 
     void setVolume(float vol);
+    bool enableLtRt(bool enable);
     const AudioVoiceEngineMixInfo& mixInfo() const;
-    AudioChannelSet getAvailableSet() {return m_mixInfo.m_channels;}
+    const AudioVoiceEngineMixInfo& clientMixInfo() const;
+    AudioChannelSet getAvailableSet() {return clientMixInfo().m_channels;}
     void pumpAndMixVoices() {}
     size_t get5MsFrames() const {return m_5msFrames;}
 };
lib/audiodev/Common.hpp (new file)
@@ -0,0 +1,22 @@
+#ifndef BOO_AUDIOCOMMON_HPP
+#define BOO_AUDIOCOMMON_HPP
+
+#include <soxr.h>
+
+namespace boo
+{
+
+/** Pertinent information from audio backend about optimal mixed-audio representation */
+struct AudioVoiceEngineMixInfo
+{
+    double m_sampleRate;
+    soxr_datatype_t m_sampleFormat;
+    unsigned m_bitsPerSample;
+    AudioChannelSet m_channels;
+    ChannelMap m_channelMap;
+    size_t m_periodFrames;
+};
+
+}
+
+#endif // BOO_AUDIOCOMMON_HPP
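As a reading aid, here is a hypothetical set of values a 48 kHz, 16-bit stereo backend might report through this struct. The concrete numbers are illustrative and not taken from the commit; the type, enum and field names are.

#include "boo/audiodev/IAudioVoice.hpp"   // AudioChannel / AudioChannelSet / ChannelMap
#include "Common.hpp"                     // AudioVoiceEngineMixInfo (this new header)

// Illustrative only: roughly how a stereo int16 backend would describe itself.
boo::AudioVoiceEngineMixInfo exampleStereoMixInfo()
{
    boo::AudioVoiceEngineMixInfo info = {};
    info.m_sampleRate = 48000.0;
    info.m_sampleFormat = SOXR_INT16_I;          // interleaved int16, from soxr.h
    info.m_bitsPerSample = 16;
    info.m_channels = boo::AudioChannelSet::Stereo;
    info.m_channelMap.m_channelCount = 2;
    info.m_channelMap.m_channels[0] = boo::AudioChannel::FrontLeft;
    info.m_channelMap.m_channels[1] = boo::AudioChannel::FrontRight;
    info.m_periodFrames = 240;                   // backend period size (example value)
    return info;
}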
lib/audiodev/LtRtProcessing.cpp (new file)
@@ -0,0 +1,270 @@
+#include "LtRtProcessing.hpp"
+#include <cmath>
+
+namespace boo
+{
+
+template <typename T>
+inline T ClampFull(float in)
+{
+    if(std::is_floating_point<T>())
+    {
+        return std::min<T>(std::max<T>(in, -1.f), 1.f);
+    }
+    else
+    {
+        constexpr T MAX = std::numeric_limits<T>::max();
+        constexpr T MIN = std::numeric_limits<T>::min();
+
+        if (in < MIN)
+            return MIN;
+        else if (in > MAX)
+            return MAX;
+        else
+            return in;
+    }
+}
+
+#if INTEL_IPP
+
+WindowedHilbert::WindowedHilbert(int windowSamples)
+: m_windowSamples(windowSamples), m_halfSamples(windowSamples / 2),
+  m_inputBuf(new Ipp32f[m_windowSamples * 2 + m_halfSamples]),
+  m_outputBuf(new Ipp32fc[m_windowSamples * 4]),
+  m_hammingTable(new Ipp32f[m_halfSamples])
+{
+    memset(m_inputBuf.get(), 0, sizeof(Ipp32fc) * m_windowSamples * 2 + m_halfSamples);
+    memset(m_outputBuf.get(), 0, sizeof(Ipp32fc) * m_windowSamples * 4);
+    m_output[0] = m_outputBuf.get();
+    m_output[1] = m_output[0] + m_windowSamples;
+    m_output[2] = m_output[1] + m_windowSamples;
+    m_output[3] = m_output[2] + m_windowSamples;
+    int sizeSpec, sizeBuf;
+    ippsHilbertGetSize_32f32fc(m_windowSamples, ippAlgHintNone, &sizeSpec, &sizeBuf);
+    m_spec = (IppsHilbertSpec*)ippMalloc(sizeSpec);
+    m_buffer = (Ipp8u*)ippMalloc(sizeBuf);
+    ippsHilbertInit_32f32fc(m_windowSamples, ippAlgHintNone, m_spec, m_buffer);
+
+    for (int i=0 ; i<m_halfSamples ; ++i)
+        m_hammingTable[i] = Ipp32f(std::cos(M_PI * (i / double(m_halfSamples) + 1.0)) * 0.5 + 0.5);
+}
+
+WindowedHilbert::~WindowedHilbert()
+{
+    ippFree(m_spec);
+    ippFree(m_buffer);
+}
+
+void WindowedHilbert::_AddWindow()
+{
+    if (m_bufIdx)
+    {
+        /* Mirror last half of samples to start of input buffer */
+        Ipp32f* bufBase = &m_inputBuf[m_windowSamples * 2];
+        for (int i=0 ; i<m_halfSamples ; ++i)
+            m_inputBuf[i] = bufBase[i];
+        ippsHilbert_32f32fc(&m_inputBuf[m_windowSamples],
+                            m_output[2], m_spec, m_buffer);
+        ippsHilbert_32f32fc(&m_inputBuf[m_windowSamples + m_halfSamples],
+                            m_output[3], m_spec, m_buffer);
+    }
+    else
+    {
+        ippsHilbert_32f32fc(&m_inputBuf[0],
+                            m_output[0], m_spec, m_buffer);
+        ippsHilbert_32f32fc(&m_inputBuf[m_halfSamples],
+                            m_output[1], m_spec, m_buffer);
+    }
+    m_bufIdx ^= 1;
+}
+
+void WindowedHilbert::AddWindow(const float* input, int stride)
+{
+    Ipp32f* bufBase = &m_inputBuf[m_windowSamples * m_bufIdx + m_halfSamples];
+    for (int i=0 ; i<m_windowSamples ; ++i)
+        bufBase[i] = input[i * stride];
+    _AddWindow();
+}
+
+void WindowedHilbert::AddWindow(const int32_t* input, int stride)
+{
+    Ipp32f* bufBase = &m_inputBuf[m_windowSamples * m_bufIdx + m_halfSamples];
+    for (int i=0 ; i<m_windowSamples ; ++i)
+        bufBase[i] = input[i * stride] / (float(INT32_MAX) + 1.f);
+    _AddWindow();
+}
+
+void WindowedHilbert::AddWindow(const int16_t* input, int stride)
+{
+    Ipp32f* bufBase = &m_inputBuf[m_windowSamples * m_bufIdx + m_halfSamples];
+    for (int i=0 ; i<m_windowSamples ; ++i)
+        bufBase[i] = input[i * stride] / (float(INT16_MAX) + 1.f);
+    _AddWindow();
+}
+
+template <typename T>
+void WindowedHilbert::Output(T* output, float lCoef, float rCoef) const
+{
+    int first, middle, last;
+    if (m_bufIdx)
+    {
+        first = 3;
+        middle = 0;
+        last = 1;
+    }
+    else
+    {
+        first = 1;
+        middle = 2;
+        last = 3;
+    }
+
+    int i, t;
+    for (i=0, t=0 ; i<m_halfSamples ; ++i, ++t)
+    {
+        float tmp = m_output[first][i].im * (1.f - m_hammingTable[t]) +
+                    m_output[middle][i].im * m_hammingTable[t];
+        output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
+        output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
+    }
+    for (; i<m_windowSamples-m_halfSamples ; ++i)
+    {
+        float tmp = m_output[middle][i].im;
+        output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
+        output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
+    }
+    for (t=0 ; i<m_windowSamples ; ++i, ++t)
+    {
+        float tmp = m_output[middle][i].im * (1.f - m_hammingTable[t]) +
+                    m_output[last][i].im * m_hammingTable[t];
+        output[i*2] = ClampFull<T>(output[i*2] + tmp * lCoef);
+        output[i*2+1] = ClampFull<T>(output[i*2+1] + tmp * rCoef);
+    }
+}
+
+template void WindowedHilbert::Output<int16_t>(int16_t* output, float lCoef, float rCoef) const;
+template void WindowedHilbert::Output<int32_t>(int32_t* output, float lCoef, float rCoef) const;
+template void WindowedHilbert::Output<float>(float* output, float lCoef, float rCoef) const;
+
+#endif
+
+template <> int16_t* LtRtProcessing::_getInBuf<int16_t>() { return m_16Buffer.get(); }
+template <> int32_t* LtRtProcessing::_getInBuf<int32_t>() { return m_32Buffer.get(); }
+template <> float* LtRtProcessing::_getInBuf<float>() { return m_fltBuffer.get(); }
+
+template <> int16_t* LtRtProcessing::_getOutBuf<int16_t>() { return m_16Buffer.get() + m_outputOffset; }
+template <> int32_t* LtRtProcessing::_getOutBuf<int32_t>() { return m_32Buffer.get() + m_outputOffset; }
+template <> float* LtRtProcessing::_getOutBuf<float>() { return m_fltBuffer.get() + m_outputOffset; }
+
+LtRtProcessing::LtRtProcessing(int _5msFrames, const AudioVoiceEngineMixInfo& mixInfo)
+: m_inMixInfo(mixInfo), m_5msFrames(_5msFrames), m_5msFramesHalf(_5msFrames / 2),
+  m_outputOffset(m_5msFrames * 5 * 2), m_hilbertSL(_5msFrames), m_hilbertSR(_5msFrames)
+{
+    m_inMixInfo.m_channels = AudioChannelSet::Surround51;
+    m_inMixInfo.m_channelMap.m_channelCount = 5;
+    m_inMixInfo.m_channelMap.m_channels[0] = AudioChannel::FrontLeft;
+    m_inMixInfo.m_channelMap.m_channels[1] = AudioChannel::FrontRight;
+    m_inMixInfo.m_channelMap.m_channels[2] = AudioChannel::FrontCenter;
+    m_inMixInfo.m_channelMap.m_channels[3] = AudioChannel::RearLeft;
+    m_inMixInfo.m_channelMap.m_channels[4] = AudioChannel::RearRight;
+
+    int samples = m_5msFrames * (5 * 2 + 2 * 2);
+    switch (mixInfo.m_sampleFormat)
+    {
+    case SOXR_INT16_I:
+        m_16Buffer.reset(new int16_t[samples]);
+        memset(m_16Buffer.get(), 0, sizeof(int16_t) * samples);
+        break;
+    case SOXR_INT32_I:
+        m_32Buffer.reset(new int32_t[samples]);
+        memset(m_32Buffer.get(), 0, sizeof(int32_t) * samples);
+        break;
+    case SOXR_FLOAT32_I:
+        m_fltBuffer.reset(new float[samples]);
+        memset(m_fltBuffer.get(), 0, sizeof(float) * samples);
+        break;
+    default:
+        break;
+    }
+}
+
+template <typename T>
+void LtRtProcessing::Process(const T* input, T* output, int frameCount)
+{
+    int outFramesRem = frameCount;
+    T* inBuf = _getInBuf<T>();
+    T* outBuf = _getOutBuf<T>();
+    int tail = std::min(m_5msFrames * 2, m_bufferTail + frameCount);
+    int samples = (tail - m_bufferTail) * 5;
+    memmove(&inBuf[m_bufferTail * 5], input, samples * sizeof(float));
+    input += samples;
+    frameCount -= tail - m_bufferTail;
+
+    int bufIdx = m_bufferTail / m_5msFrames;
+    if (tail / m_5msFrames > bufIdx)
+    {
+        T* in = &inBuf[bufIdx * m_5msFrames * 5];
+        T* out = &outBuf[bufIdx * m_5msFrames * 2];
+        m_hilbertSL.AddWindow(in + 3, 5);
+        m_hilbertSR.AddWindow(in + 4, 5);
+
+        // x(:,1) + sqrt(.5)*x(:,3) + sqrt(19/25)*x(:,4) + sqrt(6/25)*x(:,5)
+        // x(:,2) + sqrt(.5)*x(:,3) - sqrt(6/25)*x(:,4) - sqrt(19/25)*x(:,5)
+        if (bufIdx)
+        {
+            int delayI = -m_5msFramesHalf;
+            for (int i=0 ; i<m_5msFrames ; ++i, ++delayI)
+            {
+                out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
+                out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
+            }
+        }
+        else
+        {
+            int delayI = m_5msFrames * 2 - m_5msFramesHalf;
+            int i;
+            for (i=0 ; i<m_5msFramesHalf ; ++i, ++delayI)
+            {
+                out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
+                out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
+            }
+            delayI = 0;
+            for (; i<m_5msFrames ; ++i, ++delayI)
+            {
+                out[i * 2] = ClampFull<T>(in[delayI * 5] + 0.7071068f * in[delayI * 5 + 2]);
+                out[i * 2 + 1] = ClampFull<T>(in[delayI * 5 + 1] + 0.7071068f * in[delayI * 5 + 2]);
+            }
+        }
+#if INTEL_IPP
+        m_hilbertSL.Output(out, 0.8717798f, 0.4898979f);
+        m_hilbertSR.Output(out, -0.4898979f, -0.8717798f);
+#endif
+    }
+    m_bufferTail = tail;
+
+    if (frameCount)
+    {
+        samples = frameCount * 5;
+        memmove(inBuf, input, samples * sizeof(float));
+        m_bufferTail = frameCount;
+    }
+
+    int head = std::min(m_5msFrames * 2, m_bufferHead + outFramesRem);
+    samples = (head - m_bufferHead) * 2;
+    memmove(output, outBuf + m_bufferHead * 2, samples * sizeof(float));
+    output += samples;
+    outFramesRem -= head - m_bufferHead;
+    m_bufferHead = head;
+    if (outFramesRem)
+    {
+        samples = outFramesRem * 2;
+        memmove(output, outBuf, samples * sizeof(float));
+        m_bufferHead = outFramesRem;
+    }
+}
+
+template void LtRtProcessing::Process<int16_t>(const int16_t* input, int16_t* output, int frameCount);
+template void LtRtProcessing::Process<int32_t>(const int32_t* input, int32_t* output, int frameCount);
+template void LtRtProcessing::Process<float>(const float* input, float* output, int frameCount);
+
+}
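Spelled out, the per-sample sum that Process() and the two WindowedHilbert::Output() calls build is a conventional Lt/Rt matrix. The transcription below is mine, derived from the code above: H{.} stands for the 90-degree phase-shifted surround signal taken from the imaginary part of the IPP analytic-signal output, and the coefficients are the literal constants in the code, 0.7071068 = sqrt(1/2), 0.8717798 ~ sqrt(19/25), 0.4898979 ~ sqrt(6/25). The MATLAB-style comment inside Process() states the same coefficients with the surround signs arranged slightly differently.

% Lt/Rt downmix as assembled by Process() + WindowedHilbert::Output()
\begin{aligned}
L_t &= x_{FL} + \sqrt{\tfrac{1}{2}}\,x_{FC} + \sqrt{\tfrac{19}{25}}\,H\{x_{RL}\} - \sqrt{\tfrac{6}{25}}\,H\{x_{RR}\}\\
R_t &= x_{FR} + \sqrt{\tfrac{1}{2}}\,x_{FC} + \sqrt{\tfrac{6}{25}}\,H\{x_{RL}\} - \sqrt{\tfrac{19}{25}}\,H\{x_{RR}\}
\end{aligned}

Each partial sum is clamped to the sample type's range by ClampFull, and the front/center path indexes its input half a window back (the delayI offset), apparently to keep it time-aligned with the overlapped, half-window-crossfaded Hilbert output.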
lib/audiodev/LtRtProcessing.hpp (new file)
@@ -0,0 +1,65 @@
+#ifndef BOO_LTRTPROCESSING_HPP
+#define BOO_LTRTPROCESSING_HPP
+
+#include "boo/System.hpp"
+#include "boo/audiodev/IAudioVoice.hpp"
+#include "Common.hpp"
+#include <memory>
+
+#if INTEL_IPP
+#include "ipp.h"
+#endif
+
+namespace boo
+{
+
+#if INTEL_IPP
+class WindowedHilbert
+{
+    IppsHilbertSpec* m_spec;
+    Ipp8u* m_buffer;
+    int m_windowSamples, m_halfSamples;
+    int m_bufIdx = 0;
+    int m_bufferTail = 0;
+    std::unique_ptr<Ipp32f[]> m_inputBuf;
+    std::unique_ptr<Ipp32fc[]> m_outputBuf;
+    Ipp32fc* m_output[4];
+    std::unique_ptr<Ipp32f[]> m_hammingTable;
+    void _AddWindow();
+public:
+    explicit WindowedHilbert(int windowSamples);
+    ~WindowedHilbert();
+    void AddWindow(const float* input, int stride);
+    void AddWindow(const int32_t* input, int stride);
+    void AddWindow(const int16_t* input, int stride);
+    template <typename T>
+    void Output(T* output, float lCoef, float rCoef) const;
+};
+#endif
+
+class LtRtProcessing
+{
+    AudioVoiceEngineMixInfo m_inMixInfo;
+    int m_5msFrames;
+    int m_5msFramesHalf;
+    int m_outputOffset;
+    int m_bufferTail = 0;
+    int m_bufferHead = 0;
+    std::unique_ptr<int16_t[]> m_16Buffer;
+    std::unique_ptr<int32_t[]> m_32Buffer;
+    std::unique_ptr<float[]> m_fltBuffer;
+#if INTEL_IPP
+    WindowedHilbert m_hilbertSL, m_hilbertSR;
+#endif
+    template <typename T> T* _getInBuf();
+    template <typename T> T* _getOutBuf();
+public:
+    LtRtProcessing(int _5msFrames, const AudioVoiceEngineMixInfo& mixInfo);
+    template <typename T>
+    void Process(const T* input, T* output, int frameCount);
+    const AudioVoiceEngineMixInfo& inMixInfo() const { return m_inMixInfo; }
+};
+
+}
+
+#endif // BOO_LTRTPROCESSING_HPP
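Finally, a standalone usage sketch of the class declared here. This is illustrative only: inside boo the engine drives Process() from _pumpAndMixVoices() as shown earlier in the commit, and this sketch assumes the instance was constructed from a float-format (SOXR_FLOAT32_I) mix info so the float buffer path is active. The channel order in the comment restates the map set up by the LtRtProcessing constructor.

#include "LtRtProcessing.hpp"
#include <vector>

// Hypothetical driver: feed interleaved 5-channel frames (FL, FR, FC, RL, RR)
// and receive interleaved Lt/Rt stereo of the same frame count back.
void matrixBlock(boo::LtRtProcessing& ltrt, int frames,
                 const std::vector<float>& surroundIn,   // frames * 5 samples
                 std::vector<float>& stereoOut)          // frames * 2 samples
{
    stereoOut.resize(size_t(frames) * 2);
    // Process() copies the block into its internal double buffer, runs the
    // Hilbert/matrix pass whenever a full 5 ms window has accumulated, and
    // emits the corresponding (window-delayed) stereo frames.
    ltrt.Process(surroundIn.data(), stereoOut.data(), frames);
}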