audio: share decoded clip cache across sources
This commit is contained in:
@@ -1,16 +1,153 @@
|
||||
#include <XCEngine/Components/AudioSourceComponent.h>
|
||||
#include <XCEngine/Audio/AudioSystem.h>
|
||||
#include <XCEngine/Core/Asset/ResourceManager.h>
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <cstring>
|
||||
#include <sstream>
|
||||
|
||||
namespace XCEngine {
|
||||
namespace Components {
|
||||
|
||||
AudioSourceComponent::AudioSourceComponent()
|
||||
: m_outputBuffer(BufferSize * 2, 0.0f)
|
||||
{
|
||||
namespace {
|
||||
|
||||
// Converts an engine container string into a std::string for use with
// standard-library facilities (serialization, path handling).
std::string ToStdString(const Containers::String& value) {
    const char* raw = value.CStr();
    return std::string(raw);
}
|
||||
|
||||
// Returns true when the path carries an explicit scheme separator
// (e.g. "res://clip.wav"), i.e. it is a virtual path rather than a
// plain asset-database path.
bool HasVirtualPathScheme(const std::string& path) {
    static const char kSchemeSeparator[] = "://";
    return path.find(kSchemeSeparator) != std::string::npos;
}
|
||||
|
||||
// Serializes an AssetRef as "<guid>,<localID>,<resourceType>".
// An invalid ref encodes as the empty string. TryDecodeAssetRef is the
// inverse of this format.
std::string EncodeAssetRef(const Resources::AssetRef& assetRef) {
    if (!assetRef.IsValid()) {
        return std::string();
    }

    std::ostringstream encoded;
    encoded << ToStdString(assetRef.assetGuid.ToString()) << ','
            << assetRef.localID << ','
            << static_cast<int>(assetRef.resourceType);
    return encoded.str();
}
|
||||
|
||||
// Parses a "<guid>,<localID>,<resourceType>" triple produced by
// EncodeAssetRef into outRef.
//
// Returns true only when the string contains all three comma-separated
// fields, the numeric fields parse cleanly, and the resulting ref is
// valid. Malformed numeric fields (empty, non-numeric, out of range)
// now return false instead of letting std::stoull/std::stoi throw out
// of a "Try" function.
bool TryDecodeAssetRef(const std::string& value, Resources::AssetRef& outRef) {
    const size_t firstComma = value.find(',');
    const size_t secondComma =
        firstComma == std::string::npos ? std::string::npos : value.find(',', firstComma + 1);
    if (firstComma == std::string::npos || secondComma == std::string::npos) {
        return false;
    }

    try {
        outRef.assetGuid = Resources::AssetGUID::ParseOrDefault(
            Containers::String(value.substr(0, firstComma).c_str()));
        outRef.localID = static_cast<Resources::LocalID>(std::stoull(
            value.substr(firstComma + 1, secondComma - firstComma - 1)));
        outRef.resourceType = static_cast<Resources::ResourceType>(std::stoi(
            value.substr(secondComma + 1)));
    } catch (...) {
        // std::stoull/std::stoi throw std::invalid_argument or
        // std::out_of_range on malformed input; a Try* parser must
        // report failure rather than propagate.
        return false;
    }
    return outRef.IsValid();
}
|
||||
|
||||
double WrapFramePosition(double framePosition, Audio::uint64 totalFrames) {
|
||||
if (totalFrames == 0) {
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
const double totalFrameCount = static_cast<double>(totalFrames);
|
||||
double wrapped = std::fmod(framePosition, totalFrameCount);
|
||||
if (wrapped < 0.0) {
|
||||
wrapped += totalFrameCount;
|
||||
}
|
||||
return wrapped;
|
||||
}
|
||||
|
||||
float ReadDecodedSample(const std::vector<float>& decodedData,
|
||||
Audio::uint64 totalFrames,
|
||||
Audio::uint32 clipChannels,
|
||||
Audio::int64 frameIndex,
|
||||
Audio::uint32 channel,
|
||||
bool looping) {
|
||||
if (decodedData.empty() || totalFrames == 0 || clipChannels == 0) {
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
Audio::int64 resolvedFrame = frameIndex;
|
||||
if (looping) {
|
||||
const Audio::int64 frameCount = static_cast<Audio::int64>(totalFrames);
|
||||
resolvedFrame %= frameCount;
|
||||
if (resolvedFrame < 0) {
|
||||
resolvedFrame += frameCount;
|
||||
}
|
||||
} else if (resolvedFrame < 0 || resolvedFrame >= static_cast<Audio::int64>(totalFrames)) {
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
const Audio::uint32 clipChannel = std::min(channel, clipChannels - 1);
|
||||
const Audio::uint64 decodedIndex =
|
||||
static_cast<Audio::uint64>(resolvedFrame) * clipChannels + clipChannel;
|
||||
return decodedData[decodedIndex];
|
||||
}
|
||||
|
||||
float SampleDecodedChannel(const std::vector<float>& decodedData,
|
||||
Audio::uint64 totalFrames,
|
||||
Audio::uint32 clipChannels,
|
||||
double framePosition,
|
||||
Audio::uint32 channel,
|
||||
bool looping) {
|
||||
if (decodedData.empty() || totalFrames == 0 || clipChannels == 0) {
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
double samplePosition = framePosition;
|
||||
if (looping) {
|
||||
samplePosition = WrapFramePosition(samplePosition, totalFrames);
|
||||
} else {
|
||||
const double maxFrame = static_cast<double>(totalFrames - 1);
|
||||
samplePosition = std::clamp(samplePosition, 0.0, maxFrame);
|
||||
}
|
||||
|
||||
const Audio::int64 frame0 = static_cast<Audio::int64>(std::floor(samplePosition));
|
||||
const Audio::int64 frame1 = frame0 + 1;
|
||||
const float t = static_cast<float>(samplePosition - static_cast<double>(frame0));
|
||||
|
||||
const float sample0 = ReadDecodedSample(decodedData, totalFrames, clipChannels, frame0, channel, looping);
|
||||
const float sample1 = looping
|
||||
? ReadDecodedSample(decodedData, totalFrames, clipChannels, frame1, channel, true)
|
||||
: ReadDecodedSample(decodedData, totalFrames, clipChannels,
|
||||
std::min<Audio::int64>(frame1, static_cast<Audio::int64>(totalFrames - 1)),
|
||||
channel, false);
|
||||
return sample0 + (sample1 - sample0) * t;
|
||||
}
|
||||
|
||||
// Computes per-channel gains for a stereo pan position. Panning toward
// one side attenuates the opposite channel while the near channel
// stays at unity: pan > 0 attenuates the left, pan < 0 the right.
void ComputePanGains(float pan, float& leftGain, float& rightGain) {
    const float p = std::clamp(pan, -1.0f, 1.0f);
    leftGain = 1.0f;
    rightGain = 1.0f;
    if (p > 0.0f) {
        leftGain = 1.0f - p;
    } else if (p < 0.0f) {
        rightGain = 1.0f + p;
    }
}
|
||||
|
||||
// Blends a stereo pair toward its mono centre. spread == 0 collapses
// the pair to mono; spread == 1 leaves the original width untouched.
void ApplyStereoSpread(float leftIn, float rightIn, float spread, float& leftOut, float& rightOut) {
    const float width = std::clamp(spread, 0.0f, 1.0f);
    const float center = (leftIn + rightIn) * 0.5f;
    leftOut = center + (leftIn - center) * width;
    rightOut = center + (rightIn - center) * width;
}
|
||||
|
||||
void ApplyPanToBuffer(float* buffer, Audio::uint32 frameCount, Audio::uint32 channels, float pan) {
|
||||
if (buffer == nullptr || channels < 2) {
|
||||
return;
|
||||
}
|
||||
|
||||
float leftGain = 1.0f;
|
||||
float rightGain = 1.0f;
|
||||
ComputePanGains(pan, leftGain, rightGain);
|
||||
for (Audio::uint32 frame = 0; frame < frameCount; ++frame) {
|
||||
buffer[frame * channels] *= leftGain;
|
||||
buffer[frame * channels + 1] *= rightGain;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
AudioSourceComponent::AudioSourceComponent() = default;
|
||||
|
||||
AudioSourceComponent::~AudioSourceComponent() {
|
||||
if (m_playState == Audio::PlayState::Playing) {
|
||||
Audio::AudioSystem::Get().UnregisterSource(this);
|
||||
@@ -22,13 +159,28 @@ void AudioSourceComponent::Play() {
|
||||
return;
|
||||
}
|
||||
|
||||
if (m_playState == Audio::PlayState::Playing) {
|
||||
m_samplePosition = 0;
|
||||
m_playbackPosition = 0.0;
|
||||
m_lastingTime = 0.0;
|
||||
m_velocity = Math::Vector3::Zero();
|
||||
m_hasLastPosition = false;
|
||||
m_hrtf.ResetState();
|
||||
return;
|
||||
}
|
||||
|
||||
if (m_playState == Audio::PlayState::Paused) {
|
||||
m_playState = Audio::PlayState::Playing;
|
||||
Audio::AudioSystem::Get().RegisterSource(this);
|
||||
return;
|
||||
}
|
||||
|
||||
m_samplePosition = 0;
|
||||
m_playbackPosition = 0.0;
|
||||
m_lastingTime = 0.0;
|
||||
m_velocity = Math::Vector3::Zero();
|
||||
m_hasLastPosition = false;
|
||||
m_hrtf.ResetState();
|
||||
m_playState = Audio::PlayState::Playing;
|
||||
Audio::AudioSystem::Get().RegisterSource(this);
|
||||
}
|
||||
@@ -44,18 +196,78 @@ void AudioSourceComponent::Stop(Audio::StopMode mode) {
|
||||
if (m_playState != Audio::PlayState::Stopped) {
|
||||
m_playState = Audio::PlayState::Stopped;
|
||||
m_samplePosition = 0;
|
||||
m_playbackPosition = 0.0;
|
||||
m_lastingTime = 0.0;
|
||||
m_velocity = Math::Vector3::Zero();
|
||||
m_hasLastPosition = false;
|
||||
m_hrtf.ResetState();
|
||||
Audio::AudioSystem::Get().UnregisterSource(this);
|
||||
}
|
||||
}
|
||||
|
||||
void AudioSourceComponent::SetClip(Resources::AudioClip* clip) {
    // Adopt the new clip and drop every piece of state tied to the
    // previous one: handle, decode flag, serialized path/ref.
    m_clipHandle = Resources::ResourceHandle<Resources::AudioClip>(clip);
    m_clip = clip;
    m_isDecoded = false;
    m_clipPath.clear();
    m_clipRef.Reset();

    // Rewind playback and clear motion/HRTF history.
    m_samplePosition = 0;
    m_playbackPosition = 0.0;
    m_lastingTime = 0.0;
    m_velocity = Math::Vector3::Zero();
    m_hasLastPosition = false;
    m_hrtf.ResetState();

    if (clip == nullptr || !clip->IsValid()) {
        return;
    }

    DecodeAudioData();

    // Record the clip's path and, when possible, a stable asset ref so
    // the clip assignment survives serialization.
    const std::string path = ToStdString(clip->GetPath());
    if (!path.empty()) {
        m_clipPath = path;
        const bool resolved = Resources::ResourceManager::Get().TryGetAssetRef(
            m_clipPath.c_str(),
            Resources::ResourceType::AudioClip,
            m_clipRef);
        if (!resolved) {
            m_clipRef.Reset();
        }
    }
    // Warm the shared decoded-PCM cache for this clip.
    static_cast<void>(m_clip->GetDecodedPCMData());
}
|
||||
|
||||
void AudioSourceComponent::SetClipPath(const std::string& clipPath) {
    // Remember the path and attempt to resolve a stable asset ref for it.
    m_clipRef.Reset();
    m_clipPath = clipPath;
    if (!m_clipPath.empty()) {
        const bool resolved = Resources::ResourceManager::Get().TryGetAssetRef(
            m_clipPath.c_str(),
            Resources::ResourceType::AudioClip,
            m_clipRef);
        if (!resolved) {
            m_clipRef.Reset();
        }
    }

    // Release the previous clip and rewind all playback state.
    m_clipHandle.Reset();
    m_clip = nullptr;
    m_samplePosition = 0;
    m_playbackPosition = 0.0;
    m_lastingTime = 0.0;
    m_velocity = Math::Vector3::Zero();
    m_hasLastPosition = false;
    m_hrtf.ResetState();

    if (m_clipPath.empty()) {
        return;
    }

    // Load the new clip through the resource manager and warm the
    // shared decoded-PCM cache when the load succeeded.
    m_clipHandle = Resources::ResourceManager::Get().Load<Resources::AudioClip>(m_clipPath.c_str());
    m_clip = m_clipHandle.Get();
    if (m_clip != nullptr && m_clip->IsValid()) {
        static_cast<void>(m_clip->GetDecodedPCMData());
    }
}
|
||||
|
||||
// Detaches any assigned clip. Clears the serialized ref/path first,
// then routes through SetClip(nullptr) for the remaining teardown.
void AudioSourceComponent::ClearClip() {
    m_clipRef.Reset();
    m_clipPath.clear();
    SetClip(nullptr);
}
|
||||
|
||||
// Sets the source gain, clamped to the valid [0, 1] range.
void AudioSourceComponent::SetVolume(float volume) {
    // std::clamp instead of the max/min chain, consistent with the
    // other parameter setters in this file (Set3DParams et al.).
    m_volume = std::clamp(volume, 0.0f, 1.0f);
}
|
||||
@@ -68,6 +280,21 @@ void AudioSourceComponent::SetPan(float pan) {
|
||||
m_pan = std::max(-1.0f, std::min(1.0f, pan));
|
||||
}
|
||||
|
||||
void AudioSourceComponent::SetHRTFEnabled(bool enabled) {
|
||||
m_useHRTF = enabled;
|
||||
if (!enabled) {
|
||||
m_hrtf.ResetState();
|
||||
}
|
||||
}
|
||||
|
||||
// Forwards the cross-feed amount to the HRTF processor.
void AudioSourceComponent::SetHRTFCrossFeed(float crossFeed) {
    m_hrtf.SetCrossFeed(crossFeed);
}
|
||||
|
||||
// Forwards the quality level to the HRTF processor.
void AudioSourceComponent::SetHRTFQuality(Audio::uint32 level) {
    m_hrtf.SetQualityLevel(level);
}
|
||||
|
||||
// Enables or disables seamless looping of the assigned clip.
void AudioSourceComponent::SetLooping(bool loop) {
    m_isLooping = loop;
}
|
||||
@@ -78,10 +305,17 @@ void AudioSourceComponent::SetSpatialize(bool spatialize) {
|
||||
|
||||
// Stores the 3D spatialization parameters after sanitizing every
// field to its valid range.
void AudioSourceComponent::Set3DParams(const Audio::Audio3DParams& params) {
    Audio::Audio3DParams sanitized = params;
    sanitized.dopplerLevel = std::max(0.0f, sanitized.dopplerLevel);
    sanitized.speedOfSound = std::max(1.0f, sanitized.speedOfSound);
    sanitized.minDistance = std::max(0.0f, sanitized.minDistance);
    // maxDistance may never fall below the (already sanitized) minDistance.
    sanitized.maxDistance = std::max(sanitized.minDistance, sanitized.maxDistance);
    sanitized.panLevel = std::clamp(sanitized.panLevel, 0.0f, 1.0f);
    sanitized.spread = std::clamp(sanitized.spread, 0.0f, 1.0f);
    sanitized.reverbZoneMix = std::clamp(sanitized.reverbZoneMix, 0.0f, 1.0f);
    m_3DParams = sanitized;
}
|
||||
|
||||
// Sets the per-source doppler intensity; negative levels are
// meaningless and clamp to zero.
void AudioSourceComponent::SetDopplerLevel(float level) {
    // The previous body assigned the raw value and then immediately
    // overwrote it with the clamped one — the dead store is removed.
    m_3DParams.dopplerLevel = std::max(0.0f, level);
}
|
||||
|
||||
void AudioSourceComponent::SetSpread(float spread) {
|
||||
@@ -101,11 +335,24 @@ void AudioSourceComponent::SetTime(float seconds) {
|
||||
return;
|
||||
}
|
||||
|
||||
Audio::uint32 sampleRate = m_clip->GetSampleRate();
|
||||
Audio::uint32 channels = m_clip->GetChannels();
|
||||
Audio::uint64 sampleOffset = static_cast<Audio::uint64>(seconds * sampleRate * channels);
|
||||
m_samplePosition = sampleOffset;
|
||||
m_lastingTime = seconds;
|
||||
const Audio::uint32 sampleRate = m_clip->GetSampleRate();
|
||||
if (sampleRate == 0) {
|
||||
m_samplePosition = 0;
|
||||
m_playbackPosition = 0.0;
|
||||
m_lastingTime = 0.0;
|
||||
return;
|
||||
}
|
||||
|
||||
const double frameOffset = static_cast<double>(std::max(0.0f, seconds)) * sampleRate;
|
||||
const Audio::uint64 totalFrames = m_clip->GetFrameCount();
|
||||
if (totalFrames > 0) {
|
||||
m_playbackPosition = std::min(frameOffset, static_cast<double>(totalFrames));
|
||||
m_samplePosition = static_cast<Audio::uint64>(m_playbackPosition);
|
||||
} else {
|
||||
m_samplePosition = 0;
|
||||
m_playbackPosition = 0.0;
|
||||
}
|
||||
m_lastingTime = m_playbackPosition / static_cast<double>(sampleRate);
|
||||
}
|
||||
|
||||
float AudioSourceComponent::GetTime() const {
|
||||
@@ -129,26 +376,26 @@ void AudioSourceComponent::StopEnergyDetect() {
|
||||
}
|
||||
|
||||
void AudioSourceComponent::Update(float deltaTime) {
|
||||
if (m_gameObject) {
|
||||
const Math::Vector3 position = transform().GetPosition();
|
||||
if (m_hasLastPosition && deltaTime > 0.0f) {
|
||||
m_velocity = (position - m_lastPosition) / deltaTime;
|
||||
} else {
|
||||
m_velocity = Math::Vector3::Zero();
|
||||
}
|
||||
m_lastPosition = position;
|
||||
m_hasLastPosition = true;
|
||||
} else {
|
||||
m_velocity = Math::Vector3::Zero();
|
||||
m_hasLastPosition = false;
|
||||
}
|
||||
|
||||
if (m_playState != Audio::PlayState::Playing || !m_clip) {
|
||||
return;
|
||||
}
|
||||
|
||||
m_lastingTime += deltaTime * m_pitch;
|
||||
|
||||
Audio::uint32 channels = m_clip->GetChannels();
|
||||
Audio::uint32 sampleRate = m_clip->GetSampleRate();
|
||||
Audio::uint64 samplesPerSecond = sampleRate * channels;
|
||||
Audio::uint64 samplesToAdvance = static_cast<Audio::uint64>(deltaTime * m_pitch * samplesPerSecond);
|
||||
m_samplePosition += samplesToAdvance;
|
||||
|
||||
Audio::uint64 totalSamples = static_cast<Audio::uint64>(m_clip->GetAudioData().Size()) / (m_clip->GetBitsPerSample() / 8);
|
||||
|
||||
if (m_samplePosition >= totalSamples) {
|
||||
if (m_isLooping) {
|
||||
m_samplePosition = m_samplePosition % totalSamples;
|
||||
} else {
|
||||
Stop();
|
||||
}
|
||||
if (m_clip->GetSampleRate() > 0) {
|
||||
m_lastingTime = m_playbackPosition / static_cast<double>(m_clip->GetSampleRate());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -168,138 +415,431 @@ void AudioSourceComponent::OnDestroy() {
|
||||
Stop();
|
||||
}
|
||||
|
||||
void AudioSourceComponent::DecodeAudioData() {
|
||||
if (!m_clip || !m_clip->IsValid()) {
|
||||
return;
|
||||
void AudioSourceComponent::Serialize(std::ostream& os) const {
|
||||
Resources::AssetRef serializedClipRef = m_clipRef;
|
||||
std::string serializedClipPath = m_clipPath;
|
||||
if (serializedClipPath.empty() && m_clip != nullptr) {
|
||||
serializedClipPath = ToStdString(m_clip->GetPath());
|
||||
}
|
||||
|
||||
if (m_isDecoded) {
|
||||
return;
|
||||
if (!serializedClipRef.IsValid() &&
|
||||
!serializedClipPath.empty() &&
|
||||
!HasVirtualPathScheme(serializedClipPath) &&
|
||||
Resources::ResourceManager::Get().TryGetAssetRef(
|
||||
serializedClipPath.c_str(),
|
||||
Resources::ResourceType::AudioClip,
|
||||
serializedClipRef)) {
|
||||
}
|
||||
|
||||
const auto& audioData = m_clip->GetAudioData();
|
||||
if (audioData.Empty()) {
|
||||
return;
|
||||
if (serializedClipRef.IsValid() || !HasVirtualPathScheme(serializedClipPath)) {
|
||||
serializedClipPath.clear();
|
||||
}
|
||||
|
||||
Audio::uint32 channels = m_clip->GetChannels();
|
||||
Audio::uint32 bitsPerSample = m_clip->GetBitsPerSample();
|
||||
uint32_t bytesPerSample = bitsPerSample / 8;
|
||||
uint32_t totalSamples = static_cast<uint32_t>(audioData.Size()) / bytesPerSample;
|
||||
|
||||
m_decodedData.resize(totalSamples);
|
||||
|
||||
const uint8_t* rawData = audioData.Data();
|
||||
|
||||
if (bitsPerSample == 16) {
|
||||
const int16_t* samples16 = reinterpret_cast<const int16_t*>(rawData);
|
||||
for (uint32_t i = 0; i < totalSamples; ++i) {
|
||||
m_decodedData[i] = samples16[i] / 32768.0f;
|
||||
}
|
||||
} else if (bitsPerSample == 8) {
|
||||
for (uint32_t i = 0; i < totalSamples; ++i) {
|
||||
m_decodedData[i] = (rawData[i] - 128) / 128.0f;
|
||||
}
|
||||
} else if (bitsPerSample == 24) {
|
||||
for (uint32_t i = 0; i < totalSamples; ++i) {
|
||||
int32_t sample = (rawData[i * 3] | (rawData[i * 3 + 1] << 8) | (rawData[i * 3 + 2] << 16));
|
||||
if (sample & 0x800000) {
|
||||
sample |= 0xFF000000;
|
||||
}
|
||||
m_decodedData[i] = sample / 8388608.0f;
|
||||
}
|
||||
} else if (bitsPerSample == 32) {
|
||||
const int32_t* samples32 = reinterpret_cast<const int32_t*>(rawData);
|
||||
for (uint32_t i = 0; i < totalSamples; ++i) {
|
||||
m_decodedData[i] = samples32[i] / 2147483648.0f;
|
||||
}
|
||||
}
|
||||
|
||||
m_isDecoded = true;
|
||||
os << "clipPath=" << serializedClipPath << ";";
|
||||
os << "clipRef=" << EncodeAssetRef(serializedClipRef) << ";";
|
||||
os << "volume=" << m_volume << ";";
|
||||
os << "pitch=" << m_pitch << ";";
|
||||
os << "pan=" << m_pan << ";";
|
||||
os << "looping=" << (m_isLooping ? 1 : 0) << ";";
|
||||
os << "spatialize=" << (m_spatialize ? 1 : 0) << ";";
|
||||
os << "hrtfEnabled=" << (m_useHRTF ? 1 : 0) << ";";
|
||||
os << "hrtfCrossFeed=" << m_hrtf.GetCrossFeed() << ";";
|
||||
os << "hrtfQuality=" << m_hrtf.GetQualityLevel() << ";";
|
||||
os << "dopplerLevel=" << m_3DParams.dopplerLevel << ";";
|
||||
os << "speedOfSound=" << m_3DParams.speedOfSound << ";";
|
||||
os << "minDistance=" << m_3DParams.minDistance << ";";
|
||||
os << "maxDistance=" << m_3DParams.maxDistance << ";";
|
||||
os << "panLevel=" << m_3DParams.panLevel << ";";
|
||||
os << "spread=" << m_3DParams.spread << ";";
|
||||
os << "reverbZoneMix=" << m_3DParams.reverbZoneMix << ";";
|
||||
}
|
||||
|
||||
void AudioSourceComponent::ProcessAudio(float* buffer, Audio::uint32 sampleCount, Audio::uint32 channels,
|
||||
const Math::Vector3& listenerPosition,
|
||||
const Math::Quaternion& listenerRotation) {
|
||||
if (m_playState != Audio::PlayState::Playing || !m_clip || !m_isDecoded) {
|
||||
return;
|
||||
}
|
||||
void AudioSourceComponent::Deserialize(std::istream& is) {
|
||||
ClearClip();
|
||||
SetVolume(1.0f);
|
||||
SetPitch(1.0f);
|
||||
SetPan(0.0f);
|
||||
SetLooping(false);
|
||||
SetSpatialize(true);
|
||||
SetHRTFEnabled(false);
|
||||
SetHRTFCrossFeed(0.0f);
|
||||
SetHRTFQuality(2);
|
||||
Set3DParams(Audio::Audio3DParams());
|
||||
|
||||
if (channels == 0 || sampleCount == 0) {
|
||||
return;
|
||||
}
|
||||
std::string token;
|
||||
std::string pendingClipPath;
|
||||
Resources::AssetRef pendingClipRef;
|
||||
Audio::Audio3DParams params = m_3DParams;
|
||||
while (std::getline(is, token, ';')) {
|
||||
if (token.empty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (m_decodedData.empty()) {
|
||||
return;
|
||||
}
|
||||
const size_t eqPos = token.find('=');
|
||||
if (eqPos == std::string::npos) {
|
||||
continue;
|
||||
}
|
||||
|
||||
float volume = m_volume;
|
||||
if (m_spatialize) {
|
||||
Apply3DAttenuation(listenerPosition);
|
||||
volume *= m_volume;
|
||||
}
|
||||
const std::string key = token.substr(0, eqPos);
|
||||
const std::string value = token.substr(eqPos + 1);
|
||||
|
||||
Audio::uint32 clipChannels = m_clip->GetChannels();
|
||||
Audio::uint64 totalSamples = static_cast<Audio::uint64>(m_decodedData.size());
|
||||
Audio::uint64 samplesPerFrame = sampleCount * channels;
|
||||
|
||||
for (Audio::uint32 i = 0; i < sampleCount; ++i) {
|
||||
for (Audio::uint32 ch = 0; ch < channels; ++ch) {
|
||||
Audio::uint64 outputIndex = m_samplePosition + i * channels + ch;
|
||||
|
||||
if (outputIndex >= totalSamples) {
|
||||
if (m_isLooping && totalSamples > 0) {
|
||||
outputIndex = outputIndex % totalSamples;
|
||||
} else {
|
||||
buffer[i * channels + ch] += 0.0f;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
Audio::uint64 decodedChannel = (ch < clipChannels) ? ch : (clipChannels - 1);
|
||||
Audio::uint64 decodedIndex = (outputIndex / channels) * clipChannels + decodedChannel;
|
||||
float sample = m_decodedData[decodedIndex];
|
||||
|
||||
buffer[i * channels + ch] += sample * volume;
|
||||
if (key == "clipPath") {
|
||||
pendingClipPath = value;
|
||||
} else if (key == "clipRef") {
|
||||
TryDecodeAssetRef(value, pendingClipRef);
|
||||
} else if (key == "volume") {
|
||||
SetVolume(std::stof(value));
|
||||
} else if (key == "pitch") {
|
||||
SetPitch(std::stof(value));
|
||||
} else if (key == "pan") {
|
||||
SetPan(std::stof(value));
|
||||
} else if (key == "looping") {
|
||||
SetLooping(std::stoi(value) != 0);
|
||||
} else if (key == "spatialize") {
|
||||
SetSpatialize(std::stoi(value) != 0);
|
||||
} else if (key == "hrtfEnabled") {
|
||||
SetHRTFEnabled(std::stoi(value) != 0);
|
||||
} else if (key == "hrtfCrossFeed") {
|
||||
SetHRTFCrossFeed(std::stof(value));
|
||||
} else if (key == "hrtfQuality") {
|
||||
SetHRTFQuality(static_cast<Audio::uint32>(std::stoul(value)));
|
||||
} else if (key == "dopplerLevel") {
|
||||
params.dopplerLevel = std::stof(value);
|
||||
} else if (key == "speedOfSound") {
|
||||
params.speedOfSound = std::stof(value);
|
||||
} else if (key == "minDistance") {
|
||||
params.minDistance = std::stof(value);
|
||||
} else if (key == "maxDistance") {
|
||||
params.maxDistance = std::stof(value);
|
||||
} else if (key == "panLevel") {
|
||||
params.panLevel = std::stof(value);
|
||||
} else if (key == "spread") {
|
||||
params.spread = std::stof(value);
|
||||
} else if (key == "reverbZoneMix") {
|
||||
params.reverbZoneMix = std::stof(value);
|
||||
}
|
||||
}
|
||||
|
||||
m_samplePosition += samplesPerFrame;
|
||||
Set3DParams(params);
|
||||
|
||||
if (m_samplePosition >= totalSamples) {
|
||||
if (pendingClipRef.IsValid()) {
|
||||
m_clipRef = pendingClipRef;
|
||||
m_clipHandle = Resources::ResourceManager::Get().Load<Resources::AudioClip>(pendingClipRef);
|
||||
m_clip = m_clipHandle.Get();
|
||||
if (m_clip != nullptr) {
|
||||
m_clipPath = ToStdString(m_clip->GetPath());
|
||||
if (m_clip->IsValid()) {
|
||||
static_cast<void>(m_clip->GetDecodedPCMData());
|
||||
}
|
||||
} else {
|
||||
Containers::String resolvedPath;
|
||||
if (Resources::ResourceManager::Get().TryResolveAssetPath(pendingClipRef, resolvedPath)) {
|
||||
SetClipPath(ToStdString(resolvedPath));
|
||||
m_clipRef = pendingClipRef;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (m_clip == nullptr && !pendingClipPath.empty() && HasVirtualPathScheme(pendingClipPath)) {
|
||||
SetClipPath(pendingClipPath);
|
||||
}
|
||||
}
|
||||
|
||||
void AudioSourceComponent::ProcessAudio(float* buffer, Audio::uint32 frameCount, Audio::uint32 channels,
|
||||
const Math::Vector3& listenerPosition,
|
||||
const Math::Quaternion& listenerRotation,
|
||||
const Math::Vector3& listenerVelocity,
|
||||
float listenerDopplerLevel,
|
||||
float speedOfSound,
|
||||
Audio::uint32 outputSampleRate) {
|
||||
if (m_playState != Audio::PlayState::Playing || !m_clip) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (channels == 0 || frameCount == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const auto& decodedData = m_clip->GetDecodedPCMData();
|
||||
if (decodedData.empty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
const float attenuation = m_spatialize ? Compute3DAttenuation(listenerPosition) : 1.0f;
|
||||
const float volume = m_volume * attenuation;
|
||||
if (volume <= 0.0f) {
|
||||
return;
|
||||
}
|
||||
|
||||
const Audio::uint32 clipChannels = m_clip->GetChannels();
|
||||
if (clipChannels == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const Audio::uint64 totalFrames = static_cast<Audio::uint64>(decodedData.size()) / clipChannels;
|
||||
if (totalFrames == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const Audio::uint32 clipSampleRate = m_clip->GetSampleRate();
|
||||
if (clipSampleRate == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const Audio::uint32 mixSampleRate = outputSampleRate > 0 ? outputSampleRate : clipSampleRate;
|
||||
if (mixSampleRate == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
double playbackRate =
|
||||
(static_cast<double>(clipSampleRate) / static_cast<double>(mixSampleRate)) *
|
||||
static_cast<double>(m_pitch);
|
||||
playbackRate *= ComputeDopplerFactor(
|
||||
listenerPosition,
|
||||
listenerVelocity,
|
||||
listenerDopplerLevel,
|
||||
speedOfSound);
|
||||
|
||||
playbackRate = std::max(playbackRate, 0.0);
|
||||
|
||||
const bool useHRTF =
|
||||
m_useHRTF && m_spatialize && m_gameObject && clipChannels == 1 && channels >= 2;
|
||||
float spatialPan = 0.0f;
|
||||
if (!useHRTF && m_spatialize) {
|
||||
spatialPan = ComputeSpatialPan(listenerPosition, listenerRotation);
|
||||
}
|
||||
const float combinedPan = std::clamp(m_pan + spatialPan, -1.0f, 1.0f);
|
||||
float leftPanGain = 1.0f;
|
||||
float rightPanGain = 1.0f;
|
||||
ComputePanGains(combinedPan, leftPanGain, rightPanGain);
|
||||
|
||||
double playbackPosition = m_playbackPosition;
|
||||
bool reachedClipEnd = false;
|
||||
Audio::uint32 renderedFrameCount = 0;
|
||||
|
||||
for (Audio::uint32 i = 0; i < frameCount; ++i) {
|
||||
if (!m_isLooping && playbackPosition >= static_cast<double>(totalFrames)) {
|
||||
reachedClipEnd = true;
|
||||
break;
|
||||
}
|
||||
|
||||
const double sourceFrame = m_isLooping
|
||||
? WrapFramePosition(playbackPosition, totalFrames)
|
||||
: playbackPosition;
|
||||
|
||||
if (channels >= 2) {
|
||||
if (clipChannels == 1) {
|
||||
const float sample = SampleDecodedChannel(
|
||||
decodedData,
|
||||
totalFrames,
|
||||
clipChannels,
|
||||
sourceFrame,
|
||||
0,
|
||||
m_isLooping);
|
||||
if (useHRTF) {
|
||||
buffer[i * channels] += sample * volume;
|
||||
buffer[i * channels + 1] += sample * volume;
|
||||
} else {
|
||||
buffer[i * channels] += sample * volume * leftPanGain;
|
||||
buffer[i * channels + 1] += sample * volume * rightPanGain;
|
||||
}
|
||||
|
||||
for (Audio::uint32 ch = 2; ch < channels; ++ch) {
|
||||
buffer[i * channels + ch] += sample * volume;
|
||||
}
|
||||
} else {
|
||||
float leftSample = SampleDecodedChannel(
|
||||
decodedData,
|
||||
totalFrames,
|
||||
clipChannels,
|
||||
sourceFrame,
|
||||
0,
|
||||
m_isLooping);
|
||||
float rightSample = SampleDecodedChannel(
|
||||
decodedData,
|
||||
totalFrames,
|
||||
clipChannels,
|
||||
sourceFrame,
|
||||
1,
|
||||
m_isLooping);
|
||||
|
||||
ApplyStereoSpread(
|
||||
leftSample,
|
||||
rightSample,
|
||||
m_spatialize ? m_3DParams.spread : 1.0f,
|
||||
leftSample,
|
||||
rightSample);
|
||||
|
||||
float stereoLeftGain = leftPanGain;
|
||||
float stereoRightGain = rightPanGain;
|
||||
if (m_spatialize) {
|
||||
const float stereoPan = combinedPan * (1.0f - 0.5f * m_3DParams.spread);
|
||||
ComputePanGains(stereoPan, stereoLeftGain, stereoRightGain);
|
||||
}
|
||||
|
||||
buffer[i * channels] += leftSample * volume * stereoLeftGain;
|
||||
buffer[i * channels + 1] += rightSample * volume * stereoRightGain;
|
||||
|
||||
for (Audio::uint32 ch = 2; ch < channels; ++ch) {
|
||||
const float sample = SampleDecodedChannel(
|
||||
decodedData,
|
||||
totalFrames,
|
||||
clipChannels,
|
||||
sourceFrame,
|
||||
std::min(ch, clipChannels - 1),
|
||||
m_isLooping);
|
||||
buffer[i * channels + ch] += sample * volume;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (Audio::uint32 ch = 0; ch < channels; ++ch) {
|
||||
const float sample = SampleDecodedChannel(
|
||||
decodedData,
|
||||
totalFrames,
|
||||
clipChannels,
|
||||
sourceFrame,
|
||||
std::min(ch, clipChannels - 1),
|
||||
m_isLooping);
|
||||
buffer[i * channels + ch] += sample * volume;
|
||||
}
|
||||
}
|
||||
|
||||
renderedFrameCount++;
|
||||
|
||||
playbackPosition += playbackRate;
|
||||
if (m_isLooping && playbackPosition >= static_cast<double>(totalFrames)) {
|
||||
playbackPosition = WrapFramePosition(playbackPosition, totalFrames);
|
||||
} else if (!m_isLooping && playbackPosition >= static_cast<double>(totalFrames)) {
|
||||
reachedClipEnd = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (useHRTF && renderedFrameCount > 0) {
|
||||
m_hrtf.SetSampleRate(mixSampleRate);
|
||||
m_hrtf.SetSpeedOfSound(speedOfSound);
|
||||
m_hrtf.ProcessAudio(
|
||||
buffer,
|
||||
renderedFrameCount,
|
||||
channels,
|
||||
transform().GetPosition(),
|
||||
listenerPosition,
|
||||
listenerRotation);
|
||||
|
||||
if (std::abs(m_pan) > Math::EPSILON) {
|
||||
ApplyPanToBuffer(buffer, renderedFrameCount, channels, m_pan);
|
||||
}
|
||||
}
|
||||
|
||||
m_playbackPosition = playbackPosition;
|
||||
m_samplePosition = static_cast<Audio::uint64>(m_playbackPosition);
|
||||
m_lastingTime = m_playbackPosition / static_cast<double>(clipSampleRate);
|
||||
|
||||
if (reachedClipEnd) {
|
||||
if (m_isLooping) {
|
||||
m_samplePosition = m_samplePosition % totalSamples;
|
||||
m_playbackPosition = WrapFramePosition(m_playbackPosition, totalFrames);
|
||||
m_samplePosition = static_cast<Audio::uint64>(m_playbackPosition);
|
||||
m_lastingTime = m_playbackPosition / static_cast<double>(clipSampleRate);
|
||||
} else {
|
||||
Stop();
|
||||
}
|
||||
}
|
||||
|
||||
if (m_isEnergyDetecting) {
|
||||
UpdateEnergy(buffer, sampleCount * channels);
|
||||
UpdateEnergy(buffer, renderedFrameCount * channels);
|
||||
}
|
||||
}
|
||||
|
||||
void AudioSourceComponent::Apply3DAttenuation(const Math::Vector3& listenerPosition) {
|
||||
float AudioSourceComponent::Compute3DAttenuation(const Math::Vector3& listenerPosition) const {
|
||||
if (!m_gameObject) {
|
||||
return;
|
||||
return 1.0f;
|
||||
}
|
||||
|
||||
Math::Vector3 sourcePosition = transform().GetPosition();
|
||||
Math::Vector3 direction = sourcePosition - listenerPosition;
|
||||
float distance = direction.Magnitude();
|
||||
const Math::Vector3 sourcePosition = transform().GetPosition();
|
||||
const Math::Vector3 direction = sourcePosition - listenerPosition;
|
||||
const float distance = direction.Magnitude();
|
||||
const float minDistance = std::max(0.0f, m_3DParams.minDistance);
|
||||
const float maxDistance = std::max(minDistance, m_3DParams.maxDistance);
|
||||
|
||||
if (distance > m_3DParams.maxDistance) {
|
||||
m_volume = 0.0f;
|
||||
return;
|
||||
if (distance <= minDistance) {
|
||||
return 1.0f;
|
||||
}
|
||||
|
||||
float normalizedDistance = distance / m_3DParams.maxDistance;
|
||||
if (distance >= maxDistance) {
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
float normalizedDistance = 0.0f;
|
||||
if (maxDistance > minDistance) {
|
||||
normalizedDistance = (distance - minDistance) / (maxDistance - minDistance);
|
||||
}
|
||||
normalizedDistance = std::max(0.0f, std::min(1.0f, normalizedDistance));
|
||||
|
||||
float attenuation = 1.0f - normalizedDistance;
|
||||
attenuation = std::pow(attenuation, 2.0f);
|
||||
return attenuation;
|
||||
}
|
||||
|
||||
m_volume *= attenuation;
|
||||
// Derives a stereo pan position (-1 = hard left, +1 = hard right) from
// the source's direction in listener space, scaled by the configured
// panLevel. Sources without a game object, or coincident with the
// listener, pan to centre.
float AudioSourceComponent::ComputeSpatialPan(const Math::Vector3& listenerPosition,
                                              const Math::Quaternion& listenerRotation) const {
    if (!m_gameObject) {
        return 0.0f;
    }

    const Math::Vector3 offset = transform().GetPosition() - listenerPosition;
    const float distance = offset.Magnitude();
    if (distance <= Math::EPSILON) {
        // Source sits on top of the listener: no lateral preference.
        return 0.0f;
    }

    // Rotate the unit direction into the listener's local frame; its x
    // component is the left/right bias.
    const Math::Vector3 localDirection =
        listenerRotation.Inverse() * (offset / distance);
    return std::clamp(localDirection.x, -1.0f, 1.0f) *
           std::clamp(m_3DParams.panLevel, 0.0f, 1.0f);
}
|
||||
|
||||
// Computes the doppler pitch multiplier for this source relative to
// the listener, clamped to [0.25, 4.0]. Returns 1.0 (no shift) for
// non-spatialized sources, sources without a game object, coincident
// positions, or a zero combined doppler level.
double AudioSourceComponent::ComputeDopplerFactor(const Math::Vector3& listenerPosition,
                                                  const Math::Vector3& listenerVelocity,
                                                  float listenerDopplerLevel,
                                                  float speedOfSound) const {
    if (!m_spatialize || !m_gameObject) {
        return 1.0;
    }

    const Math::Vector3 offset = transform().GetPosition() - listenerPosition;
    const float distance = offset.Magnitude();
    if (distance <= Math::EPSILON) {
        // Degenerate geometry: no meaningful approach direction.
        return 1.0;
    }

    // Effective intensity is the product of the listener's and this
    // source's doppler levels; either one at zero disables the effect.
    const float intensity =
        std::max(0.0f, listenerDopplerLevel) * std::max(0.0f, m_3DParams.dopplerLevel);
    if (intensity <= 0.0f) {
        return 1.0;
    }

    const Math::Vector3 direction = offset / distance;
    const float soundSpeed = std::max(1.0f, speedOfSound);
    // Cap relative speeds below Mach 1 so the ratio stays finite.
    const float speedLimit = soundSpeed * 0.95f;

    // Signed approach speeds along the listener→source axis.
    const float listenerApproach = Math::Vector3::Dot(listenerVelocity, direction);
    const float sourceApproach = -Math::Vector3::Dot(m_velocity, direction);

    const float listenerTerm = std::clamp(
        listenerApproach * intensity,
        -speedLimit,
        speedLimit);
    const float sourceTerm = std::clamp(
        sourceApproach * intensity,
        -speedLimit,
        speedLimit);

    // Classic doppler ratio (c + v_listener) / (c - v_source), with the
    // denominator floored at 1 to avoid division blow-ups.
    const double numerator = static_cast<double>(soundSpeed + listenerTerm);
    const double denominator = std::max(
        1.0,
        static_cast<double>(soundSpeed - sourceTerm));
    return std::clamp(numerator / denominator, 0.25, 4.0);
}
|
||||
|
||||
void AudioSourceComponent::UpdateEnergy(const float* buffer, Audio::uint32 sampleCount) {
|
||||
|
||||
Reference in New Issue
Block a user