Add Music fluctuations project and Chinese plan docs

This commit is contained in:
2026-03-21 15:55:54 +08:00
parent 629455df07
commit a172d75e36
462 changed files with 382904 additions and 0 deletions

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,52 @@
# Minimum CMake version required to configure this project
cmake_minimum_required(VERSION 3.12)
# Project name
project(Doom)
# C++ standard used by every target in the project
set(CMAKE_CXX_STANDARD 17)
# Copy the shared asset folder ("res") next to the build output so the
# executable can reach assets through relative paths.
file(GLOB resources "../res")
file(COPY ${resources} DESTINATION ${CMAKE_BINARY_DIR})
include_directories(./)
include_directories(./myflann/include)
# Engine subsystems, one library target each
add_subdirectory(window)
add_subdirectory(gpu)
add_subdirectory(game)
add_subdirectory(context)
add_subdirectory(application)
add_subdirectory(myinput)
add_subdirectory(scene)
add_subdirectory(renderer)
add_subdirectory(image)
add_subdirectory(time)
add_subdirectory(audio)
add_subdirectory(kissfft)
add_subdirectory(audio3d)
add_subdirectory(libsamplerate)
# Build the executable from main.cpp and link every subsystem library
add_executable(doom "main.cpp")
target_link_libraries(doom
	application
	window
	gpu
	game
	context
	myinput
	scene
	renderer
	image
	time
	audio
	fft
	audio3d
	samplerate
	#myflann/lib/flann.lib  # prebuilt FLANN kept for reference, currently unused
)

View File

@@ -0,0 +1,34 @@
#include"Application.h"
#include<iostream>
#include <iomanip>
// Builds the per-process Context and then the Game on top of it.
// NOTE(review): C++ initializes members in DECLARATION order (Application.h),
// not in the order of this init list.  m_context must be declared before
// m_game in the header for m_game(m_context) to see a constructed Context —
// verify the declaration order in Application.h.
Application::Application(HINSTANCE p_hInstance) :
	m_context(p_hInstance),
	m_game(m_context)
{
}
Application::~Application()
{
}
// Main loop: enter the game, tick it once per frame until the window
// closes, then let it clean up.
void Application::Run()
{
	m_game.OnEnter();
	while (IsRunning())
	{
		// Frame delta in seconds, measured by the context's clock.
		const float frameDelta = m_context.m_clock.Update();
		m_game.PreUpdate();
		m_game.Update(frameDelta);
		m_game.PostUpdate();
	}
	m_game.OnExit();
}
// True while the context's window has not been asked to close.
bool Application::IsRunning()
{
	return !m_context.m_window.shouldClose();
}

View File

@@ -0,0 +1,17 @@
#pragma once
#include "../global/Base.h"
#include"../context/Context.h"
#include"../game/Game.h"
#include<Windows.h>
// Owns the process-wide Context and the Game and drives the main loop.
class Application
{
public:
	// @param p_hInstance Win32 instance handle forwarded to the Context.
	Application(HINSTANCE p_hInstance);
	~Application();
	void Run();        // Runs the game loop until the window closes.
	bool IsRunning();  // True while the window has not been closed.
public:
	// BUG FIX: members are constructed in declaration order, and the
	// constructor's init list builds m_game FROM m_context.  The original
	// declared m_game first, so the Game was initialized from a
	// not-yet-constructed Context (undefined behavior).  m_context must
	// be declared first.
	Context m_context;
	Game m_game;
};

View File

@@ -0,0 +1,4 @@
# Collect every .cpp under this directory (recursively) into the library.
# FIX: the original passed a stray "./" globbing expression that matched
# nothing; only the real pattern is kept.
file(GLOB_RECURSE APPLICATION "*.cpp")
add_library(application ${APPLICATION})

View File

@@ -0,0 +1,13 @@
#include"AudioClip.h"
// A named handle to an audio asset; currently it only stores the file name.
// @param p_fileName path of the backing audio file.
AudioClip::AudioClip(std::string p_fileName)
	: m_fileName(p_fileName)  // member-init list instead of default-construct + assign
{
}
AudioClip::~AudioClip(){}
// @return the file name this clip was constructed with.
std::string AudioClip::GetName()
{
	return m_fileName;
}

View File

@@ -0,0 +1,11 @@
#pragma once
#include"../global/Base.h"
// Lightweight handle to an audio asset, identified by its file name.
// Ownership: created by AudioManager and handed to AudioSource::Load.
class AudioClip
{
public:
	AudioClip(std::string p_fileName); // p_fileName: path of the backing file.
	~AudioClip();
	std::string GetName();             // Returns the stored file name.
private:
	std::string m_fileName;
};

View File

@@ -0,0 +1,4 @@
#pragma once
// Compile-time audio configuration shared by the engine and sources.
// Number of 16-bit samples per double-buffer half (samples, not bytes).
#define AUDIO_BUFFER_SIZE 8192
// Interleaved output channels (stereo).
#define AUDIO_CHANNELS 2

View File

@@ -0,0 +1,164 @@
#include"AudioEngine.h"
// Construction does nothing; call InitAudioEngine() to open the device.
AudioEngine::AudioEngine()
{
}
// Stops the audio thread and releases the waveOut device.
AudioEngine::~AudioEngine()
{
	ExitAudioEngine();
	// BUG FIX: the original called delete[] on m_audioBuffer1/m_audioBuffer2.
	// Both are fixed-size MEMBER arrays, not heap allocations, so deleting
	// them was undefined behavior; they are destroyed with the object.
}
// Brings the engine up: format description, device open, buffer headers,
// and the thread that queues the first two buffers.  Order matters: the
// device must be open before headers can be prepared.
void AudioEngine::InitAudioEngine()
{
	InitWavFormat();
	InitDevice();
	InitWavHeader();
	InitAudioThread();
}
// Stops playback and releases the device.  Safe to call more than once
// (it is invoked from the destructor as well).
void AudioEngine::ExitAudioEngine()
{
	m_isRunning = false;
	// BUG FIX: std::thread::join() throws std::system_error if the thread
	// was never started or was already joined; guard with joinable().
	if (m_audioThread.joinable()) {
		m_audioThread.join();
	}
	waveOutUnprepareHeader(m_hWaveOut, &m_waveHeader1, sizeof(WAVEHDR));
	waveOutUnprepareHeader(m_hWaveOut, &m_waveHeader2, sizeof(WAVEHDR));
	waveOutClose(m_hWaveOut);
}
// Registers the (single) source the engine pulls PCM data from.
// Null pointers are ignored; a previous source is simply replaced.
void AudioEngine::SubmitSource(AudioSource* p_audioSource)
{
	if (p_audioSource)
	{
		m_audioSource = p_audioSource;
	}
}
// Per-frame hook; currently a no-op.
void AudioEngine::Update(double p_deltaTime)
{
}
// Queues the current front buffer on the device.
MMRESULT AudioEngine::PlayFrontData()
{
	WAVEHDR* m_frontHeader = m_isBuffer1Front ? &m_waveHeader1 : &m_waveHeader2;
	MMRESULT result = waveOutWrite(m_hWaveOut, m_frontHeader, sizeof(WAVEHDR));
	if (result != MMSYSERR_NOERROR)
	{
		std::cout << "Failed to write audio data1" << std::endl;
		return result;
	}
	return MMSYSERR_NOERROR;
}
// Asks the active source for its next block and copies it into whichever
// buffer is currently the back one.  Does nothing if no active source.
void AudioEngine::PrepareBackData()
{
	if (!m_audioSource || !m_audioSource->IsActive()) { return; }
	m_audioSource->PrepareBufferData();
	short* backBuffer = (m_isBuffer1Front ? m_audioBuffer2 : m_audioBuffer1);
	// sizeof(m_audioBuffer1) is the full member-array byte size — both
	// buffers have identical size, so this copies one complete block.
	memcpy(backBuffer, m_audioSource->GetBufferData(), sizeof(m_audioBuffer1));
}
// Flips which buffer is treated as front/back.
void AudioEngine::SwapBuffer()
{
	m_isBuffer1Front = !m_isBuffer1Front;
}
// Starts playback by queueing both buffers from a short-lived thread.
// The thread only primes the pipeline; subsequent refills are driven by
// WOM_DONE callbacks (see OnAudioCallback).
// NOTE(review): m_isRunning is set here but never read by the lambda, and
// joinable() is true for any successfully constructed thread, so the error
// branch below is effectively unreachable — confirm intended.
MMRESULT AudioEngine::InitAudioThread()
{
	MMRESULT result = MMSYSERR_NOERROR;
	m_isRunning = true;
	m_audioThread = std::thread([&]() {
		PlayFrontData();
		SwapBuffer();
		PlayFrontData();
	});
	if (!m_audioThread.joinable()) {
		std::cout << "Failed to create audio thread" << std::endl;
		result = MMSYSERR_ERROR;
	}
	return result;
}
// Describes the output stream: 16-bit stereo PCM at 44.1 kHz.
// The derived fields are computed from the three base parameters instead of
// being hard-coded (same values: blockAlign 4, avgBytes 176400), so the base
// parameters remain the single source of truth.
void AudioEngine::InitWavFormat()
{
	m_waveFormat.wFormatTag = WAVE_FORMAT_PCM;
	m_waveFormat.nChannels = 2;
	m_waveFormat.nSamplesPerSec = 44100;
	m_waveFormat.wBitsPerSample = 16;
	m_waveFormat.nBlockAlign = m_waveFormat.nChannels * m_waveFormat.wBitsPerSample / 8;
	m_waveFormat.nAvgBytesPerSec = m_waveFormat.nSamplesPerSec * m_waveFormat.nBlockAlign;
	m_waveFormat.cbSize = 0;
}
// Enumerates the available output devices (logged for diagnostics) and
// opens the default one, registering StaticAudioCallback for buffer events.
MMRESULT AudioEngine::InitDevice()
{
	MMRESULT result;
	/* enumerate available audio output devices */
	WAVEOUTCAPS waveOutCaps;
	for (UINT i = 0; i < waveOutGetNumDevs(); ++i)
	{
		waveOutGetDevCaps(i, &waveOutCaps, sizeof(WAVEOUTCAPS));
		m_waveOutCaps.push_back(waveOutCaps);
		std::cout << "Device #" << i << ": " << waveOutCaps.szPname << std::endl;
	}
	/* open the default output device (WAVE_MAPPER) */
	// and register this engine's callback; `this` travels as dwInstance
	result = waveOutOpen(&m_hWaveOut, WAVE_MAPPER, &m_waveFormat, (DWORD_PTR)&AudioEngine::StaticAudioCallback, reinterpret_cast<DWORD_PTR>(this), CALLBACK_FUNCTION);
	if (result != MMSYSERR_NOERROR)
	{
		std::cout << "Failed to open audio device" << std::endl;
		return result;
	}
	return MMSYSERR_NOERROR;
}
// Instance-side handler for device events.  On WOM_DONE (a buffer finished
// playing) the back buffer is refilled, buffers swap, and the new front
// buffer is queued — this keeps playback continuous.
// NOTE(review): the waveOutProc documentation warns that calling other
// waveform functions (e.g. waveOutWrite, done here via PlayFrontData) from
// inside the callback can deadlock — confirm against MSDN and consider
// signalling a worker instead.
void AudioEngine::OnAudioCallback(HWAVEOUT hwo, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
	if (uMsg == WOM_DONE) {
		PrepareBackData();
		SwapBuffer();
		PlayFrontData();
	}
}
// Static trampoline: recovers the AudioEngine instance from dwInstance
// (set in waveOutOpen) and forwards the event to it.
void CALLBACK AudioEngine::StaticAudioCallback(HWAVEOUT hwo, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
	AudioEngine* audioEngine = reinterpret_cast<AudioEngine*>(dwInstance);
	if (audioEngine != nullptr) {
		audioEngine->OnAudioCallback(hwo, uMsg, dwInstance, dwParam1, dwParam2);
	}
}
// Points both WAVEHDRs at their member buffers and registers them with the
// open device.  Must run after InitDevice().
MMRESULT AudioEngine::InitWavHeader()
{
	MMRESULT result;
	/* header1 initialization */
	m_waveHeader1.lpData = (LPSTR)m_audioBuffer1;
	m_waveHeader1.dwBufferLength = sizeof(m_audioBuffer1);
	m_waveHeader1.dwFlags = 0;
	result = waveOutPrepareHeader(m_hWaveOut, &m_waveHeader1, sizeof(WAVEHDR));
	if (result != MMSYSERR_NOERROR)
	{
		std::cout << "Failed to prepare audio header1" << std::endl;
		return result;
	}
	/* header2 initialization */
	m_waveHeader2.lpData = (LPSTR)m_audioBuffer2;
	m_waveHeader2.dwBufferLength = sizeof(m_audioBuffer2);
	m_waveHeader2.dwFlags = 0;
	result = waveOutPrepareHeader(m_hWaveOut, &m_waveHeader2, sizeof(WAVEHDR));
	if (result != MMSYSERR_NOERROR)
	{
		std::cout << "Failed to prepare audio header2" << std::endl;
		return result;
	}
	return MMSYSERR_NOERROR;
}

View File

@@ -0,0 +1,47 @@
#pragma once
#include <iostream>
#include <Windows.h>
#include <mmsystem.h>
#include <thread>
#include <chrono>
#include"AudioSource.h"
#include"AudioConfig.h"
#pragma comment(lib, "winmm.lib")
// Double-buffered PCM playback engine built on the Win32 waveOut API.
// One buffer plays ("front") while the other is refilled ("back");
// WOM_DONE device callbacks drive the swap.
class AudioEngine
{
public:
	AudioEngine();
	~AudioEngine();
	void Update(double p_deltaTime);               // Per-frame hook (currently a no-op).
	void InitAudioEngine();                        // Opens the device and starts playback.
	void ExitAudioEngine();                        // Stops the thread and releases the device.
	void SubmitSource(AudioSource* p_audioSource); // Registers the source to pull PCM from (not owned).
private:
	MMRESULT InitAudioThread();
	void InitWavFormat();
	MMRESULT InitDevice();
	MMRESULT InitWavHeader();
	MMRESULT PlayFrontData();
	void PrepareBackData();
	void SwapBuffer();
	void OnAudioCallback(HWAVEOUT hwo, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2);
	static void CALLBACK StaticAudioCallback(HWAVEOUT hwo, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2);
private:
	bool m_isRunning = false;                        // Whether the engine has been started.
	std::thread m_audioThread;                       // Thread that queues the first two buffers.
	AudioSource* m_audioSource{ nullptr };           // TODO: should become one per Play() call.
	short m_audioBuffer1[AUDIO_BUFFER_SIZE] = { 0 }; // PCM buffer 1 (member array, not heap).
	short m_audioBuffer2[AUDIO_BUFFER_SIZE] = { 0 }; // PCM buffer 2.
	bool m_isBuffer1Front = true;                    // Which buffer is currently the front one.
	bool m_isFBufferUsing = false;                   // Front-buffer in-use flag (currently unused).
	bool m_isBBufferPrepared = false;                // Back-buffer ready flag (currently unused).
	HWAVEOUT m_hWaveOut;                             // Handle of the opened output device.
	WAVEFORMATEX m_waveFormat;
	WAVEHDR m_waveHeader1;
	WAVEHDR m_waveHeader2;
	std::vector<WAVEOUTCAPS> m_waveOutCaps;          // Capabilities of the enumerated devices.
};

View File

@@ -0,0 +1,27 @@
#include"AudioManager.h"
// Keeps a reference to the engine so loaded sources can be submitted to it.
AudioManager::AudioManager(AudioEngine& p_audioEngine):
	m_audioEngine(p_audioEngine)
{
}
// NOTE(review): m_audioSource allocated in Load() is never freed here — leak.
AudioManager::~AudioManager()
{
}
// Creates an AudioClip + AudioSource for the given file and registers the
// source with the engine.  The returned pointer remains owned by the manager.
// NOTE(review): a previously loaded m_audioSource is leaked when Load is
// called again, and the source is submitted to the engine BEFORE Load() has
// verified the file opens — confirm both are intended.
AudioSource* AudioManager::Load(char* p_fileName)
{
	AudioClip* clip = new AudioClip(p_fileName);
	m_audioSource = new AudioSource();
	m_audioEngine.SubmitSource(m_audioSource);
	m_audioSource->Load(clip);
	return m_audioSource;
}
// Forwards the per-frame tick to the active source, if one has been loaded.
void AudioManager::Update(double p_deltaTime)
{
	if (m_audioSource == nullptr)
	{
		return;
	}
	m_audioSource->Update(p_deltaTime);
}

View File

@@ -0,0 +1,50 @@
#pragma once
#include<iostream>
#include"WavFileReader.h"
#include"AudioClip.h"
#include"AudioSource.h"
#include"AudioEngine.h"
#pragma comment(lib,"winmm.lib")
// Thin facade that loads audio files and wires the resulting AudioSource
// into the AudioEngine.
class AudioManager
{
public:
	AudioManager(AudioEngine& p_audioEngine);
	~AudioManager();
	AudioSource* Load(char* p_fileName); // Creates clip+source for the file; returned pointer is not owned by the caller.
	void Update(double p_deltaTime);     // Ticks the active source.
private:
	AudioEngine& m_audioEngine;
	AudioSource* m_audioSource{ nullptr }; // TODO: should become one per Play() call.
};
/*
PlaySound 函数是Windows操作系统中的一个API函数用于播放声音文件或系统声音。它可以播放.wav文件、系统声音或内存中的声音数据。以下是与 PlaySound 相关的一些相关API函数和标志
PlaySound函数
cpp
Copy code
BOOL PlaySound(
LPCTSTR pszSound,
HMODULE hmod,
DWORD fdwSound
);
pszSound指定要播放的声音文件的文件名或系统声音的名称。可以为NULL表示停止播放声音。
hmod保留为NULL。
fdwSound指定播放声音的标志可以是以下之一或它们的组合
SND_SYNC播放声音并等待声音播放完毕。
SND_ASYNC以异步方式播放声音不等待声音播放完毕。
SND_FILENAMEpszSound 参数是一个文件名。
SND_RESOURCEpszSound 参数是一个资源名称或标识符。
其他标志:用于控制声音的行为,例如循环播放、停止、替换等。
waveOut 函数族用于播放音频数据具有更高级的音频处理功能例如音频缓冲区管理和音量控制。PlaySound 函数实际上是基于 waveOut 函数族实现的。
mciSendString函数用于与多媒体设备交互可以用于播放音频和视频文件以及控制多媒体设备的状态。
sndPlaySound函数与 PlaySound 类似,用于播放声音,但通常用于纯粹的声音文件播放。
这些API函数可以帮助你在Windows应用程序中处理声音播放和多媒体交互。具体使用哪个API函数取决于你的需求和项目的复杂性。通常如果只需要简单地播放声音文件PlaySound 是一个方便的选择。如果需要更多控制和高级功能则可能需要使用其他API函数。
*/

View File

@@ -0,0 +1,373 @@
#include"AudioSource.h"
// Builds the DSP chain (generic filter, reverb, left/right HRTF filters)
// and prepares the HRTF kernels plus the crossfade window.
AudioSource::AudioSource():
	m_filter(64),
	m_reberation(AUDIO_BUFFER_SIZE / 2, 44100, 0.5f),
	m_hrtf_filter_L(128),
	m_hrtf_filter_R(128)
{
	HRTFInitialize();
	// CLEANUP: an unreachable experimental low-pass-kernel snippet that
	// followed a bare `return;` here (explicitly marked "to be deleted" by
	// the author) has been removed; it never executed.
}
AudioSource::~AudioSource()
{
	// FIX: release the WavFileReader allocated in Load(); it is owned by
	// this source and was previously leaked.  (m_audioClip is created by
	// AudioManager and is NOT freed here — ownership stays with the caller.)
	delete m_wavReader;
	m_wavReader = nullptr;
	m_historyEnergys.clear();
}
// Loads the ear-specific HRTF impulse responses for the current direction
// into the two convolution filters, and precomputes the sin^2 crossfade
// window used when the direction changes mid-stream.
void AudioSource::HRTFInitialize()
{
	/* hrtf filters: normalize the 16-bit integer HRIRs to [-1, 1] */
	m_hrtf.SetDirection(0, 0);
	std::vector<float> hrtfData;
	m_hrtf.GetLeftEarTimeHRTF(hrtfData);
	for (int i = 0; i < 128; i++)
	{
		hrtfData[i] /= (float)32767;
	}
	m_hrtf_filter_L.SetTimeDomainKernel(hrtfData);
	hrtfData.clear();
	m_hrtf.GetRightEarTimeHRTF(hrtfData);
	for (int i = 0; i < 128; i++)
	{
		hrtfData[i] /= (float)32767;
	}
	m_hrtf_filter_R.SetTimeDomainKernel(hrtfData);
	/* fade window: squared sine ramp over half a buffer (mono block size) */
	m_fade_window.resize(m_bufferSize / 2.);
	double phase_step = PI / 2. / (m_bufferSize / 2. - 1);
	for (int i = 0; i < m_bufferSize / 2.; i++)
	{
		m_fade_window[i] = sin(i * phase_step);
		m_fade_window[i] *= m_fade_window[i];
	}
}
// Starts playback from t=0; ignored until a clip has been loaded.
void AudioSource::Play()
{
	if (!m_isLoaded) { return; }
	m_isActive = true;
	m_lastingTime = 0;
}
// Opens the clip's backing WAV file.  On success the clip is remembered and
// the energy-window rate (1024-frame windows per second) is derived.
// On failure m_isLoaded stays false and the source cannot be played.
void AudioSource::Load(AudioClip* p_clip)
{
	m_isLoaded = false;
	//TODO: pick a reader based on the file extension.
	// FIX: free any reader left over from a previous Load (was leaked).
	delete m_wavReader;
	m_wavReader = new WavFileReader();
	if (!m_wavReader->OpenFile(std::string(p_clip->GetName()))) { return; }
	m_isLoaded = true;
	m_audioClip = p_clip;
	m_energyRate = m_wavReader->GetSampleRate() / (float)1024;
}
// Advances playback time and, when energy detection is enabled, computes a
// beat-energy metric: the energy of the current 1024-frame window relative
// to a rolling one-second average of past windows.
void AudioSource::Update(double p_deltaTime)
{
	if (!m_isActive || !m_audioClip) { return; }
	m_lastingTime += p_deltaTime;
	/* energy detection */
	if (m_isEnergyDetecting)
	{
		// Index of the 1024-frame window that the current time falls in.
		int index = m_lastingTime * m_energyRate;
		/* energy of the current window: sum of squared samples across channels */
		m_thisEnergy = 0;
		for (int i = 0; i < m_wavReader->GetChannels() * 1024; i++)
		{
			m_thisEnergy += pow(m_wavReader->GetSampleValue(m_wavReader->GetChannels() * 1024 * index + i), 2);
		}
		/* rolling average: fold in every window skipped since the last tick */
		for (int j = m_avergeOldIndex + 1; j < index; j++)
		{
			long long historyEnergy = 0;
			for (int i = 0; i < m_wavReader->GetChannels() * 1024; i++)
			{
				historyEnergy += pow(m_wavReader->GetSampleValue(m_wavReader->GetChannels() * 1024 * j + i), 2);
			}
			/* update the running average */
			if (m_avergeOldIndex < (int)m_energyRate - 1)
			{
				// Still filling the first second: plain cumulative average.
				m_avergeEnergy *= m_avergeOldIndex;
				m_avergeEnergy += historyEnergy;
				m_avergeOldIndex += 1;
				m_avergeEnergy /= m_avergeOldIndex;
				m_historyEnergys.push_front(historyEnergy);
			}
			else
			{
				// Window full: slide — add the new energy, drop the oldest.
				m_avergeEnergy *= (int)m_energyRate;
				m_avergeEnergy += historyEnergy;
				m_historyEnergys.push_front(historyEnergy);
				m_avergeEnergy -= m_historyEnergys.back();
				m_avergeEnergy /= (int)m_energyRate;
				m_avergeOldIndex += 1;
				m_historyEnergys.pop_back();
			}
		}
		// Ratio of current window energy to the average, clamped at the cap.
		m_thisEnergyScale = m_avergeEnergy == 0 ? 0 : m_thisEnergy / (float)m_avergeEnergy;
		m_thisEnergyScale = m_thisEnergyScale < m_maxEnergyScale ? m_thisEnergyScale : m_maxEnergyScale;
	}
}
// Current beat energy: this window's energy over the rolling average,
// clamped to m_maxEnergyScale (0 until detection has produced a value).
float AudioSource::GetEnergy()
{
	return m_thisEnergyScale;
}
void AudioSource::StartEnergyDetect()
{
	m_isEnergyDetecting = true;
}
void AudioSource::CloseEnergyDetect()
{
	m_isEnergyDetecting = false;
}
// Interleaved stereo block most recently produced by PrepareBufferData().
short* AudioSource::GetBufferData()
{
	return m_buffer;
}
// Produces the next interleaved stereo block in m_buffer:
//  1) read m_bufferSize raw samples and downmix pairs to mono
//     (assumes a 2-channel source — TODO confirm for mono files),
//  2) convolve the mono block with the left/right HRTF filters in
//     128-sample sub-blocks,
//  3) if the direction changed, recompute with the new kernels and
//     crossfade old->new to avoid clicks,
//  4) interleave L/R into m_buffer (implicit float->short truncation).
void AudioSource::PrepareBufferData()
{
	/* dry audio: read + stereo-to-mono downmix */
	float sample = 0;
	std::vector<float> samples;
	for (int i = 0; i < m_bufferSize; i++)
	{
		m_sampleIndex++;
		std::vector<float> data;
		if (m_sampleIndex >= m_wavReader->GetSampleNum())
		{
			// Past end of file: pad with silence and mark playback finished.
			sample += 0;
			m_isActive = false;
			continue;
		}
		sample += m_wavReader->GetSampleValue(m_sampleIndex);
		if (i % 2 == 1)
		{
			// Average each L/R pair into one mono sample.
			samples.push_back(sample / 2.);
			sample = 0;
		}
	}
	//####/* hrtf */####
	/* convolve with the current HRTF kernels, 128 samples at a time */
	std::vector<float> hrtfLeft(m_bufferSize / 2, 0);
	std::vector<float> hrtfRight(m_bufferSize / 2, 0);
	for (int i = 0; i < AUDIO_BUFFER_SIZE / 2 / 128; i++)
	{
		std::vector<float> subSamples(128);
		std::vector<float> subHrtfLeft(128);
		std::vector<float> subHrtfRight(128);
		std::copy(samples.begin() + i * 128, samples.begin() + (i + 1) * 128, subSamples.begin());
		m_hrtf_filter_L.AddSignalBlock(subSamples);
		m_hrtf_filter_L.GetResult(&subHrtfLeft);
		std::copy(subHrtfLeft.begin(), subHrtfLeft.end(), hrtfLeft.begin() + 128 * i);
		m_hrtf_filter_R.AddSignalBlock(subSamples);
		m_hrtf_filter_R.GetResult(&subHrtfRight);
		std::copy(subHrtfRight.begin(), subHrtfRight.end(), hrtfRight.begin() + 128 * i);
	}
	/* direction changed since the last block? */
	if (m_hrtf.SetDirection(m_elevation, m_azimuth))
	{
		/* install the kernels for the new direction (normalized HRIRs) */
		std::vector<float> hrtfData;
		m_hrtf.GetLeftEarTimeHRTF(hrtfData);
		for (int i = 0; i < 128; i++)
		{
			hrtfData[i] /= (float)32767;
		}
		m_hrtf_filter_L.SetTimeDomainKernel(hrtfData);
		hrtfData.clear();
		m_hrtf.GetRightEarTimeHRTF(hrtfData);
		for (int i = 0; i < 128; i++)
		{
			hrtfData[i] /= (float)32767;
		}
		m_hrtf_filter_R.SetTimeDomainKernel(hrtfData);
		/* re-convolve the same block with the new kernels */
		std::vector<float> newHrtfLeft(m_bufferSize / 2, 0);
		std::vector<float> newHrtfRight(m_bufferSize / 2, 0);
		for (int i = 0; i < AUDIO_BUFFER_SIZE / 2 / 128; i++)
		{
			std::vector<float> subSamples(128);
			std::vector<float> subHrtfLeft(128);
			std::vector<float> subHrtfRight(128);
			std::copy(samples.begin() + i * 128, samples.begin() + (i + 1) * 128, subSamples.begin());
			m_hrtf_filter_L.AddSignalBlock(subSamples);
			m_hrtf_filter_L.GetResult(&subHrtfLeft);
			std::copy(subHrtfLeft.begin(), subHrtfLeft.end(), newHrtfLeft.begin() + 128 * i);
			m_hrtf_filter_R.AddSignalBlock(subSamples);
			m_hrtf_filter_R.GetResult(&subHrtfRight);
			std::copy(subHrtfRight.begin(), subHrtfRight.end(), newHrtfRight.begin() + 128 * i);
		}
		/* crossfade old result out, new result in */
		ApplyFadeWindow(hrtfLeft, newHrtfLeft, &hrtfLeft);
		ApplyFadeWindow(hrtfRight, newHrtfRight, &hrtfRight);
	}
	// Interleave: even indices = left ear, odd = right ear.
	for (int i = 0; i < m_bufferSize; i++)
	{
		if (i % 2 == 0)
		{
			m_buffer[i] = hrtfLeft[i / 2];
		}
		else
		{
			m_buffer[i] = hrtfRight[i / 2];
		}
	}
}
// Crossfades two equally long blocks: p_block_last fades out while
// p_block_next fades in, using the precomputed sin^2 window (indexed
// backwards for the fade-out).  p_fade_result may alias p_block_last — the
// caller passes the same vector as input and output — which is safe because
// each element is read and then written at the same index exactly once.
void AudioSource::ApplyFadeWindow(const std::vector<float>& p_block_last, const std::vector<float>& p_block_next, std::vector<float>* p_fade_result)
{
	for (int i = 0; i < m_bufferSize / 2.; i++)
	{
		(*p_fade_result)[i] = p_block_last[i] * m_fade_window[m_bufferSize / 2. - 1 - i] + p_block_next[i] * m_fade_window[i];
	}
}
// Stores the source direction in degrees.  Elevation is clamped to the
// physically meaningful [-90, 90] range; azimuth is stored as given.
void AudioSource::SetDirection(int p_elevation, int p_azimuth)
{
	if (p_elevation > 90)
	{
		p_elevation = 90;
	}
	else if (p_elevation < -90)
	{
		p_elevation = -90;
	}
	m_elevation = p_elevation;
	m_azimuth = p_azimuth;
}
/*
float sample = 0;
std::vector<float> samples;
for (int i = 0; i < m_bufferSize; i++)
{
m_sampleIndex++;
std::vector<float> data;
if (m_sampleIndex >= m_wavReader->GetSampleNum())
{
sample += 0;
m_isActive = false;
continue;
}
sample += m_wavReader->GetSampleValue(m_sampleIndex);
if (i % 2 == 1)
{
samples.push_back(sample / 2.);
sample = 0;
}
}
std::vector<float> reberationLeft(m_bufferSize / 2, 0);
std::vector<float> reberationRight(m_bufferSize / 2, 0);
m_reberation.AddReberation(samples, &reberationLeft, &reberationRight);
for (int i = 0; i < m_bufferSize; i++)
{
if (i % 2 == 0)
{
m_buffer[i] = reberationLeft[i / 2];
}
else
{
m_buffer[i] = reberationRight[i / 2];
}
}
*/
/*
float sample = 0;
std::vector<float> samples;
for (int i = 0; i < m_bufferSize; i++)
{
m_sampleIndex++;
std::vector<float> data;
if (m_sampleIndex >= m_wavReader->GetSampleNum())
{
sample += 0;
m_isActive = false;
continue;
}
sample += m_wavReader->GetSampleValue(m_sampleIndex);
if (i % 2 == 1)
{
samples.push_back(sample / 2.);
sample = 0;
}
}
std::vector<float> hrtfLeft(m_bufferSize / 2, 0);
std::vector<float> hrtfRight(m_bufferSize / 2, 0);
for (int i = 0; i < AUDIO_BUFFER_SIZE / 2 / 128; i++)
{
std::vector<float> subSamples(128);
std::vector<float> subHrtfLeft(128);
std::vector<float> subHrtfRight(128);
std::copy(samples.begin() + i * 128, samples.begin() + (i + 1) * 128, subSamples.begin());
m_hrtf_filter_L.AddSignalBlock(subSamples);
m_hrtf_filter_L.GetResult(&subHrtfLeft);
std::copy(subHrtfLeft.begin(), subHrtfLeft.end(), hrtfLeft.begin() + 128 * i);
m_hrtf_filter_R.AddSignalBlock(subSamples);
m_hrtf_filter_R.GetResult(&subHrtfRight);
std::copy(subHrtfRight.begin(), subHrtfRight.end(), hrtfRight.begin() + 128 * i);
}
for (int i = 0; i < m_bufferSize; i++)
{
if (i % 2 == 0)
{
m_buffer[i] = hrtfLeft[i / 2];
}
else
{
m_buffer[i] = hrtfRight[i / 2];
}
}
*/
/*
for (int i = 0; i < m_bufferSize; i++)
{
if (i % 2 == 0)
{
m_buffer[i] = samples[i / 2];
}
else
{
m_buffer[i] = samples[i / 2];
}
}
*/

View File

@@ -0,0 +1,58 @@
#pragma once
#include<Windows.h>
#include"AudioClip.h"
#include"WavFileReader.h"
#include"AudioConfig.h"
#include"../global/Base.h"
#include"../audio3d/FFTFilter.h"
#include"../audio3d/Reberation.h"
#include"../audio3d/HRTF.h"
#pragma comment(lib,"winmm.lib")
// A playable audio source: streams PCM from a WAV file, applies HRTF
// spatialization, and optionally tracks a beat-energy metric.
class AudioSource
{
public:
	AudioSource();
	~AudioSource();
	void Play();                     // Starts playback (requires a prior successful Load).
	void Load(AudioClip* p_clip);    // Opens the clip's WAV file.
	void Update(double p_deltaTime); // Advances time / energy detection.
	void StartEnergyDetect();
	void CloseEnergyDetect();
	float GetEnergy();               // Energy of the current window relative to the rolling average.
	short* GetBufferData();          // Interleaved stereo block last prepared.
	void PrepareBufferData();        // Fills m_buffer with the next HRTF-filtered block.
	void SetDirection(int p_elevation, int p_azimuth); // Direction in degrees.
	bool IsActive() { return m_isActive; }
private:
	void HRTFInitialize();
	void ApplyFadeWindow(const std::vector<float>& p_block_last, const std::vector<float>& p_block_next, std::vector<float>* p_fade_result);
private:
	AudioClip* m_audioClip{nullptr};       // Not owned (provided by AudioManager).
	WavFileReader* m_wavReader{ nullptr }; // Allocated in Load().
	double m_lastingTime = 0;              // Seconds since Play().
	bool m_isActive = false;
	bool m_isLoaded = false;
	bool m_isEnergyDetecting = 0;          // NOTE: bool initialized with 0 ('false' would be clearer).
	long long m_avergeEnergy = 0;          // Rolling average window energy ("averge" sic).
	int m_avergeOldIndex = 0;              // Last 1024-frame window folded into the average.
	long long m_thisEnergy = 0;            // Energy of the current window.
	float m_thisEnergyScale = 0;           // m_thisEnergy / average, clamped below.
	float m_maxEnergyScale = 5.;           // Clamp for the energy ratio.
	float m_energyRate = 0;                // Energy windows per second (sampleRate / 1024).
	std::deque<long long> m_historyEnergys{}; // Window energies inside the rolling average.
	const static int m_bufferSize = AUDIO_BUFFER_SIZE;
	short m_buffer[m_bufferSize] = { 0 };  // Interleaved L/R output block.
	int m_sampleIndex = 0;                 // Next raw sample to read from the WAV data.
	int m_elevation = 0;                   // Degrees, clamped to [-90, 90].
	int m_azimuth = 0;                     // Degrees.
	FFTFilter m_filter;                    // Generic filter (experimental path, unused).
	Reberation m_reberation;               // Reverb processor ("Reberation" sic).
	HRTF m_hrtf;                           // HRTF database lookup.
	FFTFilter m_hrtf_filter_L;             // Left-ear convolution filter.
	FFTFilter m_hrtf_filter_R;             // Right-ear convolution filter.
	std::vector<float> m_fade_window;      // sin^2 crossfade window for direction changes.
};

View File

@@ -0,0 +1,3 @@
# Collect every .cpp under this directory (recursively) into the library.
# FIX: removed the stray "./" globbing expression, which matched nothing.
file(GLOB_RECURSE AUDIO "*.cpp")
add_library(audio ${AUDIO})

View File

@@ -0,0 +1,223 @@
#include"WavFileReader.h"
WavFileReader::WavFileReader(){}
// Closes the underlying stream (RAII would do this too; kept explicit).
WavFileReader::~WavFileReader()
{
	CloseFile();
}
// Opens and parses a WAV file.  Returns true on success, or immediately if
// the same file is already open.  On parse failure the stream is closed.
bool WavFileReader::OpenFile(const std::string& p_fileName)
{
	if (p_fileName == m_fileName) {
		std::cerr << "已经打开了该WAV文件" << std::endl;
		return true;
	}
	m_file = std::ifstream(p_fileName, std::ios::binary);
	if (!m_file) {
		std::cerr << "无法打开WAV文件" << std::endl;
		return false;
	}
	if (ProcessWavFile()) {
		m_fileName = p_fileName;
		return true;
	}
	else {
		m_file.close();
		return false;
	}
}
// Closes the stream and forgets the file name.
void WavFileReader::CloseFile()
{
	if (m_file) { m_file.close(); }
	m_fileName = "";
}
// Parses the RIFF and fmt headers, then scans the remaining chunks to
// locate the PCM "data" chunk.  Caches the derived format fields.
// Returns false (and closes the file) on any structural problem.
bool WavFileReader::ProcessWavFile()
{
	/* parse the file header (RIFF + fmt) */
	WavRIFF riff;
	WavFormat format;
	m_file.seekg(0, std::ios::end);
	m_fileSize = m_file.tellg();
	m_file.seekg(0, std::ios::beg);
	m_file.read(reinterpret_cast<char*>(&riff), sizeof(WavRIFF));
	if (strncmp(riff.id, "RIFF", 4) != 0 || strncmp(riff.waveFlag, "WAVE", 4) != 0){
		std::cerr << "不是有效的WAV文件!" << std::endl;
		CloseFile();
		return false;
	}
	m_file.read(reinterpret_cast<char*>(&format), sizeof(WavFormat));
	if (strncmp(format.id, "fmt ", 4) != 0){
		std::cerr << "format读取失败!" << std::endl;
		CloseFile();
		return false;
	}
	if (format.formatTag != 1){
		std::cerr << "数据格式非pcm程序不支持!" << std::endl;
		CloseFile();
		return false;
	}
	/* scan the chunk list that follows the fmt chunk */
	while (m_file.tellg() < m_fileSize){
		WavChunk chunk;
		m_file.read(reinterpret_cast<char*>(&chunk), sizeof(WavChunk));
		if (strncmp(chunk.id, "LIST", 4) == 0){
			std::cout << "LIST" << std::endl;
			//ProcessLIST(chunk.dataLength);
			m_file.seekg(chunk.dataLength, std::ios::cur);
			continue;
		}
		else if (strncmp(chunk.id, "data", 4) == 0){
			std::cout << "data" << std::endl;
			// Remember where the PCM payload starts and how long it is.
			m_dataOffset = m_file.tellg();
			m_dataLength = chunk.dataLength;
			m_file.seekg(chunk.dataLength, std::ios::cur);
			continue;
		}
		else{
			// Unknown chunk: skip its payload.
			m_file.seekg(chunk.dataLength, std::ios::cur);
			continue;
		}
	}
	/* cache the derived format information */
	m_fileLength = riff.fileLength + 8;
	m_channels = format.channels;
	m_sampleRate = format.samplesPerSec;
	m_bitsPerSample = format.bitsPerSample;
	m_bytesPerSec = format.avgBytesPerSec;
	m_lastingTime = (float)m_dataLength / m_bytesPerSec;
	m_sampleNum = m_dataLength / (m_bitsPerSample / 8);
	// BUG FIX: the original fell off the end of this non-void function
	// (undefined behavior).  The success path must return true.
	return true;
}
//TODO::
void WavFileReader::ProcessLIST(int listLength){
while (listLength > 0){
//if (strncmp(id, "INFO", 4) == 0){
//char* info = new char[chunk.dataLength];
//m_file.read(info, chunk.dataLength);
//TODO:处理INFO
//delete[] info;
//}
//else if (strncmp(id, "adtl", 4) == 0) {
//char* adtl = new char[chunk.dataLength];
//m_file.read(adtl, chunk.dataLength);
//TODO:处理adtl
//delete[] adtl;
//}
//else {
//char* custom = new char[chunk.dataLength];
//m_file.read(custom, chunk.dataLength);
//TODO:处理自定义块
//delete[] custom;
//}
}
}
// Reads p_readLength bytes of sample data starting p_position bytes into
// the data chunk.  Returns the number of bytes actually read (0 on error).
int WavFileReader::ReadData(char* p_buffer, int p_position, int p_readLength){
	if (!m_file) {
		std::cerr << "该WAV文件不存在" << std::endl;
		return 0;
	}
	// NOTE(review): int vs. uint64_t comparison below — fine while files
	// stay under 2 GiB; confirm if larger files are expected.
	int filePosition = m_dataOffset + p_position;
	if(filePosition>m_fileSize){
		return 0;
	}
	m_file.seekg(m_dataOffset + p_position, std::ios::beg);
	m_file.read(p_buffer, p_readLength);
	int realLength = m_file.gcount();
	return realLength;
}
// Seeks to an absolute byte offset in the file.
void WavFileReader::SetPosition(int p_position){
	if (m_file){
		m_file.seekg(p_position, std::ios::beg);
	}
}
// Current absolute byte offset, or -1 when no file is open.
int WavFileReader::GetPosition(){
	if (!m_file)
		return -1;
	return m_file.tellg();
}
// ---- Cached-format getters: all return -1 when no file is open. ----
int WavFileReader::GetFileLength(){
	if (!m_file)
		return -1;
	return m_fileLength;
}
int WavFileReader::GetDataLength()
{
	if (!m_file)
		return -1;
	return m_dataLength;
}
int WavFileReader::GetChannels(){
	if (!m_file)
		return -1;
	return m_channels;
}
int WavFileReader::GetSampleRate(){
	if (!m_file)
		return -1;
	return m_sampleRate;
}
int WavFileReader::GetBytesPerSec(){
	if (!m_file)
		return -1;
	return m_bytesPerSec;
}
int WavFileReader::GetBitsPerSample(){
	if (!m_file)
		return -1;
	return m_bitsPerSample;
}
// Duration of the data chunk in seconds.
float WavFileReader::GetLastingTime(){
	if (!m_file)
		return -1;
	return m_lastingTime;
}
// Total number of (per-channel) samples in the data chunk.
int WavFileReader::GetSampleNum(){
	if (!m_file)
		return -1;
	return m_sampleNum;
}
// Returns the PCM sample at p_index, assembled little-endian, or -1 on
// error (indistinguishable from a genuine -1 sample — documented behavior
// kept from the original).
int16_t WavFileReader::GetSampleValue(int p_index) {
	// FIX: the index/m_sampleNum comparison was signed/unsigned; cast
	// explicitly and reject negative indices.
	if (p_index < 0 || p_index >= (int)m_sampleNum || !m_file)
		return -1;
	const unsigned int bytesPerSample = m_bitsPerSample / 8;
	// Guard formats wider than 16 bits: the original shifted them into an
	// int16_t (undefined/garbage); reject them explicitly instead.
	if (bytesPerSample == 0 || bytesPerSample > sizeof(int16_t))
		return -1;
	// FIX: stack buffer instead of a heap new[]/delete[] for <= 2 bytes.
	char sampleBytes[sizeof(int16_t)] = { 0 };
	if (ReadData(sampleBytes, p_index * bytesPerSample, bytesPerSample) != (int)bytesPerSample)
		return -1;
	int16_t sampleValue = 0;
	for (unsigned int i = 0; i < bytesPerSample; ++i) {
		sampleValue |= static_cast<int16_t>(static_cast<unsigned char>(sampleBytes[i])) << (8 * i);
	}
	return sampleValue;
}

View File

@@ -0,0 +1,43 @@
#pragma once
#include"WavStruct.h"
#include <fstream>
// Minimal PCM WAV file reader: parses the RIFF/fmt/data chunks and exposes
// random access to individual samples.  Only formatTag==1 (PCM) files are
// accepted.  Getters return -1 when no file is open.
class WavFileReader
{
public:
	WavFileReader();
	~WavFileReader();
	bool OpenFile(const std::string& p_fileName); // Opens + parses; false on failure.
	void CloseFile();
	int ReadData(char* p_buffer, int p_position, int p_readLength); // Bytes read, relative to data chunk.
	void SetPosition(int p_position);  // Absolute byte seek.
	int GetPosition();
	int GetDataLength();               // Data-chunk payload bytes.
	int GetFileLength();               // Whole file, per RIFF header + 8.
	int GetChannels();
	int GetSampleRate();
	int GetBitsPerSample();
	float GetLastingTime();            // Duration in seconds.
	int GetBytesPerSec();
	int GetSampleNum();                // Per-channel sample count over all channels.
	int16_t GetSampleValue(int p_index); // Little-endian sample, -1 on error.
private:
	bool ProcessWavFile();
	void ProcessSampleValue();         // Declared but not defined in this file.
	void ProcessLIST(int p_listLength);
	void ProcessCustom();              // Declared but not defined in this file.
	void ProcessDATA();                // Declared but not defined in this file.
private:
	std::ifstream m_file;
	uint32_t m_fileLength = 0;   // RIFF fileLength + 8.
	uint32_t m_dataLength = 0;   // Bytes in the data chunk.
	uint32_t m_dataOffset = 0;   // File offset where PCM data starts.
	uint32_t m_channels = 0;
	uint32_t m_sampleRate = 0;
	uint32_t m_bitsPerSample = 0;
	uint32_t m_bytesPerSec = 0;
	float m_lastingTime = 0;     // Seconds of audio in the data chunk.
	std::string m_fileName = ""; // Currently open file ("" when closed).
	uint32_t m_sampleNum = 0;    // dataLength / bytesPerSample.
	uint64_t m_fileSize = 0;     // Total file size in bytes.
};

View File

@@ -0,0 +1,26 @@
#pragma once
#include<iostream>
// On-disk WAV (RIFF) header layouts.  WavFileReader reads these structs
// directly from the file with istream::read, which assumes no compiler
// padding (all fields are naturally aligned here) and a little-endian host.
// NOTE(review): the reader writes through reinterpret_cast into the const
// char[] members below, which is formally UB — consider non-const fields.
struct WavRIFF
{
	const char id[4] = { 'R','I', 'F', 'F' };       // Overwritten by the file read, then validated.
	uint32_t fileLength;                            // File size minus 8 bytes.
	const char waveFlag[4] = { 'W','A', 'V', 'E' };
};
struct WavFormat
{
	const char id[4] = { 'f','m', 't', ' ' };
	uint32_t blockSize = 16;                        // fmt payload size (16 for plain PCM).
	uint16_t formatTag;                             // 1 == PCM; anything else is rejected.
	uint16_t channels;
	uint32_t samplesPerSec;
	uint32_t avgBytesPerSec;
	uint16_t blockAlign;
	uint16_t bitsPerSample;
};
// Generic chunk header used while scanning for "data"/"LIST" chunks.
struct WavChunk
{
	char id[4];
	uint32_t dataLength;
};

View File

@@ -0,0 +1,3 @@
# Collect every .cpp under this directory (recursively) into the library.
# FIX: removed the stray "./" globbing expression, which matched nothing.
file(GLOB_RECURSE AUDIO3D "*.cpp")
add_library(audio3d ${AUDIO3D})

View File

@@ -0,0 +1,232 @@
#include <assert.h>
#include <cmath>
#include <string>
#include"FFTFilter.h"
// Overlap-based FFT convolution filter.  filter_len is both the maximum
// kernel length and the processing block size; the FFT runs at twice that
// length so each linear convolution fits without circular wrap-around.
FFTFilter::FFTFilter(int filter_len)
	: max_kernel_len_(filter_len),
	  fft_len_(filter_len * 2),
	  filter_state_(filter_len, 0.0f),
	  kernel_defined_(false),
	  kernel_time_domain_buffer_(fft_len_),
	  kernel_freq_domain_buffer_(fft_len_ / 2 + 1),
	  buffer_selector_(0),
	  signal_time_domain_buffer_(2, std::vector<kiss_fft_scalar>(fft_len_)),
	  signal_freq_domain_buffer_(2, std::vector < kiss_fft_cpx >(fft_len_ / 2 + 1)),
	  filtered_freq_domain_buffer_(fft_len_ / 2 + 1) {
	// kiss_fftr requires an even length; a power of two keeps it fast and
	// is asserted here (so filter_len itself must be a power of two).
	bool is_power_of_two = ((fft_len_ != 0) && !(fft_len_ & (fft_len_ - 1)));
	assert(is_power_of_two && "Filter length must be a power of 2");
	forward_fft_ = kiss_fftr_alloc(fft_len_, 0, 0, 0);
	inverse_fft_ = kiss_fftr_alloc(fft_len_, 1, 0, 0);
	Init();
}
// Frees the kissfft plans allocated in the constructor.
FFTFilter::~FFTFilter() {
	kiss_fft_free(forward_fft_);
	kiss_fft_free(inverse_fft_);
}
// Zeroes every working buffer (kernel and both signal slots).
void FFTFilter::Init() {
	memset(&kernel_time_domain_buffer_[0], 0, sizeof(kiss_fft_scalar) * fft_len_);
	memset(&kernel_freq_domain_buffer_[0], 0,
		sizeof(kiss_fft_cpx) * (fft_len_ / 2 + 1));
	for (int i = 0; i < 2; ++i) {
		memset(&signal_time_domain_buffer_[i][0], 0,
			sizeof(kiss_fft_scalar) * fft_len_);
		memset(&signal_freq_domain_buffer_[i][0], 0,
			sizeof(kiss_fft_cpx) * (fft_len_ / 2 + 1));
	}
}
// FFT of a time-domain signal (zero-padded to fft_len_).  Output is packed
// as interleaved (re, im) pairs, fft_len_ + 2 floats in total.
void FFTFilter::ForwardTransform(const vector<float>& time_signal,
	vector<float>* freq_signal) const {
	assert(freq_signal);
	assert(
		time_signal.size() <= max_kernel_len_
		&& "Kernel size must be <= max_kernel_len_");
	vector<kiss_fft_scalar> time_domain_buffer(fft_len_);
	VectorCopyWithZeroPadding(time_signal, &time_domain_buffer);
	vector<kiss_fft_cpx> freq_domain_buffer(fft_len_ / 2 + 1);
	kiss_fftr(forward_fft_, &time_domain_buffer[0], &freq_domain_buffer[0]);
	// Interleave the complex bins into the flat float output.
	freq_signal->resize(fft_len_ + 2);
	vector<float>::iterator freq_out_itr = freq_signal->begin();
	for (int freq_c = 0; freq_c < freq_domain_buffer.size(); ++freq_c) {
		*freq_out_itr = freq_domain_buffer[freq_c].r;
		++freq_out_itr;
		*freq_out_itr = freq_domain_buffer[freq_c].i;
		++freq_out_itr;
	}
}
// Inverse of ForwardTransform: unpacks interleaved (re, im) pairs, runs the
// inverse FFT, and removes kissfft's scale factor.
void FFTFilter::InverseTransform(const vector<float>& freq_signal,
	vector<float>* time_signal) const {
	assert(time_signal);
	assert(
		freq_signal.size() == fft_len_ + 2
		&& "Frequency domain signal must match fft_len_+2");
	vector<kiss_fft_cpx> freq_domain_buffer(fft_len_ / 2 + 1);
	vector<float>::const_iterator freq_in_itr = freq_signal.begin();
	for (int freq_c = 0; freq_c < freq_domain_buffer.size(); ++freq_c) {
		freq_domain_buffer[freq_c].r = *freq_in_itr;
		++freq_in_itr;
		freq_domain_buffer[freq_c].i = *freq_in_itr;
		++freq_in_itr;
	}
	time_signal->resize(fft_len_);
	kiss_fftri(inverse_fft_, &freq_domain_buffer[0], &(*time_signal)[0]);
	InverseFFTScaling(time_signal);
}
// kissfft's round trip scales the signal by fft_len_; divide it back out.
void FFTFilter::InverseFFTScaling(vector<float>* signal) const {
	assert(signal);
	assert(signal->size() == fft_len_);
	for (int i = 0; i < fft_len_; ++i) {
		(*signal)[i] /= fft_len_;
	}
}
// Installs a new kernel given in the time domain (replaces the old one).
void FFTFilter::SetTimeDomainKernel(const std::vector<float>& kernel) {
	assert(
		kernel.size() <= max_kernel_len_
		&& "Kernel size must be <= max_kernel_len_");
	VectorCopyWithZeroPadding(kernel, &kernel_time_domain_buffer_);
	kiss_fftr(forward_fft_, &kernel_time_domain_buffer_[0],
		&kernel_freq_domain_buffer_[0]);
	kernel_defined_ = true;
}
// Cascades another time-domain kernel onto the current one (multiplication
// in the frequency domain == convolution of the kernels).
// NOTE(review): unlike SetTimeDomainKernel, no size assert and
// kernel_defined_ is not touched — assumes a kernel was already set.
void FFTFilter::AddTimeDomainKernel(const vector<float>& kernel) {
	vector<kiss_fft_cpx> temp_freq_domain_buffer(fft_len_ / 2 + 1);
	VectorCopyWithZeroPadding(kernel, &kernel_time_domain_buffer_);
	kiss_fftr(forward_fft_, &kernel_time_domain_buffer_[0],
		&temp_freq_domain_buffer[0]);
	ComplexVectorProduct(temp_freq_domain_buffer, kernel_freq_domain_buffer_,
		&kernel_freq_domain_buffer_);
}
// Installs a kernel already given in the frequency domain, packed as
// interleaved (re, im) pairs of length fft_len_ + 2.
void FFTFilter::SetFreqDomainKernel(const std::vector<float>& kernel) {
	assert(kernel.size() == fft_len_ + 2);
	vector<float>::const_iterator kernel_itr = kernel.begin();
	for (int freq_c = 0; freq_c < kernel_freq_domain_buffer_.size(); ++freq_c) {
		kernel_freq_domain_buffer_[freq_c].r = *kernel_itr;
		++kernel_itr;
		kernel_freq_domain_buffer_[freq_c].i = *kernel_itr;
		++kernel_itr;
	}
	kernel_defined_ = true;
}
// Cascades a frequency-domain kernel onto the current one (see above).
void FFTFilter::AddFreqDomainKernel(const vector<float>& kernel) {
	vector<kiss_fft_cpx> temp_freq_domain_buffer(fft_len_ / 2 + 1);
	vector<float>::const_iterator kernel_itr = kernel.begin();
	for (int freq_c = 0; freq_c < kernel_freq_domain_buffer_.size(); ++freq_c) {
		temp_freq_domain_buffer[freq_c].r = *kernel_itr;
		++kernel_itr;
		temp_freq_domain_buffer[freq_c].i = *kernel_itr;
		++kernel_itr;
	}
	ComplexVectorProduct(temp_freq_domain_buffer, kernel_freq_domain_buffer_,
		&kernel_freq_domain_buffer_);
}
// Copies |input| into the head of |output| and zero-fills the tail.
// FIX: guard both memcpy and memset — &v[0] on an empty vector (or a
// fully-consumed tail) is undefined behavior.
void FFTFilter::VectorCopyWithZeroPadding(
	const vector<kiss_fft_scalar>& input,
	vector<kiss_fft_scalar>* output) const {
	assert(output);
	assert(input.size() <= output->size());
	if (!input.empty()) {
		memcpy(&((*output)[0]), &input[0], sizeof(kiss_fft_scalar) * input.size());
	}
	if (output->size() > input.size()) {
		memset(&((*output)[input.size()]), 0,
			sizeof(kiss_fft_scalar) * (output->size() - input.size()));
	}
}
void FFTFilter::AddSignalBlock(const vector<float>& signal_block) {
assert(
signal_block.size() == max_kernel_len_
&& "Signal block size must match filter length");
assert(kernel_defined_ && "No suitable kernel defined");
// Switch buffer selector
buffer_selector_ = !buffer_selector_;
vector<kiss_fft_scalar>& time_domain_buffer =
signal_time_domain_buffer_[buffer_selector_];
vector<kiss_fft_cpx>& freq_domain_buffer =
signal_freq_domain_buffer_[buffer_selector_];
VectorCopyWithZeroPadding(signal_block, &time_domain_buffer);
// Perform forward FFT transform
kiss_fftr(forward_fft_, &time_domain_buffer[0], &freq_domain_buffer[0]);
// Complex vector product in frequency domain with transformed kernel.
ComplexVectorProduct(freq_domain_buffer, kernel_freq_domain_buffer_,
&filtered_freq_domain_buffer_);
// Perform inverse FFT transform of filtered_freq_domain_buffer_ and store result back in signal_time_domain_buffer_
kiss_fftri(inverse_fft_, &filtered_freq_domain_buffer_[0],
&time_domain_buffer[0]);
// Invert FFT scaling
InverseFFTScaling(&time_domain_buffer);
}
void FFTFilter::ComplexVectorProduct(const vector<kiss_fft_cpx>& input_a,
const vector<kiss_fft_cpx>& input_b,
vector<kiss_fft_cpx>* result) const {
assert(result);
assert(input_a.size() == input_b.size());
result->resize(input_a.size());
for (int i = 0; i < result->size(); ++i) {
float result_real = input_a[i].r * input_b[i].r
- input_a[i].i * input_b[i].i;
float result_imag = input_a[i].r * input_b[i].i
+ input_a[i].i * input_b[i].r;
(*result)[i].r = result_real;
(*result)[i].i = result_imag;
}
}
// Produces the final filtered block for the most recent AddSignalBlock()
// call: the head of the current buffer overlap-added with the tail of the
// previous buffer (standard overlap-add block convolution).
void FFTFilter::GetResult(vector<float>* signal_block) {
	assert(signal_block);
	signal_block->resize(max_kernel_len_);
	int curr_buf = buffer_selector_;
	int prev_buf = !buffer_selector_;
	for (int i = 0; i < max_kernel_len_; ++i) {
		(*signal_block)[i] = signal_time_domain_buffer_[curr_buf][i]
			+ signal_time_domain_buffer_[prev_buf][i + max_kernel_len_]; // Add overlap from previous FFT transform.
	}
}

View File

@@ -0,0 +1,51 @@
#pragma once
#include<vector>
#include"../kissfft/kiss_fftr.h"
// NOTE(review): `using namespace std;` in a header leaks into every includer;
// consider qualifying std:: instead.
using namespace std;
// FFT-based convolution filter (overlap-add block convolution on top of
// kissfft's real FFT). Typical use: set or accumulate a kernel, then
// repeatedly AddSignalBlock() + GetResult() on fixed-size blocks.
class FFTFilter
{
public:
	FFTFilter(int filter_len); // filter_len: filter (block) length in samples
	~FFTFilter();
	// Sets the time-domain filter kernel used for convolution.
	void SetTimeDomainKernel(const vector<float>& kernel);
	// Convolves the stored kernel with an additional time-domain kernel.
	void AddTimeDomainKernel(const vector<float>& kernel);
	// Sets a kernel given as interleaved (re, im) frequency bins.
	void SetFreqDomainKernel(const vector<float>& kernel);
	// Multiplies the stored kernel by another frequency-domain kernel.
	void AddFreqDomainKernel(const vector<float>& kernel);
	// Forward FFT: time-domain signal -> frequency-domain representation.
	void ForwardTransform(const vector<float>& time_signal, vector<float>* freq_signal) const;
	// Inverse FFT: frequency-domain representation -> time-domain signal.
	void InverseTransform(const vector<float>& freq_signal, vector<float>* time_signal) const;
	// Feeds one signal block (of filter length) into the pipeline; used for
	// processing a continuous stream block by block.
	void AddSignalBlock(const vector<float>& signal_block);
	// Retrieves the filtered block produced by the last AddSignalBlock().
	void GetResult(vector<float>* signal_block);
private:
	void Init();
	void ComplexVectorProduct(const vector<kiss_fft_cpx>& input_a, const vector<kiss_fft_cpx>& input_b, vector<kiss_fft_cpx>* result) const;
	void VectorCopyWithZeroPadding(const vector<kiss_fft_scalar>& input, vector<kiss_fft_scalar>* output) const;
	void InverseFFTScaling(vector<float>* signal) const;
	int max_kernel_len_; // maximum kernel (block) length
	int fft_len_;        // FFT size
	std::vector<float> filter_state_;
	std::vector<float> window_;
	bool kernel_defined_;
	std::vector<kiss_fft_scalar> kernel_time_domain_buffer_;
	std::vector<kiss_fft_cpx> kernel_freq_domain_buffer_;
	int buffer_selector_; //0 1 — selects the active half of the double buffers
	std::vector<vector<kiss_fft_scalar> > signal_time_domain_buffer_;
	std::vector<vector<kiss_fft_cpx> > signal_freq_domain_buffer_;
	std::vector<kiss_fft_cpx> filtered_freq_domain_buffer_;
	kiss_fftr_cfg forward_fft_;
	kiss_fftr_cfg inverse_fft_;
};

View File

@@ -0,0 +1,172 @@
#include"HRTF.h"
#include"global/Base.h"
// Constructs the HRTF lookup: a 128-tap FFT filter (used by the
// Get*FreqHRTF transforms) and the elevation/azimuth search table.
HRTF::HRTF():
	m_filter(128)
{
	BuildReacherTree();
}
// All members are value types; nothing to release explicitly.
HRTF::~HRTF()
{
}
// Returns the quantized direction currently in use (degrees). When the
// requested azimuth was negative, the stored mirrored azimuth is flipped
// back to the caller's sign convention.
void HRTF::GetDirection(int& p_elevation, int& p_azimuth)
{
	p_elevation = m_elevation;
	if (m_is_swap_left_and_right) {
		p_azimuth = -m_azimuth;
	}
	else {
		p_azimuth = m_azimuth;
	}
}
// Sets the requested direction in degrees. The azimuth is first normalized
// into (-180, 180]. Negative azimuths are mirrored to the positive side and
// the ears swapped on lookup (the data presumably covers one hemisphere only
// — see Get*EarTimeHRTF). Returns 1 when the quantized measurement direction
// changed, 0 otherwise.
int HRTF::SetDirection(int p_elevation, int p_azimuth)
{
	while (p_azimuth > 180)
		p_azimuth -= 360;
	while (p_azimuth < -180)
		p_azimuth += 360;
	if (m_real_elevation == p_elevation && m_real_azimuth == p_azimuth)
		return 0;  // same request as last time
	m_real_elevation = p_elevation;
	m_real_azimuth = p_azimuth;
	// Mirror negative azimuths; the swap flag flips the ears at lookup time.
	m_is_swap_left_and_right = (p_azimuth < 0);
	p_azimuth = m_is_swap_left_and_right ? -p_azimuth : p_azimuth;
	int index = SearchNearestIndex(p_elevation, p_azimuth);
	if (index == m_direction_index)
		return 0;  // quantized onto the same measured direction
	m_direction_index = index;
	m_elevation = kHRTFDirection[index][0];
	m_azimuth = kHRTFDirection[index][1];
	return 1;
}
// Copies the left-ear time-domain HRIR for the current direction into
// |p_data| (converted from 16-bit samples to float). When the requested
// azimuth was negative, the right-ear measurement is served instead.
void HRTF::GetLeftEarTimeHRTF(std::vector<float>& p_data)
{
	const int ear = m_is_swap_left_and_right ? 1 : 0;
	const short* samples = kHRTFData[m_direction_index][ear];
	p_data.assign(samples, samples + kHRTFFilterLen);
}
// Left-ear HRTF in the frequency domain: fetch the time-domain impulse
// response, then run it through the filter's forward FFT.
void HRTF::GetLeftEarFreqHRTF(std::vector<float>& p_data)
{
	std::vector<float> hrir;
	GetLeftEarTimeHRTF(hrir);
	m_filter.ForwardTransform(hrir, &p_data);
}
// Copies the right-ear time-domain HRIR for the current direction into
// |p_data| (converted from 16-bit samples to float). When the requested
// azimuth was negative, the left-ear measurement is served instead.
void HRTF::GetRightEarTimeHRTF(std::vector<float>& p_data)
{
	const int ear = m_is_swap_left_and_right ? 0 : 1;
	const short* samples = kHRTFData[m_direction_index][ear];
	p_data.assign(samples, samples + kHRTFFilterLen);
}
// Right-ear HRTF in the frequency domain: fetch the time-domain impulse
// response, then run it through the filter's forward FFT.
void HRTF::GetRightEarFreqHRTF(std::vector<float>& p_data)
{
	std::vector<float> hrir;
	GetRightEarTimeHRTF(hrir);
	m_filter.ForwardTransform(hrir, &p_data);
}
// Builds the elevation -> (azimuth list, index list) search table over the
// measured directions. Keys are elevations from -40 to 90 in 10-degree
// steps; for each key, .first holds the available azimuths (in data order)
// and .second the matching indices into kHRTFData/kHRTFDirection.
void HRTF::BuildReacherTree()
{
	for (int i = -40; i < 100; i += 10){
		hrtf_directions.insert(std::make_pair(i, std::make_pair(std::vector<int>(), std::vector<int>())));
	}
	for (int i = 0; i < kHRTFNum; i++)
	{
		hrtf_directions[kHRTFDirection[i][0]].first.push_back(kHRTFDirection[i][1]);
		hrtf_directions[kHRTFDirection[i][0]].second.push_back(i);
	}
}
// Finds the measured direction closest (by great-circle distance) to the
// requested (p_elevation, p_azimuth). Candidates are the azimuth neighbours
// just below/above the request on the two elevation rings bracketing the
// requested elevation; the nearest of the four wins.
int HRTF::SearchNearestIndex(int p_elevation, int p_azimuth)
{
	// Bracket the elevation between two measured rings (10-degree spacing,
	// clamped to the [-40, 90] range present in the table).
	int low_elevation = 0;
	int high_elevation = 0;
	if (p_elevation <= -40) {
		low_elevation = -40;
		high_elevation = low_elevation + 10;
	}
	else if (p_elevation >= 90) {
		high_elevation = 90;
		low_elevation = high_elevation - 10;
	}
	else {
		low_elevation = p_elevation < 0 ? 10 * (int)(p_elevation / 10. - 1) : 10 * (int)(p_elevation / 10.);
		high_elevation = low_elevation + 10;
	}
	// Azimuth neighbours on the lower ring.
	// NOTE(review): if p_azimuth is smaller than the first stored azimuth,
	// low_small becomes -1 (out-of-bounds later); if it is >= every stored
	// azimuth the loop never breaks and both candidates stay at index 0.
	// Confirm kHRTFDirection's azimuth coverage rules both cases out.
	int low_small = 0;
	int low_large = 0;
	for (int i = 0; i < hrtf_directions[low_elevation].first.size(); i++){
		if (p_azimuth < hrtf_directions[low_elevation].first[i]){
			low_small = i - 1;
			low_large = i;
			break;
		}
	}
	// Azimuth neighbours on the upper ring (same caveat as above).
	int high_small = 0;
	int high_large = 0;
	for (int i = 0; i < hrtf_directions[high_elevation].first.size(); i++){
		if (p_azimuth < hrtf_directions[high_elevation].first[i]) {
			high_small = i - 1;
			high_large = i;
			break;
		}
	}
	// Evaluate the four candidates and keep the closest one.
	int index = 0;
	double distance_min = CaculateSphereDistance(p_elevation, p_azimuth, low_elevation, hrtf_directions[low_elevation].first[low_small]);
	index = hrtf_directions[low_elevation].second[low_small];
	if (double newdistance = CaculateSphereDistance(p_elevation, p_azimuth, low_elevation, hrtf_directions[low_elevation].first[low_large]); newdistance < distance_min) {
		distance_min = newdistance;
		index = hrtf_directions[low_elevation].second[low_large];
	}
	if (double newdistance = CaculateSphereDistance(p_elevation, p_azimuth, high_elevation, hrtf_directions[high_elevation].first[high_small]); newdistance < distance_min) {
		distance_min = newdistance;
		index = hrtf_directions[high_elevation].second[high_small];
	}
	if (double newdistance = CaculateSphereDistance(p_elevation, p_azimuth, high_elevation, hrtf_directions[high_elevation].first[high_large]); newdistance < distance_min) {
		distance_min = newdistance;
		index = hrtf_directions[high_elevation].second[high_large];
	}
	return index;
}
// Great-circle (angular) distance, in radians, between two directions given
// in degrees, via the spherical law of cosines. The cosine argument is
// clamped to [-1, 1]: floating-point rounding can push it marginally outside
// acos()'s domain (e.g. for identical directions), which would return NaN.
double HRTF::CaculateSphereDistance(int p_elevation1, int p_azimuth1, int p_elevation2, int p_azimuth2)
{
	double elevation1 = p_elevation1 * PI / 180.;
	double elevation2 = p_elevation2 * PI / 180.;
	double azimuth1 = p_azimuth1 * PI / 180.;
	double azimuth2 = p_azimuth2 * PI / 180.;
	double cos_angle = sin(elevation1) * sin(elevation2)
		+ cos(elevation1) * cos(elevation2) * cos(azimuth1 - azimuth2);
	// Guard against |cos_angle| slightly exceeding 1 due to rounding.
	if (cos_angle > 1.0) cos_angle = 1.0;
	if (cos_angle < -1.0) cos_angle = -1.0;
	return acos(cos_angle);
}

View File

@@ -0,0 +1,41 @@
#pragma once
#include"mit_kemar_hrtf_data.h"
#include"../global/Base.h"
#include"FFTFilter.h"
#include"Resampler.h"
class HRTF
{
public:
HRTF();
~HRTF();
void GetDirection(int& p_elevation, int& p_azimuth);
int SetDirection(int p_elevation, int p_azimuth);
void GetLeftEarTimeHRTF(std::vector<float>& p_data);
void GetLeftEarFreqHRTF(std::vector<float>& p_data);
void GetRightEarTimeHRTF(std::vector<float>& p_data);
void GetRightEarFreqHRTF(std::vector<float>& p_data);
private:
void BuildReacherTree();
int SearchNearestIndex(int p_elevation, int p_azimuth);
double CaculateSphereDistance(int p_elevation1, int p_azimuth1, int p_elevation2, int p_azimuth2);
private:
int m_real_azimuth = 0; //ˮƽ½Ç
int m_real_elevation = 0; //¸©Ñö½Ç
int m_azimuth = 0;
int m_elevation = 0;
bool m_is_swap_left_and_right = false;
int m_direction_index = 0;
typedef std::vector<float> ResampledHRTFT;
typedef std::pair<ResampledHRTFT, ResampledHRTFT> ResampledHRTFPairT;
std::vector<ResampledHRTFPairT> hrtf_resampled_time_domain_;
std::vector<ResampledHRTFPairT> hrtf_resampled_freq_domain_;
std::map<int, std::pair<std::vector<int>, std::vector<int>>> hrtf_directions;
FFTFilter m_filter;
//Resampler m_resampler;
};

View File

@@ -0,0 +1,82 @@
#include"Reberation.h"
#include<assert.h>
// p_block_size: frames per processing block; p_sampling_rate: Hz;
// p_reberation_time: target reverb decay time in seconds.
// Renders a random, exponentially decaying stereo impulse response and
// builds one FFT convolution filter per ear.
Reberation::Reberation(int p_block_size, int p_sampling_rate, float p_reberation_time)
	: m_block_size(p_block_size),
	m_reberation_output_read_pos(0) {
	RenderImpulseResponse(p_block_size, p_sampling_rate, p_reberation_time);
	m_left_reberation_filter = new FFTFilter(m_block_size);
	m_left_reberation_filter->SetTimeDomainKernel(GetImpulseResponseLeft());
	m_right_reberation_filter = new FFTFilter(m_block_size);
	m_right_reberation_filter->SetTimeDomainKernel(GetImpulseResponseRight());
	m_reberation_input.reserve(m_block_size);
	// Output buffers start as silence; AddReberation reads them immediately.
	m_reberation_output_left.resize(m_block_size, 0.0f);
	m_reberation_output_right.resize(m_block_size, 0.0f);
}
// Releases the per-ear FFT filters allocated in the constructor (they were
// previously never freed, leaking both filters).
Reberation::~Reberation()
{
	delete m_left_reberation_filter;
	delete m_right_reberation_filter;
}
// Generates one block of white noise per ear, shaped by an exponential
// decay envelope. exp_decay = -13.8155 ~= ln(1e-6), so the envelope falls to
// 1e-6 after p_reberation_time seconds (measured from t=0, offset by the
// quiet period). srand(0) makes the impulse response deterministic across
// runs. NOTE(review): FloatRand() is in [0, 1], i.e. the noise has a positive
// DC bias — confirm zero-mean noise was not intended.
void Reberation::RenderImpulseResponse(int p_block_size, int p_sampling_rate, float p_reberation_time)
{
	m_impulse_response_left.resize(p_block_size, 0.0f);
	m_impulse_response_right.resize(p_block_size, 0.0f);
	int quiet_period = p_block_size; // filter_delay = block_size
	m_quiet_period_sec = static_cast<float>(quiet_period) / static_cast<float>(p_sampling_rate);
	const float exp_decay = -13.8155;
	srand(0);
	for (int i = 0; i < p_block_size; ++i) {
		float envelope = exp(exp_decay * (i + quiet_period) / p_sampling_rate / p_reberation_time);
		assert(envelope >= 0 && envelope <= 1.0);
		m_impulse_response_left[i] = FloatRand() * envelope;
		m_impulse_response_right[i] = FloatRand() * envelope;
	}
}
float Reberation::GetQuietPeriod() const {
return m_quiet_period_sec;
}
// Accumulates (+=) the reverberated signal into *p_output_left/right while
// buffering p_input. Once a full block of input has been gathered, it is
// pushed through both ears' convolution filters to produce the next block of
// reverb output. Requires input and both outputs to be the same size, and
// the call sizes must divide evenly into the block cadence (the final assert
// checks the read cursor never passes the block boundary mid-call).
void Reberation::AddReberation(const std::vector<float>& p_input, std::vector<float>* p_output_left, std::vector<float>* p_output_right)
{
	assert(p_output_left && p_output_right);
	assert(p_output_left->size() == p_output_right->size());
	assert(p_input.size() == p_output_right->size());
	// Mix previously computed reverb output into the caller's buffers.
	for (int i = 0; i < p_input.size(); ++i) {
		(*p_output_left)[i] += m_reberation_output_left[m_reberation_output_read_pos];
		(*p_output_right)[i] += m_reberation_output_right[m_reberation_output_read_pos];
		++m_reberation_output_read_pos;
	}
	// Queue the new input for the next convolution block.
	m_reberation_input.insert(m_reberation_input.end(), p_input.begin(), p_input.end());
	if (m_reberation_output_read_pos == m_block_size) {
		// A full block accumulated: run it through both filters and refill
		// the output buffers.
		m_left_reberation_filter->AddSignalBlock(m_reberation_input);
		m_right_reberation_filter->AddSignalBlock(m_reberation_input);
		m_reberation_input.clear();
		m_left_reberation_filter->GetResult(&m_reberation_output_left);
		m_right_reberation_filter->GetResult(&m_reberation_output_right);
		m_reberation_output_read_pos = 0;
	}
	assert(m_reberation_output_read_pos < m_block_size);
}
// Uniform pseudo-random float in [0, 1], driven by the C library rand().
float Reberation::FloatRand() {
	const float raw = static_cast<float>(rand());
	return raw / static_cast<float>(RAND_MAX);
}
const std::vector<float>& Reberation::GetImpulseResponseLeft() const {
return m_impulse_response_left;
}
const std::vector<float>& Reberation::GetImpulseResponseRight() const {
return m_impulse_response_right;
}

View File

@@ -0,0 +1,30 @@
#pragma once
#include<vector>
#include"FFTFilter.h"
// Stereo reverberation effect: convolves the input with a randomly generated,
// exponentially decaying impulse response (one per ear) using FFT filters.
// ("Reberation" is a misspelling of "reverberation", kept for API stability.)
class Reberation
{
public:
	// p_block_size: frames per processing block; p_sampling_rate in Hz;
	// p_reberation_time: reverb decay time in seconds.
	Reberation(int p_block_size, int p_sampling_rate, float p_reberation_time);
	~Reberation();
	// Seconds of initial silence before the reverb tail begins.
	float GetQuietPeriod() const;
	// Accumulates reverb into the output buffers while consuming p_input.
	void AddReberation(const std::vector<float>& p_input, std::vector<float>* p_output_left, std::vector<float>* p_output_right);
	const std::vector<float>& GetImpulseResponseLeft() const;
	const std::vector<float>& GetImpulseResponseRight() const;
private:
	void RenderImpulseResponse(int p_block_size, int p_sampling_rate, float p_reberation_time);
	static float FloatRand();  // uniform float in [0, 1]
	int m_block_size;
	std::vector<float> m_impulse_response_left;
	std::vector<float> m_impulse_response_right;
	float m_quiet_period_sec;
	// NOTE(review): owning raw pointers allocated in the constructor; verify
	// they are released in the destructor (consider std::unique_ptr).
	FFTFilter* m_left_reberation_filter;
	FFTFilter* m_right_reberation_filter;
	std::vector<float> m_reberation_input;  // buffers input until a full block
	std::vector<float> m_reberation_output_left;
	std::vector<float> m_reberation_output_right;
	int m_reberation_output_read_pos;  // read cursor into the output blocks
};

View File

@@ -0,0 +1,79 @@
#include <assert.h>
#include <cmath>
#include <string>
#include <iostream>
#include <cstdlib>
#include "../libsamplerate/samplerate.h"
#include "Resampler.h"
using namespace std;
// input_len: expected input length in frames.
// resampling_factor: output rate / input rate; must be > 0.
// Creates a mono libsamplerate converter (best-quality sinc) and precomputes
// the output buffer length. Exits the process on initialization failure.
Resampler::Resampler(int input_len, double resampling_factor)
	: input_len_(input_len),
	resample_factor_(resampling_factor) {
	assert(resample_factor_ > 0);
	int error;
	static const int channels = 1;
	libsamplerate_handle_ = src_new(SRC_SINC_BEST_QUALITY, channels, &error);
	if (!libsamplerate_handle_) {
		cerr << "Error during libsamplerate initialization: "
			<< string(src_strerror(error));
		exit(1);
	}
	// Next power of 2 of (input_len_ * resample_factor_)
	//int output_len_bits = static_cast<int>(floor(
	//	log(input_len_ * resample_factor_) / log(2)));
	//output_len_ = 1 << (output_len_bits + 1);
	// +1 absorbs the truncation of the fractional frame count.
	output_len_ = input_len_ * resample_factor_ + 1;
	assert(libsamplerate_handle_ && "Creating libsamplerate handler failed");
}
// Destroys the libsamplerate handle. src_delete() returns NULL, which also
// resets the member pointer.
Resampler::~Resampler() {
	assert(libsamplerate_handle_);
	libsamplerate_handle_ = src_delete(libsamplerate_handle_);
}
// Resamples |input| into |*output| at the configured ratio. The converter is
// reset per call and end_of_input is set, so each call is an independent,
// self-contained conversion. Exits the process on any libsamplerate error.
void Resampler::Resample(const vector<float>& input, vector<float>* output) {
	assert(output);
	// Exact float compare is intentional: 1.0 acts as a "no resampling" flag.
	if (resample_factor_ != 1.0) {
		output->clear();
		output->resize(output_len_, 0.0f);
		SRC_DATA libsamplerate_data;
		// NOTE(review): &input[0] assumes a non-empty input — confirm callers.
		libsamplerate_data.data_in = const_cast<float*>(&input[0]);
		libsamplerate_data.input_frames = input.size();
		libsamplerate_data.data_out = &((*output)[0]);
		libsamplerate_data.output_frames = output->size();
		libsamplerate_data.src_ratio = resample_factor_;
		libsamplerate_data.end_of_input = 1;
		int error = src_reset(libsamplerate_handle_);
		if (error != 0) {
			cerr << "Error during libsamplerate reset: "
				<< string(src_strerror(error));
			exit(1);
		}
		error = src_set_ratio(libsamplerate_handle_, resample_factor_);
		if (error != 0) {
			cerr << "Error setting resampling ratio: " << string(src_strerror(error));
			exit(1);
		}
		error = src_process(libsamplerate_handle_, &libsamplerate_data);
		if (error != 0) {
			cerr << "Error during resampling: " << string(src_strerror(error));
			exit(1);
		}
	}
	else {
		// Copy signal if resample factor is 1.0
		output->assign(input.begin(), input.end());
	}
}
int Resampler::GetOutputLength() {
return output_len_;
}

View File

@@ -0,0 +1,26 @@
#pragma once
#include<vector>
// Forward declaration of the libsamplerate state handle so this header does
// not need to include samplerate.h.
struct SRC_STATE_tag;
// Audio resampler built on libsamplerate: converts audio between sampling
// rates so data can match different devices or processing pipelines.
class Resampler
{
public:
	// input_len: length of the input audio buffer (frames).
	// resampling_factor: output rate / input rate; determines output length.
	Resampler(int input_len, double resampling_factor);
	~Resampler();
	// Resamples |input| and stores the result in |*output|.
	// (Fixed the "intput" typo in the declaration-only parameter name.)
	void Resample(const std::vector<float>& input, std::vector<float>* output);
	// Length (in frames) of the resampled output buffer.
	int GetOutputLength();
private:
	int input_len_;
	int output_len_;
	double resample_factor_;
	SRC_STATE_tag* libsamplerate_handle_;  // owned; released via src_delete()
};

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,5 @@
# Recursively collect every .cpp under this directory into the CONTEXT list
file(GLOB_RECURSE CONTEXT ./ *.cpp)
# Build the sources in CONTEXT into the "context" static library
add_library(context ${CONTEXT} )

View File

@@ -0,0 +1,17 @@
#include"Context.h"
#include"../global/Config.h"
#include<iostream>
// Boots the core subsystems: creates the Win32 window, points the software
// GPU at the window's canvas memory, and hooks a test event callback.
// Members are initialized in declaration order (see Context.h), not in the
// order of this initializer list.
Context::Context(HINSTANCE p_hInstance):
	m_window(m_input),
	m_renderer(m_gpu),
	m_audioManager(m_audioEngine)
{
	m_window.initWindow(p_hInstance,SCREEN_WIDTH,SCREEN_HEIGHT);
	m_gpu.initGL(SCREEN_WIDTH, SCREEN_HEIGHT, m_window.getCanvas());
	// NOTE(review): std::bind(&Window::Test, m_window, ...) binds a COPY of
	// m_window; if the callback should act on this context's window, bind
	// &m_window instead — confirm intent.
	m_gpu.test += std::bind(&Window::Test, m_window, std::placeholders::_1);
}
// Subsystems are value members and clean up via their own destructors.
Context::~Context()
{
}

View File

@@ -0,0 +1,27 @@
#pragma once
#include"../window/Window.h"
#include"../gpu/Gpu.h"
#include"../myinput/Input.h"
#include"../renderer/Renderer.h"
#include"../scene/SceneManager.h"
#include"../time/Clock.h"
#include"../audio/AudioManager.h"
#include"../audio/WavFileReader.h"
#include"../audio/AudioEngine.h"
// Owns every engine subsystem and wires them together. C++ constructs the
// members below in declaration order, regardless of the constructor's
// initializer-list order.
class Context
{
public:
	Context(HINSTANCE p_hInstance);
	~Context();
public:
	GPU m_gpu;
	// NOTE(review): m_window is constructed with a reference to m_input, yet
	// m_input is declared (hence constructed) after m_window. Storing the
	// reference is only safe if Window's constructor does not use it — confirm.
	Window m_window;
	Input m_input;
	Renderer m_renderer;  // constructed with a reference to m_gpu (declared above)
	SceneManager m_sceneManager;
	Clock m_clock;
	// NOTE(review): same declaration-order caveat — m_audioManager receives a
	// reference to m_audioEngine, which is declared after it.
	AudioManager m_audioManager;
	AudioEngine m_audioEngine;
	WavFileReader m_wavFileReader;
};

View File

@@ -0,0 +1,32 @@
#pragma once
#include <cstdint>
#include <functional>
#include <unordered_map>
// Unique handle returned by AddListener(); used to remove a listener later.
using ListenerID = uint64_t;
// Simple multicast event/delegate: stores any number of callbacks taking
// ArgTypes... and invokes all of them via Invoke().
// (Fixed: std::unordered_map and uint64_t were used without including
// <unordered_map>/<cstdint>, relying on transitive includes.)
template<class... ArgTypes>
class Event
{
public:
	void a(){}  // NOTE(review): leftover test stub; kept for source compatibility
	using Callback = std::function<void(ArgTypes...)>;
	// Registers a callback; returns an id that can later remove it.
	ListenerID AddListener(Callback p_callback);
	ListenerID operator+=(Callback p_callback);
	// Removes the listener with the given id; true if one was removed.
	bool RemoveListener(ListenerID p_listenerID);
	bool operator-=(ListenerID p_listenerID);
	void RemoveAllListeners();
	uint64_t GetListenerCount();
	// Calls every registered listener with the given arguments.
	void Invoke(ArgTypes... p_args);
private:
	std::unordered_map<ListenerID, Callback> m_callbacks; // all listeners of this event
	ListenerID m_availableListenerID = 0; // monotonically increasing id source
};
#include"Event.inl"

View File

@@ -0,0 +1,48 @@
#include"Event.h"
// Registers a callback under a fresh id (ids increase monotonically and are
// never reused within one Event instance).
template<class... ArgTypes>
ListenerID Event<ArgTypes...>::AddListener(Callback p_callback)
{
	ListenerID listenerID = m_availableListenerID++;
	m_callbacks.emplace(listenerID, p_callback);
	return listenerID;
}
// Operator sugar for AddListener().
template<class... ArgTypes>
ListenerID Event<ArgTypes...>::operator+=(Callback p_callback)
{
	return AddListener(p_callback);
}
// Removes the listener with the given id; returns true if one was removed
// (map::erase reports the number of erased elements: 0 or 1).
template<class... ArgTypes>
bool Event<ArgTypes...>::RemoveListener(ListenerID p_listenerID)
{
	return m_callbacks.erase(p_listenerID) != 0;
}
// Operator sugar for RemoveListener().
template<class... ArgTypes>
bool Event<ArgTypes...>::operator-=(ListenerID p_listenerID)
{
	return RemoveListener(p_listenerID);
}
template<class... ArgTypes>
void Event<ArgTypes...>::RemoveAllListeners()
{
	m_callbacks.clear();
}
template<class... ArgTypes>
uint64_t Event<ArgTypes...>::GetListenerCount()
{
	return m_callbacks.size();
}
// Invokes every registered listener (in unordered_map iteration order,
// which is unspecified).
template<class... ArgTypes>
void Event<ArgTypes...>::Invoke(ArgTypes... p_args)
{
	for (auto const& [key, value] : m_callbacks)
		value(p_args...);
}

View File

@@ -0,0 +1,7 @@
# Recursively collect every .cpp under this directory into the GAME list
file(GLOB_RECURSE GAME ./ *.cpp)
# Build the sources in GAME into the "game" static library
add_library(game ${GAME} )

View File

@@ -0,0 +1,57 @@
#include"Game.h"
// Binds the game to the shared engine context (window, renderer, audio, ...).
Game::Game(Context& p_context) :
	m_context(p_context)
{
}
// The context is not owned; nothing to release.
Game::~Game() {}
// Per-frame tick: advances the active scene, renders the UI, then updates
// the audio subsystems. p_deltaTime is the elapsed time since the previous
// frame, in the units produced by Clock::Update().
void Game::Update(float p_deltaTime)
{
	/*Scene-Update*/
	if (m_context.m_sceneManager.m_currentScene)
	{
		m_context.m_sceneManager.m_currentScene->Update(p_deltaTime);
	}
	/*Network-Update*/
	/*Render-Update*/
	m_context.m_renderer.RenderUI();
	/*Audio-Update*/
	m_context.m_audioManager.Update(p_deltaTime);
	m_context.m_audioEngine.Update(p_deltaTime);
}
// Start-of-frame work: pump the Win32 message queue, update the window, and
// clear the GPU framebuffer before anything draws.
void Game::PreUpdate()
{
	m_context.m_window.peekMessage();
	m_context.m_window.Update();
	m_context.m_gpu.clear();
}
// End-of-frame work: present the back buffer.
void Game::PostUpdate()
{
	m_context.m_window.swapBuffer();
}
// One-time startup: create and register the start scene, make it current,
// then bring up the audio engine.
void Game::OnEnter()
{
	// NOTE(review): the scene is allocated with new; ownership appears to
	// pass to SceneManager via RegisterScene — confirm it deletes scenes.
	StartScene* m_startScene = new StartScene();
	m_context.m_sceneManager.RegisterScene("start_scene", m_startScene);
	m_startScene->m_context = &m_context;
	m_context.m_sceneManager.ChangeScene(m_startScene);
	/*Audio-Init*/
	m_context.m_audioEngine.InitAudioEngine();
}
// One-time shutdown: tear down the audio engine.
void Game::OnExit()
{
	/*Audio-Exit*/
	m_context.m_audioEngine.ExitAudioEngine();
}

View File

@@ -0,0 +1,17 @@
#pragma once
#include"../context/Context.h"
#include"../scene/StartScene/StartScene.h"
// Top-level game loop object: drives the shared Context through the
// enter/pre-update/update/post-update/exit phases each frame.
class Game
{
public:
	Game(Context& p_context);
	~Game();
	void Update(float p_deltaTime);  // per-frame scene/render/audio tick
	void PreUpdate();                // message pump + framebuffer clear
	void PostUpdate();               // present back buffer
	void OnEnter();                  // startup: scene registration + audio init
	void OnExit();                   // shutdown: audio teardown
private:
	Context& m_context;  // shared engine context (not owned)
};

View File

@@ -0,0 +1,51 @@
#pragma once
#include<iostream>
#include<vector>
#include<map>
#include <deque>
#include<cmath>
#include<assert.h>
/*
Base: shared utility macros and helpers.
*/
#define PI 3.14159265358979323
// Degrees -> radians (0.01745329... == PI / 180).
#define DEG2RAD(theta) (0.01745329251994329 * (theta))
// Fractional part of v (via truncation toward zero).
#define FRACTION(v) ((v) - (int)(v))
// Distance from v up to the next integer. The argument is now fully
// parenthesized: the previous ((int)v + 1 - v) expanded incorrectly for
// expression arguments such as UNFRACTION(a + b).
#define UNFRACTION(v) ((int)(v) + 1 - (v))
// No trailing semicolon: call sites write SWAP_INT(a, b); themselves, which
// keeps the macro safe inside unbraced if/else branches (the old definition
// expanded to a double semicolon).
#define SWAP_INT(a,b) swapT<int>(a,b)
// Exchanges the values of a and b (generic three-assignment swap).
template<typename T>
void swapT(T& a, T& b)
{
	T old_a(a);
	a = b;
	b = old_a;
}
// Simple 3D point with double-precision components, zero by default.
struct Point3D {
	double x{0.0};
	double y{0.0};
	double z{0.0};
	Point3D() = default;
	Point3D(double p_x, double p_y, double p_z) : x(p_x), y(p_y), z(p_z) {}
};
using BYTE = unsigned char;
// One 32-bit pixel. Members are laid out B, G, R, A in memory while the
// constructor takes the conventional (r, g, b, a) order — presumably to
// match the BGRA byte order of Windows DIB surfaces (confirm against the
// window canvas format). Defaults to opaque white.
struct RGBA {
	BYTE mB;
	BYTE mG;
	BYTE mR;
	BYTE mA;
	RGBA(
		BYTE r = 255,
		BYTE g = 255,
		BYTE b = 255,
		BYTE a = 255)
	{
		mR = r;
		mG = g;
		mB = b;
		mA = a;
	}
};

View File

@@ -0,0 +1,14 @@
#pragma once
#include<iostream>
#include<vector>
#include<map>
#include<cmath>
#include<assert.h>
// Window dimensions in pixels.
#define SCREEN_WIDTH 800
#define SCREEN_HEIGHT 600
// Side length of one tile/block, in pixels.
#define RECTEDGE 30

View File

@@ -0,0 +1,3 @@
file(GLOB_RECURSE GPU ./ *.cpp)
add_library(gpu ${GPU})

View File

@@ -0,0 +1,23 @@
#include "FrameBuffer.h"
// Wraps a width*height pixel surface. If |buffer| is supplied, the color
// buffer is borrowed from the caller (e.g. the window's bitmap memory) and
// is not freed here; otherwise one is allocated and owned. The depth buffer
// is always allocated here, zero-initialized.
FrameBuffer::FrameBuffer(uint32_t width, uint32_t height, void* buffer) {
	mWidth = width;
	mHeight = height;
	mPixelSize = mWidth * mHeight;
	if (!buffer) {
		buffer = new RGBA[mPixelSize];
		mExternBuffer = false;
	}
	else {
		mExternBuffer = true;
	}
	mColorBuffer = (RGBA*)buffer;
	mDepthBuffer = new float[mPixelSize] {};  // {} value-initializes to 0.0f
}
// Releases the buffers this FrameBuffer owns. The color buffer is freed only
// when it was allocated internally (not when supplied by the caller); the
// depth buffer is always owned and was previously leaked.
FrameBuffer::~FrameBuffer() {
	if (!mExternBuffer && mColorBuffer) {
		delete[] mColorBuffer;
	}
	delete[] mDepthBuffer;
}

View File

@@ -0,0 +1,20 @@
#pragma once
#include "../global/Base.h"
/*
 * class FrameBuffer
 * Frame buffer: per-pixel color + depth storage for the software rasterizer.
 */
class FrameBuffer {
public:
	// colorBuffer: optional externally owned pixel memory (e.g. the window's
	// bitmap); when null an internal buffer is allocated and owned.
	FrameBuffer(uint32_t width, uint32_t height, void* colorBuffer = nullptr);
	~FrameBuffer();
	FrameBuffer(const FrameBuffer&) = delete;  // non-copyable: owns raw buffers
	// NOTE(review): copy assignment is not deleted; assigning would double-
	// free the buffers — consider deleting operator= as well.
	uint32_t mWidth{ 0 };
	uint32_t mHeight{ 0 };
	uint32_t mPixelSize{ 0 };        // mWidth * mHeight
	RGBA* mColorBuffer{ nullptr };   // pixel storage base address
	float* mDepthBuffer{ nullptr };  // per-pixel depth, always owned
	bool mExternBuffer{ false };     // true when mColorBuffer is caller-owned
};

View File

@@ -0,0 +1,112 @@
#include "Gpu.h"
GPU::GPU() {}
// Frees the framebuffer created by initGL (if any).
GPU::~GPU() {
	if (mFrameBuffer) {
		delete mFrameBuffer;
	}
}
// Binds the software rasterizer to the output surface: |buffer| is the
// window's bitmap memory (or null to let FrameBuffer allocate internally).
// NOTE(review): calling initGL twice leaks the previous FrameBuffer.
void GPU::initGL(const uint32_t& width, const uint32_t& height, void* buffer) {
	m_screenWidth = width;
	m_screenHeight = height;
	mFrameBuffer = new FrameBuffer(width, height, buffer);
}
// Resets the canvas: color to transparent black, depth to FLT_MAX so any
// subsequently drawn depth value passes drawPoint's test.
void GPU::clear() {
	std::fill_n(mFrameBuffer->mColorBuffer, mFrameBuffer->mPixelSize, RGBA(0, 0, 0, 0));
	std::fill_n(mFrameBuffer->mDepthBuffer, mFrameBuffer->mPixelSize, FLT_MAX);
}
// Plots one pixel with a depth test (smaller depth wins). Pixels outside the
// screen are silently discarded.
void GPU::drawPoint(const int& x, const int& y, const RGBA& color, const float& depth) {
	// Coordinates are measured from the window's bottom-left corner.
	if (x < 0 || x >= m_screenWidth || y < 0 || y >= m_screenHeight) { return; }
	uint32_t pixelPos = y * mFrameBuffer->mWidth + x;
	/*depth-test: keep the existing pixel if it is closer*/
	if (depth > mFrameBuffer->mDepthBuffer[pixelPos]) { return; }
	mFrameBuffer->mColorBuffer[pixelPos] = color;
	mFrameBuffer->mDepthBuffer[pixelPos] = depth;
}
void GPU::drawVerticalLine(const int& x, const int& y_min, const int y_max, const RGBA& color, const float& depth)
{
for (int i = std::max(y_min,0); i <= std::min(y_max,SCREEN_HEIGHT); i++)
{
drawPoint(x, i, color, depth);
}
}
// Fills the axis-aligned rectangle [x_min, x_max] x [y_min, y_max] by
// drawing one vertical line per column.
void GPU::drawRect(const int& x_min, const int x_max, const int y_min, const int y_max, const RGBA& color, const float& depth)
{
	for (int col = x_min; col <= x_max; ++col)
	{
		drawVerticalLine(col, y_min, y_max, color, depth);
	}
}
// Draws a line from (x_from, y_from) toward (x_to, y_to) using Bresenham's
// integer algorithm; out-of-bounds pixels are rejected by drawPoint.
// Fixes: the |depth| parameter was silently ignored (drawPoint was called
// with the default depth of 0), and an unused per-step interpolation factor
// (_scale) was computed and discarded every iteration.
// Note: the endpoint itself is not plotted (the loop runs sumStep
// iterations), matching the original behavior.
void GPU::drawLine(const int& x_from, const int& y_from, const int& x_to, const int& y_to, const RGBA& color, const float& depth)
{
	int disX = abs(x_to - x_from);
	int disY = abs(y_to - y_from);
	int xNow = x_from;
	int yNow = y_from;
	// Step direction along each axis: -1, 0 or +1.
	const int stepX = (x_to > x_from) ? 1 : ((x_to < x_from) ? -1 : 0);
	const int stepY = (y_to > y_from) ? 1 : ((y_to < y_from) ? -1 : 0);
	// Iterate along the major (longer) axis.
	bool useXStep = true;
	if (disX < disY)
	{
		useXStep = false;
		SWAP_INT(disX, disY);
	}
	const int sumStep = disX;
	int p = 2 * disY - disX;  // Bresenham decision variable
	for (int i = 0; i < sumStep; i++)
	{
		drawPoint(xNow, yNow, color, depth);
		if (p >= 0)
		{
			// Step along the minor axis.
			if (useXStep)
			{
				yNow += stepY;
			}
			else
			{
				xNow += stepX;
			}
			p -= 2 * disX;
		}
		// Step along the major axis.
		if (useXStep)
		{
			xNow += stepX;
		}
		else
		{
			yNow += stepY;
		}
		p += 2 * disY;
	}
}

View File

@@ -0,0 +1,38 @@
#pragma once
#include "../global/Base.h"
#include "../global/Config.h"
#include "FrameBuffer.h"
#include"../image/image.h"
#include"../event/event.h"
/*
 * class GPU
 * Software rasterizer that emulates a GPU-style drawing API.
 */
class GPU {
public:
	GPU();
	~GPU();
	// Binds the library to the output surface: the window's bitmap memory
	// pointer plus its width/height (akin to attaching a hardware driver).
	void initGL(const uint32_t& width, const uint32_t& height, void* buffer = nullptr);
	// Clears the canvas (color and depth buffers).
	void clear();
	// Draws a single pixel at (x, y) with a depth test.
	void drawPoint(const int& x, const int& y, const RGBA& color, const float& depth = 0);
	// Draws a vertical pixel run at column x spanning [y_min, y_max].
	void drawVerticalLine(const int& x, const int& y_min, const int y_max, const RGBA& color, const float& depth = 0.);
	// Fills the rectangle [x_min, x_max] x [y_min, y_max].
	void drawRect(const int& x_min, const int x_max, const int y_min, const int y_max, const RGBA& color, const float& depth = 0.);
	// Draws a line using Bresenham's algorithm.
	void drawLine(const int& x_from, const int& y_from, const int& x_to, const int& y_to, const RGBA& color, const float& depth = 0.);
public:
	int m_screenWidth;
	int m_screenHeight;
	Event<int> test; //for-test
private:
	FrameBuffer* mFrameBuffer{ nullptr };  // owned; current draw target (window bitmap memory)
};

Some files were not shown because too many files have changed in this diff Show More